id | content | max_stars_repo_path |
|---|---|---|
crossvul-cpp_data_good_3498_6 | /*
* Implement CPU time clocks for the POSIX clock interface.
*/
#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
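/*
 * In kernels of this vintage a CPU clockid_t packs three fields, decoded
 * only through the CPUCLOCK_* macros from <linux/posix-timers.h>: the
 * target PID in the upper bits (CPUCLOCK_PID), a per-thread vs. process
 * flag (CPUCLOCK_PERTHREAD), and low bits selecting the clock type
 * (CPUCLOCK_WHICH: CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED).
 */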
static int check_clock(const clockid_t which_clock)
{
int error = 0;
struct task_struct *p;
const pid_t pid = CPUCLOCK_PID(which_clock);
if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
return -EINVAL;
if (pid == 0)
return 0;
read_lock(&tasklist_lock);
p = find_task_by_vpid(pid);
if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
same_thread_group(p, current) : thread_group_leader(p))) {
error = -EINVAL;
}
read_unlock(&tasklist_lock);
return error;
}
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
union cpu_time_count ret;
ret.sched = 0; /* high half always zero when .cpu used */
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
} else {
ret.cpu = timespec_to_cputime(tp);
}
return ret;
}
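/*
 * Note that because .sched is zeroed before .cpu is written, the 64-bit
 * .sched view of the union is zero if and only if the stored value is
 * zero for either clock type; that is why expires.sched == 0 serves
 * throughout this file as the "timer not armed" test.
 */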
static void sample_to_timespec(const clockid_t which_clock,
union cpu_time_count cpu,
struct timespec *tp)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
*tp = ns_to_timespec(cpu.sched);
else
cputime_to_timespec(cpu.cpu, tp);
}
static inline int cpu_time_before(const clockid_t which_clock,
union cpu_time_count now,
union cpu_time_count then)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
return now.sched < then.sched;
} else {
return cputime_lt(now.cpu, then.cpu);
}
}
static inline void cpu_time_add(const clockid_t which_clock,
union cpu_time_count *acc,
union cpu_time_count val)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
acc->sched += val.sched;
} else {
acc->cpu = cputime_add(acc->cpu, val.cpu);
}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
union cpu_time_count a,
union cpu_time_count b)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
a.sched -= b.sched;
} else {
a.cpu = cputime_sub(a.cpu, b.cpu);
}
return a;
}
/*
* Divide and limit the result to res >= 1
*
* This is necessary to prevent signal delivery starvation, when the result of
* the division would be rounded down to 0.
*/
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
cputime_t res = cputime_div(time, div);
return max_t(cputime_t, res, 1);
}
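/*
 * For illustration: with jiffies-based cputime and HZ=100, splitting a
 * 1-tick remainder across 200 threads would divide down to 0 and no
 * per-thread expiry would ever be reached; clamping the quotient to 1
 * keeps every thread's share nonzero.
 */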
/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
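/*
 * Worked example for the sched case below: expires=10, incr=3, now=20
 * gives delta = 20 + 3 - 10 = 13. The doubling loop raises incr to 12
 * (i=2); the halving loop then subtracts 12 once (it_overrun += 1<<2)
 * and skips 6 and 3, leaving expires=22 > now with 4 overruns counted,
 * i.e. floor(13/3) reloads accounted for in logarithmically many steps.
 */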
static void bump_cpu_timer(struct k_itimer *timer,
union cpu_time_count now)
{
int i;
if (timer->it.cpu.incr.sched == 0)
return;
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
unsigned long long delta, incr;
if (now.sched < timer->it.cpu.expires.sched)
return;
incr = timer->it.cpu.incr.sched;
delta = now.sched + incr - timer->it.cpu.expires.sched;
/* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; incr < delta - incr; i++)
incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
if (delta < incr)
continue;
timer->it.cpu.expires.sched += incr;
timer->it_overrun += 1 << i;
delta -= incr;
}
} else {
cputime_t delta, incr;
if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
return;
incr = timer->it.cpu.incr.cpu;
delta = cputime_sub(cputime_add(now.cpu, incr),
timer->it.cpu.expires.cpu);
/* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
incr = cputime_add(incr, incr);
for (; i >= 0; incr = cputime_halve(incr), i--) {
if (cputime_lt(delta, incr))
continue;
timer->it.cpu.expires.cpu =
cputime_add(timer->it.cpu.expires.cpu, incr);
timer->it_overrun += 1 << i;
delta = cputime_sub(delta, incr);
}
}
}
static inline cputime_t prof_ticks(struct task_struct *p)
{
return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
return task_sched_runtime(p);
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
int error = check_clock(which_clock);
if (!error) {
tp->tv_sec = 0;
tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
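/* 1s/HZ rounded up, e.g. HZ=100 reports 10000000 ns (10 ms). */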
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
/*
* If sched_clock is using a cycle counter, we
* don't have any idea of its true resolution
* exported, but it is much more than 1s/HZ.
*/
tp->tv_nsec = 1;
}
}
return error;
}
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
/*
* You can never reset a CPU clock, but we check for other errors
* in the call before failing with EPERM.
*/
int error = check_clock(which_clock);
if (error == 0) {
error = -EPERM;
}
return error;
}
/*
* Sample a per-thread clock for the given task.
*/
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
union cpu_time_count *cpu)
{
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
cpu->cpu = prof_ticks(p);
break;
case CPUCLOCK_VIRT:
cpu->cpu = virt_ticks(p);
break;
case CPUCLOCK_SCHED:
cpu->sched = sched_ns(p);
break;
}
return 0;
}
/*
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with tasklist_lock held for reading, and p->sighand->siglock.
*/
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
struct task_struct *p,
union cpu_time_count *cpu)
{
struct task_struct *t = p;
switch (clock_idx) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
do {
cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
t = next_thread(t);
} while (t != p);
break;
case CPUCLOCK_VIRT:
cpu->cpu = p->signal->utime;
do {
cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
t = next_thread(t);
} while (t != p);
break;
case CPUCLOCK_SCHED:
cpu->sched = p->signal->sum_sched_runtime;
/* Add in each other live thread. */
while ((t = next_thread(t)) != p) {
cpu->sched += t->se.sum_exec_runtime;
}
cpu->sched += sched_ns(p);
break;
}
return 0;
}
/*
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with tasklist_lock held for reading.
*/
static int cpu_clock_sample_group(const clockid_t which_clock,
struct task_struct *p,
union cpu_time_count *cpu)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&p->sighand->siglock, flags);
ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
cpu);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
return ret;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
const pid_t pid = CPUCLOCK_PID(which_clock);
int error = -EINVAL;
union cpu_time_count rtn;
if (pid == 0) {
/*
* Special case constant value for our own clocks.
* We don't have to do any lookup to find ourselves.
*/
if (CPUCLOCK_PERTHREAD(which_clock)) {
/*
* Sampling just ourselves we can do with no locking.
*/
error = cpu_clock_sample(which_clock,
current, &rtn);
} else {
read_lock(&tasklist_lock);
error = cpu_clock_sample_group(which_clock,
current, &rtn);
read_unlock(&tasklist_lock);
}
} else {
/*
* Find the given PID, and validate that the caller
* should be able to see it.
*/
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p) {
if (CPUCLOCK_PERTHREAD(which_clock)) {
if (same_thread_group(p, current)) {
error = cpu_clock_sample(which_clock,
p, &rtn);
}
} else {
read_lock(&tasklist_lock);
if (thread_group_leader(p) && p->signal) {
error =
cpu_clock_sample_group(which_clock,
p, &rtn);
}
read_unlock(&tasklist_lock);
}
}
rcu_read_unlock();
}
if (error)
return error;
sample_to_timespec(which_clock, rtn, tp);
return 0;
}
/*
* Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
* This is called from sys_timer_create with the new timer already locked.
*/
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
int ret = 0;
const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
struct task_struct *p;
if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
return -EINVAL;
INIT_LIST_HEAD(&new_timer->it.cpu.entry);
new_timer->it.cpu.incr.sched = 0;
new_timer->it.cpu.expires.sched = 0;
read_lock(&tasklist_lock);
if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
if (pid == 0) {
p = current;
} else {
p = find_task_by_vpid(pid);
if (p && !same_thread_group(p, current))
p = NULL;
}
} else {
if (pid == 0) {
p = current->group_leader;
} else {
p = find_task_by_vpid(pid);
if (p && !thread_group_leader(p))
p = NULL;
}
}
new_timer->it.cpu.task = p;
if (p) {
get_task_struct(p);
} else {
ret = -EINVAL;
}
read_unlock(&tasklist_lock);
return ret;
}
/*
* Clean up a CPU-clock timer that is about to be destroyed.
* This is called from timer deletion with the timer already locked.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
int posix_cpu_timer_del(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
int ret = 0;
if (likely(p != NULL)) {
read_lock(&tasklist_lock);
if (unlikely(p->signal == NULL)) {
/*
* We raced with the reaping of the task.
* The deletion should have cleared us off the list.
*/
BUG_ON(!list_empty(&timer->it.cpu.entry));
} else {
spin_lock(&p->sighand->siglock);
if (timer->it.cpu.firing)
ret = TIMER_RETRY;
else
list_del(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
}
read_unlock(&tasklist_lock);
if (!ret)
put_task_struct(p);
}
return ret;
}
/*
* Clean out CPU timers still ticking when a thread exited. The task
* pointer is cleared, and the expiry time is replaced with the residual
* time for later timer_gettime calls to return.
* This must be called with the siglock held.
*/
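/*
 * The head argument points at the CPUCLOCK_PROF list of a cpu_timers[]
 * array; the two ++head statements below advance to the CPUCLOCK_VIRT
 * and CPUCLOCK_SCHED lists in turn, matching the clock-index order used
 * elsewhere in this file.
 */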
static void cleanup_timers(struct list_head *head,
cputime_t utime, cputime_t stime,
unsigned long long sum_exec_runtime)
{
struct cpu_timer_list *timer, *next;
cputime_t ptime = cputime_add(utime, stime);
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, ptime)) {
timer->expires.cpu = cputime_zero;
} else {
timer->expires.cpu = cputime_sub(timer->expires.cpu,
ptime);
}
}
++head;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, utime)) {
timer->expires.cpu = cputime_zero;
} else {
timer->expires.cpu = cputime_sub(timer->expires.cpu,
utime);
}
}
++head;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (timer->expires.sched < sum_exec_runtime) {
timer->expires.sched = 0;
} else {
timer->expires.sched -= sum_exec_runtime;
}
}
}
/*
* These are both called with the siglock held, when the current thread
* is being reaped. When the final (leader) thread in the group is reaped,
* posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
cleanup_timers(tsk->cpu_timers,
tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
cleanup_timers(tsk->signal->cpu_timers,
cputime_add(tsk->utime, tsk->signal->utime),
cputime_add(tsk->stime, tsk->signal->stime),
tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
}
/*
* Set the expiry times of all the threads in the process so one of them
* will go off before the process cumulative expiry total is reached.
*/
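/*
 * Sketch of the arithmetic: if the group expiry is 300ms of CPU time
 * away and three threads are live, each thread's expiry becomes its own
 * current ticks plus 100ms (never less than one tick, via
 * cputime_div_non_zero), so at least one thread must hit its per-thread
 * expiry no later than the group deadline and re-check the group total.
 */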
static void process_timer_rebalance(struct task_struct *p,
unsigned int clock_idx,
union cpu_time_count expires,
union cpu_time_count val)
{
cputime_t ticks, left;
unsigned long long ns, nsleft;
struct task_struct *t = p;
unsigned int nthreads = atomic_read(&p->signal->live);
if (!nthreads)
return;
switch (clock_idx) {
default:
BUG();
break;
case CPUCLOCK_PROF:
left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
if (likely(!(t->flags & PF_EXITING))) {
ticks = cputime_add(prof_ticks(t), left);
if (cputime_eq(t->it_prof_expires,
cputime_zero) ||
cputime_gt(t->it_prof_expires, ticks)) {
t->it_prof_expires = ticks;
}
}
t = next_thread(t);
} while (t != p);
break;
case CPUCLOCK_VIRT:
left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
if (likely(!(t->flags & PF_EXITING))) {
ticks = cputime_add(virt_ticks(t), left);
if (cputime_eq(t->it_virt_expires,
cputime_zero) ||
cputime_gt(t->it_virt_expires, ticks)) {
t->it_virt_expires = ticks;
}
}
t = next_thread(t);
} while (t != p);
break;
case CPUCLOCK_SCHED:
nsleft = expires.sched - val.sched;
do_div(nsleft, nthreads);
nsleft = max_t(unsigned long long, nsleft, 1);
do {
if (likely(!(t->flags & PF_EXITING))) {
ns = t->se.sum_exec_runtime + nsleft;
if (t->it_sched_expires == 0 ||
t->it_sched_expires > ns) {
t->it_sched_expires = ns;
}
}
t = next_thread(t);
} while (t != p);
break;
}
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
/*
* That's all for this thread or process.
* We leave our residual in expires to be reported.
*/
put_task_struct(timer->it.cpu.task);
timer->it.cpu.task = NULL;
timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
timer->it.cpu.expires,
now);
}
/*
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the tasklist_lock held
* for reading, and interrupts disabled.
*/
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
struct task_struct *p = timer->it.cpu.task;
struct list_head *head, *listpos;
struct cpu_timer_list *const nt = &timer->it.cpu;
struct cpu_timer_list *next;
unsigned long i;
head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
p->cpu_timers : p->signal->cpu_timers);
head += CPUCLOCK_WHICH(timer->it_clock);
BUG_ON(!irqs_disabled());
spin_lock(&p->sighand->siglock);
listpos = head;
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
list_for_each_entry(next, head, entry) {
if (next->expires.sched > nt->expires.sched)
break;
listpos = &next->entry;
}
} else {
list_for_each_entry(next, head, entry) {
if (cputime_gt(next->expires.cpu, nt->expires.cpu))
break;
listpos = &next->entry;
}
}
list_add(&nt->entry, listpos);
if (listpos == head) {
/*
* We are the new earliest-expiring timer.
* If we are a thread timer, there can always
* be a process timer telling us to stop earlier.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
switch (CPUCLOCK_WHICH(timer->it_clock)) {
default:
BUG();
case CPUCLOCK_PROF:
if (cputime_eq(p->it_prof_expires,
cputime_zero) ||
cputime_gt(p->it_prof_expires,
nt->expires.cpu))
p->it_prof_expires = nt->expires.cpu;
break;
case CPUCLOCK_VIRT:
if (cputime_eq(p->it_virt_expires,
cputime_zero) ||
cputime_gt(p->it_virt_expires,
nt->expires.cpu))
p->it_virt_expires = nt->expires.cpu;
break;
case CPUCLOCK_SCHED:
if (p->it_sched_expires == 0 ||
p->it_sched_expires > nt->expires.sched)
p->it_sched_expires = nt->expires.sched;
break;
}
} else {
/*
* For a process timer, we must balance
* all the live threads' expirations.
*/
switch (CPUCLOCK_WHICH(timer->it_clock)) {
default:
BUG();
case CPUCLOCK_VIRT:
if (!cputime_eq(p->signal->it_virt_expires,
cputime_zero) &&
cputime_lt(p->signal->it_virt_expires,
timer->it.cpu.expires.cpu))
break;
goto rebalance;
case CPUCLOCK_PROF:
if (!cputime_eq(p->signal->it_prof_expires,
cputime_zero) &&
cputime_lt(p->signal->it_prof_expires,
timer->it.cpu.expires.cpu))
break;
i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
if (i != RLIM_INFINITY &&
i <= cputime_to_secs(timer->it.cpu.expires.cpu))
break;
goto rebalance;
case CPUCLOCK_SCHED:
rebalance:
process_timer_rebalance(
timer->it.cpu.task,
CPUCLOCK_WHICH(timer->it_clock),
timer->it.cpu.expires, now);
break;
}
}
}
spin_unlock(&p->sighand->siglock);
}
/*
* The timer is locked, fire it and arrange for its reload.
*/
static void cpu_timer_fire(struct k_itimer *timer)
{
if (unlikely(timer->sigq == NULL)) {
/*
* This is a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
*/
wake_up_process(timer->it_process);
timer->it.cpu.expires.sched = 0;
} else if (timer->it.cpu.incr.sched == 0) {
/*
* One-shot timer. Clear it as soon as it's fired.
*/
posix_timer_event(timer, 0);
timer->it.cpu.expires.sched = 0;
} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
/*
* The signal did not get queued because the signal
* was ignored, so we won't get any callback to
* reload the timer. But we need to keep it
* ticking in case the signal is deliverable next time.
*/
posix_cpu_timer_schedule(timer);
}
}
/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
struct itimerspec *new, struct itimerspec *old)
{
struct task_struct *p = timer->it.cpu.task;
union cpu_time_count old_expires, new_expires, val;
int ret;
if (unlikely(p == NULL)) {
/*
* Timer refers to a dead task's clock.
*/
return -ESRCH;
}
new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
read_lock(&tasklist_lock);
/*
* We need the tasklist_lock to protect against reaping that
* clears p->signal. If p has just been reaped, we can no
* longer get any information about it at all.
*/
if (unlikely(p->signal == NULL)) {
read_unlock(&tasklist_lock);
put_task_struct(p);
timer->it.cpu.task = NULL;
return -ESRCH;
}
/*
* Disarm any old timer after extracting its expiry time.
*/
BUG_ON(!irqs_disabled());
ret = 0;
spin_lock(&p->sighand->siglock);
old_expires = timer->it.cpu.expires;
if (unlikely(timer->it.cpu.firing)) {
timer->it.cpu.firing = -1;
ret = TIMER_RETRY;
} else
list_del_init(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
/*
* We need to sample the current value to convert the new
* value from relative to absolute, and to convert the
* old value from absolute to relative. To set a process
* timer, we need a sample to balance the thread expiry
* times (in arm_timer). With an absolute time, we must
* check if it's already passed. In short, we need a sample.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &val);
} else {
cpu_clock_sample_group(timer->it_clock, p, &val);
}
if (old) {
if (old_expires.sched == 0) {
old->it_value.tv_sec = 0;
old->it_value.tv_nsec = 0;
} else {
/*
* Update the timer in case it has
* overrun already. If it has,
* we'll report it as having overrun
* and with the next reloaded timer
* already ticking, though we are
* swallowing that pending
* notification here to install the
* new setting.
*/
bump_cpu_timer(timer, val);
if (cpu_time_before(timer->it_clock, val,
timer->it.cpu.expires)) {
old_expires = cpu_time_sub(
timer->it_clock,
timer->it.cpu.expires, val);
sample_to_timespec(timer->it_clock,
old_expires,
&old->it_value);
} else {
old->it_value.tv_nsec = 1;
old->it_value.tv_sec = 0;
}
}
}
if (unlikely(ret)) {
/*
* We are colliding with the timer actually firing.
* Punt after filling in the timer's old value, and
* disable this firing since we are already reporting
* it as an overrun (thanks to bump_cpu_timer above).
*/
read_unlock(&tasklist_lock);
goto out;
}
if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
cpu_time_add(timer->it_clock, &new_expires, val);
}
/*
* Install the new expiry time (or zero).
* For a timer with no notification action, we don't actually
* arm the timer (we'll just fake it for timer_gettime).
*/
timer->it.cpu.expires = new_expires;
if (new_expires.sched != 0 &&
(timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
cpu_time_before(timer->it_clock, val, new_expires)) {
arm_timer(timer, val);
}
read_unlock(&tasklist_lock);
/*
* Install the new reload setting, and
* set up the signal and overrun bookkeeping.
*/
timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
&new->it_interval);
/*
* This acts as a modification timestamp for the timer,
* so any automatic reload attempt will punt on seeing
* that we have reset the timer manually.
*/
timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timer->it_overrun_last = 0;
timer->it_overrun = -1;
if (new_expires.sched != 0 &&
(timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
!cpu_time_before(timer->it_clock, val, new_expires)) {
/*
* The designated time already passed, so we notify
* immediately, even if the thread never runs to
* accumulate more time on this clock.
*/
cpu_timer_fire(timer);
}
ret = 0;
out:
if (old) {
sample_to_timespec(timer->it_clock,
timer->it.cpu.incr, &old->it_interval);
}
return ret;
}
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
union cpu_time_count now;
struct task_struct *p = timer->it.cpu.task;
int clear_dead;
/*
* Easy part: convert the reload time.
*/
sample_to_timespec(timer->it_clock,
timer->it.cpu.incr, &itp->it_interval);
if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
return;
}
if (unlikely(p == NULL)) {
/*
* This task already died and the timer will never fire.
* In this case, expires is actually the dead value.
*/
dead:
sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
&itp->it_value);
return;
}
/*
* Sample the clock to take the difference with the expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
clear_dead = p->exit_state;
} else {
read_lock(&tasklist_lock);
if (unlikely(p->signal == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
* Call the timer disarmed, nothing else to do.
*/
put_task_struct(p);
timer->it.cpu.task = NULL;
timer->it.cpu.expires.sched = 0;
read_unlock(&tasklist_lock);
goto dead;
} else {
cpu_clock_sample_group(timer->it_clock, p, &now);
clear_dead = (unlikely(p->exit_state) &&
thread_group_empty(p));
}
read_unlock(&tasklist_lock);
}
if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
if (timer->it.cpu.incr.sched == 0 &&
cpu_time_before(timer->it_clock,
timer->it.cpu.expires, now)) {
/*
* Do-nothing timer expired and has no reload,
* so it's as if it was never set.
*/
timer->it.cpu.expires.sched = 0;
itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
return;
}
/*
* Account for any expirations and reloads that should
* have happened.
*/
bump_cpu_timer(timer, now);
}
if (unlikely(clear_dead)) {
/*
* We've noticed that the thread is dead, but
* not yet reaped. Take this opportunity to
* drop our task ref.
*/
clear_dead_task(timer, now);
goto dead;
}
if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
sample_to_timespec(timer->it_clock,
cpu_time_sub(timer->it_clock,
timer->it.cpu.expires, now),
&itp->it_value);
} else {
/*
* The timer should have expired already, but the firing
* hasn't taken place yet. Say it's just about to expire.
*/
itp->it_value.tv_nsec = 1;
itp->it_value.tv_sec = 0;
}
}
/*
* Check for any per-thread CPU timers that have fired and move them off
* the tsk->cpu_timers[N] list onto the firing list. Here we update the
* tsk->it_*_expires values to reflect the remaining thread CPU timers.
*/
static void check_thread_timers(struct task_struct *tsk,
struct list_head *firing)
{
int maxfire;
struct list_head *timers = tsk->cpu_timers;
struct signal_struct *const sig = tsk->signal;
maxfire = 20;
tsk->it_prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
tsk->it_prof_expires = t->expires.cpu;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
++timers;
maxfire = 20;
tsk->it_virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
tsk->it_virt_expires = t->expires.cpu;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
++timers;
maxfire = 20;
tsk->it_sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
tsk->it_sched_expires = t->expires.sched;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
/*
* Check for the special case thread timers.
*/
if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
if (hard != RLIM_INFINITY &&
tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
*/
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
if (sig->rlim[RLIMIT_RTTIME].rlim_cur
< sig->rlim[RLIMIT_RTTIME].rlim_max) {
sig->rlim[RLIMIT_RTTIME].rlim_cur +=
USEC_PER_SEC;
}
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}
}
/*
* Check for any per-process CPU timers that have fired and move them
* off the tsk->signal->cpu_timers lists onto the firing list. Per-thread timers
* have already been taken off.
*/
static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
int maxfire;
struct signal_struct *const sig = tsk->signal;
cputime_t utime, stime, ptime, virt_expires, prof_expires;
unsigned long long sum_sched_runtime, sched_expires;
struct task_struct *t;
struct list_head *timers = sig->cpu_timers;
/*
* Don't sample the current process CPU clocks if there are no timers.
*/
if (list_empty(&timers[CPUCLOCK_PROF]) &&
cputime_eq(sig->it_prof_expires, cputime_zero) &&
sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
list_empty(&timers[CPUCLOCK_VIRT]) &&
cputime_eq(sig->it_virt_expires, cputime_zero) &&
list_empty(&timers[CPUCLOCK_SCHED]))
return;
/*
* Collect the current process totals.
*/
utime = sig->utime;
stime = sig->stime;
sum_sched_runtime = sig->sum_sched_runtime;
t = tsk;
do {
utime = cputime_add(utime, t->utime);
stime = cputime_add(stime, t->stime);
sum_sched_runtime += t->se.sum_exec_runtime;
t = next_thread(t);
} while (t != tsk);
ptime = cputime_add(utime, stime);
maxfire = 20;
prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
prof_expires = tl->expires.cpu;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
++timers;
maxfire = 20;
virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
virt_expires = tl->expires.cpu;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
++timers;
maxfire = 20;
sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
sched_expires = tl->expires.sched;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
/*
* Check for the special case process timers.
*/
if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
if (cputime_ge(ptime, sig->it_prof_expires)) {
/* ITIMER_PROF fires and reloads. */
sig->it_prof_expires = sig->it_prof_incr;
if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
sig->it_prof_expires = cputime_add(
sig->it_prof_expires, ptime);
}
__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
}
if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
(cputime_eq(prof_expires, cputime_zero) ||
cputime_lt(sig->it_prof_expires, prof_expires))) {
prof_expires = sig->it_prof_expires;
}
}
if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
if (cputime_ge(utime, sig->it_virt_expires)) {
/* ITIMER_VIRTUAL fires and reloads. */
sig->it_virt_expires = sig->it_virt_incr;
if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
sig->it_virt_expires = cputime_add(
sig->it_virt_expires, utime);
}
__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
}
if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
(cputime_eq(virt_expires, cputime_zero) ||
cputime_lt(sig->it_virt_expires, virt_expires))) {
virt_expires = sig->it_virt_expires;
}
}
if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
unsigned long psecs = cputime_to_secs(ptime);
cputime_t x;
if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
*/
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
if (sig->rlim[RLIMIT_CPU].rlim_cur
< sig->rlim[RLIMIT_CPU].rlim_max) {
sig->rlim[RLIMIT_CPU].rlim_cur++;
}
}
x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cputime_eq(prof_expires, cputime_zero) ||
cputime_lt(x, prof_expires)) {
prof_expires = x;
}
}
if (!cputime_eq(prof_expires, cputime_zero) ||
!cputime_eq(virt_expires, cputime_zero) ||
sched_expires != 0) {
/*
* Rebalance the threads' expiry times for the remaining
* process CPU timers.
*/
cputime_t prof_left, virt_left, ticks;
unsigned long long sched_left, sched;
const unsigned int nthreads = atomic_read(&sig->live);
if (!nthreads)
return;
prof_left = cputime_sub(prof_expires, utime);
prof_left = cputime_sub(prof_left, stime);
prof_left = cputime_div_non_zero(prof_left, nthreads);
virt_left = cputime_sub(virt_expires, utime);
virt_left = cputime_div_non_zero(virt_left, nthreads);
if (sched_expires) {
sched_left = sched_expires - sum_sched_runtime;
do_div(sched_left, nthreads);
sched_left = max_t(unsigned long long, sched_left, 1);
} else {
sched_left = 0;
}
t = tsk;
do {
if (unlikely(t->flags & PF_EXITING))
continue;
ticks = cputime_add(cputime_add(t->utime, t->stime),
prof_left);
if (!cputime_eq(prof_expires, cputime_zero) &&
(cputime_eq(t->it_prof_expires, cputime_zero) ||
cputime_gt(t->it_prof_expires, ticks))) {
t->it_prof_expires = ticks;
}
ticks = cputime_add(t->utime, virt_left);
if (!cputime_eq(virt_expires, cputime_zero) &&
(cputime_eq(t->it_virt_expires, cputime_zero) ||
cputime_gt(t->it_virt_expires, ticks))) {
t->it_virt_expires = ticks;
}
sched = t->se.sum_exec_runtime + sched_left;
if (sched_expires && (t->it_sched_expires == 0 ||
t->it_sched_expires > sched)) {
t->it_sched_expires = sched;
}
} while ((t = next_thread(t)) != tsk);
}
}
/*
* This is called from the signal code (via do_schedule_next_timer)
* when the last timer signal was delivered and we have to reload the timer.
*/
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
union cpu_time_count now;
if (unlikely(p == NULL))
/*
* The task was cleaned up already, no future firings.
*/
goto out;
/*
* Fetch the current sample and update the timer's expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
if (unlikely(p->exit_state)) {
clear_dead_task(timer, now);
goto out;
}
read_lock(&tasklist_lock); /* arm_timer needs it. */
} else {
read_lock(&tasklist_lock);
if (unlikely(p->signal == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
*/
put_task_struct(p);
timer->it.cpu.task = p = NULL;
timer->it.cpu.expires.sched = 0;
goto out_unlock;
} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
/*
* We've noticed that the thread is dead, but
* not yet reaped. Take this opportunity to
* drop our task ref.
*/
clear_dead_task(timer, now);
goto out_unlock;
}
cpu_clock_sample_group(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
/* Leave the tasklist_lock locked for the call below. */
}
/*
* Now re-arm for the new expiry time.
*/
arm_timer(timer, now);
out_unlock:
read_unlock(&tasklist_lock);
out:
timer->it_overrun_last = timer->it_overrun;
timer->it_overrun = -1;
++timer->it_requeue_pending;
}
/*
* This is called from the timer interrupt handler. The irq handler has
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
void run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
BUG_ON(!irqs_disabled());
#define UNEXPIRED(clock) \
(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
(tsk->it_sched_expires == 0 ||
tsk->se.sum_exec_runtime < tsk->it_sched_expires))
return;
#undef UNEXPIRED
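/*
 * For reference, UNEXPIRED(prof) expanded to
 *   (cputime_eq(tsk->it_prof_expires, cputime_zero) ||
 *    cputime_lt(prof_ticks(tsk), tsk->it_prof_expires))
 * i.e. "no profiling expiry armed, or not reached yet": a lockless fast
 * path evaluated on every tick before any lock is taken.
 */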
/*
* Double-check with locks held.
*/
read_lock(&tasklist_lock);
if (likely(tsk->signal != NULL)) {
spin_lock(&tsk->sighand->siglock);
/*
* Here we take all the timers that are firing off tsk->cpu_timers[N]
* and tsk->signal->cpu_timers[N], and put them on the firing list.
*/
check_thread_timers(tsk, &firing);
check_process_timers(tsk, &firing);
/*
* We must release these locks before taking any timer's lock.
* There is a potential race with timer deletion here, as the
* siglock now protects our private firing list. We have set
* the firing flag in each timer, so that a deletion attempt
* that gets the timer lock before we do will give it up and
* spin until we've taken care of that timer below.
*/
spin_unlock(&tsk->sighand->siglock);
}
read_unlock(&tasklist_lock);
/*
* Now that all the timers on our list have the firing flag,
* no one will touch their list entries but us. We'll take
* each timer's lock before clearing its firing flag, so no
* timer call will interfere.
*/
list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
int firing;
spin_lock(&timer->it_lock);
list_del_init(&timer->it.cpu.entry);
firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
* almost-firing as an overrun. So don't generate an event.
*/
if (likely(firing >= 0)) {
cpu_timer_fire(timer);
}
spin_unlock(&timer->it_lock);
}
}
/*
* Set one of the process-wide special case CPU timers.
* The tasklist_lock and tsk->sighand->siglock must be held by the caller.
* The oldval argument is null for the RLIMIT_CPU timer, where *newval is
* absolute; non-null for ITIMER_*, where *newval is relative and we update
* it to be absolute; *oldval is absolute and we update it to be relative.
*/
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval)
{
union cpu_time_count now;
struct list_head *head;
BUG_ON(clock_idx == CPUCLOCK_SCHED);
cpu_clock_sample_group_locked(clock_idx, tsk, &now);
if (oldval) {
if (!cputime_eq(*oldval, cputime_zero)) {
if (cputime_le(*oldval, now.cpu)) {
/* Just about to fire. */
*oldval = jiffies_to_cputime(1);
} else {
*oldval = cputime_sub(*oldval, now.cpu);
}
}
if (cputime_eq(*newval, cputime_zero))
return;
*newval = cputime_add(*newval, now.cpu);
/*
* If the RLIMIT_CPU timer will expire before the
* ITIMER_PROF timer, we have nothing else to do.
*/
if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
< cputime_to_secs(*newval))
return;
}
/*
* Check whether there are any process timers already set to fire
* before this one. If so, we don't have anything more to do.
*/
head = &tsk->signal->cpu_timers[clock_idx];
if (list_empty(head) ||
cputime_ge(list_first_entry(head,
struct cpu_timer_list, entry)->expires.cpu,
*newval)) {
/*
* Rejigger each thread's expiry time so that one will
* notice before we hit the process-cumulative expiry time.
*/
union cpu_time_count expires = { .sched = 0 };
expires.cpu = *newval;
process_timer_rebalance(tsk, clock_idx, expires, now);
}
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct itimerspec *it)
{
struct k_itimer timer;
int error;
/*
* Set up a temporary timer and then wait for it to go off.
*/
memset(&timer, 0, sizeof timer);
spin_lock_init(&timer.it_lock);
timer.it_clock = which_clock;
timer.it_overrun = -1;
error = posix_cpu_timer_create(&timer);
timer.it_process = current;
if (!error) {
static struct itimerspec zero_it;
memset(it, 0, sizeof *it);
it->it_value = *rqtp;
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_set(&timer, flags, it, NULL);
if (error) {
spin_unlock_irq(&timer.it_lock);
return error;
}
while (!signal_pending(current)) {
if (timer.it.cpu.expires.sched == 0) {
/*
* Our timer fired and was reset.
*/
spin_unlock_irq(&timer.it_lock);
return 0;
}
/*
* Block until cpu_timer_fire (or a signal) wakes us.
*/
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&timer.it_lock);
schedule();
spin_lock_irq(&timer.it_lock);
}
/*
* We were interrupted by a signal.
*/
sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
posix_cpu_timer_set(&timer, 0, &zero_it, it);
spin_unlock_irq(&timer.it_lock);
if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
/*
* It actually did fire already.
*/
return 0;
}
error = -ERESTART_RESTARTBLOCK;
}
return error;
}
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct timespec __user *rmtp)
{
struct restart_block *restart_block =
&current_thread_info()->restart_block;
struct itimerspec it;
int error;
/*
* Diagnose required errors first.
*/
if (CPUCLOCK_PERTHREAD(which_clock) &&
(CPUCLOCK_PID(which_clock) == 0 ||
CPUCLOCK_PID(which_clock) == current->pid))
return -EINVAL;
error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
if (error == -ERESTART_RESTARTBLOCK) {
if (flags & TIMER_ABSTIME)
return -ERESTARTNOHAND;
/*
* Report back to the user the time still remaining.
*/
if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
restart_block->fn = posix_cpu_nsleep_restart;
restart_block->arg0 = which_clock;
restart_block->arg1 = (unsigned long) rmtp;
restart_block->arg2 = rqtp->tv_sec;
restart_block->arg3 = rqtp->tv_nsec;
}
return error;
}
long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
clockid_t which_clock = restart_block->arg0;
struct timespec __user *rmtp;
struct timespec t;
struct itimerspec it;
int error;
rmtp = (struct timespec __user *) restart_block->arg1;
t.tv_sec = restart_block->arg2;
t.tv_nsec = restart_block->arg3;
restart_block->fn = do_no_restart_syscall;
error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
if (error == -ERESTART_RESTARTBLOCK) {
/*
* Report back to the user the time still remaining.
*/
if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
restart_block->fn = posix_cpu_nsleep_restart;
restart_block->arg0 = which_clock;
restart_block->arg1 = (unsigned long) rmtp;
restart_block->arg2 = t.tv_sec;
restart_block->arg3 = t.tv_nsec;
}
return error;
}
#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = PROCESS_CLOCK;
return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp,
struct timespec __user *rmtp)
{
return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = THREAD_CLOCK;
return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct timespec __user *rmtp)
{
return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
return -EINVAL;
}
static __init int init_posix_cpu_timers(void)
{
struct k_clock process = {
.clock_getres = process_cpu_clock_getres,
.clock_get = process_cpu_clock_get,
.clock_set = do_posix_clock_nosettime,
.timer_create = process_cpu_timer_create,
.nsleep = process_cpu_nsleep,
.nsleep_restart = process_cpu_nsleep_restart,
};
struct k_clock thread = {
.clock_getres = thread_cpu_clock_getres,
.clock_get = thread_cpu_clock_get,
.clock_set = do_posix_clock_nosettime,
.timer_create = thread_cpu_timer_create,
.nsleep = thread_cpu_nsleep,
.nsleep_restart = thread_cpu_nsleep_restart,
};
register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
return 0;
}
__initcall(init_posix_cpu_timers);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3498_6 |
crossvul-cpp_data_good_2020_7 | /*
* txtquery io
* Teodor Sigaev <teodor@stack.net>
* contrib/ltree/ltxtquery_io.c
*/
#include "postgres.h"
#include <ctype.h>
#include "crc32.h"
#include "ltree.h"
#include "miscadmin.h"
PG_FUNCTION_INFO_V1(ltxtq_in);
Datum ltxtq_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltxtq_out);
Datum ltxtq_out(PG_FUNCTION_ARGS);
/* parser's states */
#define WAITOPERAND 1
#define INOPERAND 2
#define WAITOPERATOR 3
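/*
 * Query syntax, per the ltree documentation: words are combined with
 * & (AND), | (OR), ! (NOT) and parentheses, and each word may carry the
 * modifiers parsed below: '@' match case-insensitively, '*' match any
 * word with this prefix, '%' match initial underscore-separated words.
 * Example: 'Europe & Russia*@ & !Transportation'.
 */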
/*
* node of query tree, also used
* for storing polish notation in parser
*/
typedef struct NODE
{
int32 type;
int32 val;
int16 distance;
int16 length;
uint16 flag;
struct NODE *next;
} NODE;
typedef struct
{
char *buf;
int32 state;
int32 count;
/* reverse polish notation in list (for temporary usage) */
NODE *str;
/* number in str */
int32 num;
/* user-friendly operand */
int32 lenop;
int32 sumlen;
char *op;
char *curop;
} QPRS_STATE;
/*
* get token from query string
*/
static int32
gettoken_query(QPRS_STATE *state, int32 *val, int32 *lenval, char **strval, uint16 *flag)
{
int charlen;
for (;;)
{
charlen = pg_mblen(state->buf);
switch (state->state)
{
case WAITOPERAND:
if (charlen == 1 && t_iseq(state->buf, '!'))
{
(state->buf)++;
*val = (int32) '!';
return OPR;
}
else if (charlen == 1 && t_iseq(state->buf, '('))
{
state->count++;
(state->buf)++;
return OPEN;
}
else if (ISALNUM(state->buf))
{
state->state = INOPERAND;
*strval = state->buf;
*lenval = charlen;
*flag = 0;
}
else if (!t_isspace(state->buf))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("operand syntax error")));
break;
case INOPERAND:
if (ISALNUM(state->buf))
{
if (*flag)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("modificators syntax error")));
*lenval += charlen;
}
else if (charlen == 1 && t_iseq(state->buf, '%'))
*flag |= LVAR_SUBLEXEME;
else if (charlen == 1 && t_iseq(state->buf, '@'))
*flag |= LVAR_INCASE;
else if (charlen == 1 && t_iseq(state->buf, '*'))
*flag |= LVAR_ANYEND;
else
{
state->state = WAITOPERATOR;
return VAL;
}
break;
case WAITOPERATOR:
if (charlen == 1 && (t_iseq(state->buf, '&') || t_iseq(state->buf, '|')))
{
state->state = WAITOPERAND;
*val = (int32) *(state->buf);
(state->buf)++;
return OPR;
}
else if (charlen == 1 && t_iseq(state->buf, ')'))
{
(state->buf)++;
state->count--;
return (state->count < 0) ? ERR : CLOSE;
}
else if (*(state->buf) == '\0')
return (state->count) ? ERR : END;
else if (charlen == 1 && !t_iseq(state->buf, ' '))
return ERR;
break;
default:
return ERR;
break;
}
state->buf += charlen;
}
}
/*
* push a new node onto the reverse polish notation list
*/
static void
pushquery(QPRS_STATE *state, int32 type, int32 val, int32 distance, int32 lenval, uint16 flag)
{
NODE *tmp = (NODE *) palloc(sizeof(NODE));
tmp->type = type;
tmp->val = val;
tmp->flag = flag;
if (distance > 0xffff)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("value is too big")));
if (lenval > 0xff)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("operand is too long")));
tmp->distance = distance;
tmp->length = lenval;
tmp->next = state->str;
state->str = tmp;
state->num++;
}
/*
* This function is used for query_txt parsing
*/
static void
pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
{
if (lenval > 0xffff)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long")));
pushquery(state, type, ltree_crc32_sz(strval, lenval),
state->curop - state->op, lenval, flag);
while (state->curop - state->op + lenval + 1 >= state->lenop)
{
int32 tmp = state->curop - state->op;
state->lenop *= 2;
state->op = (char *) repalloc((void *) state->op, state->lenop);
state->curop = state->op + tmp;
}
memcpy((void *) state->curop, (void *) strval, lenval);
state->curop += lenval;
*(state->curop) = '\0';
state->curop++;
state->sumlen += lenval + 1;
return;
}
#define STACKDEPTH 32
/*
* make polish notation of query
*/
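/*
 * Operator precedence falls out of the stack handling below: '&' and
 * '!' are parked on the local stack and flushed as soon as the next
 * operand or subexpression completes, while '|' is emitted immediately
 * whenever anything is stacked, so '&' and '!' bind tighter than '|'.
 */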
static int32
makepol(QPRS_STATE *state)
{
int32 val = 0,
type;
int32 lenval = 0;
char *strval = NULL;
int32 stack[STACKDEPTH];
int32 lenstack = 0;
uint16 flag = 0;
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
while ((type = gettoken_query(state, &val, &lenval, &strval, &flag)) != END)
{
switch (type)
{
case VAL:
pushval_asis(state, VAL, strval, lenval, flag);
while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
}
break;
case OPR:
if (lenstack && val == (int32) '|')
pushquery(state, OPR, val, 0, 0, 0);
else
{
if (lenstack == STACKDEPTH)
/* internal error */
elog(ERROR, "stack too short");
stack[lenstack] = val;
lenstack++;
}
break;
case OPEN:
if (makepol(state) == ERR)
return ERR;
while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
}
break;
case CLOSE:
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
};
return END;
break;
case ERR:
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
return ERR;
}
}
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
};
return END;
}
static void
findoprnd(ITEM *ptr, int32 *pos)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (ptr[*pos].type == VAL || ptr[*pos].type == VALTRUE)
{
ptr[*pos].left = 0;
(*pos)++;
}
else if (ptr[*pos].val == (int32) '!')
{
ptr[*pos].left = 1;
(*pos)++;
findoprnd(ptr, pos);
}
else
{
ITEM *curitem = &ptr[*pos];
int32 tmp = *pos;
(*pos)++;
findoprnd(ptr, pos);
curitem->left = *pos - tmp;
findoprnd(ptr, pos);
}
}
/*
* input
*/
static ltxtquery *
queryin(char *buf)
{
QPRS_STATE state;
int32 i;
ltxtquery *query;
int32 commonlen;
ITEM *ptr;
NODE *tmp;
int32 pos = 0;
#ifdef BS_DEBUG
char pbuf[16384],
*cur;
#endif
/* init state */
state.buf = buf;
state.state = WAITOPERAND;
state.count = 0;
state.num = 0;
state.str = NULL;
/* init list of operand */
state.sumlen = 0;
state.lenop = 64;
state.curop = state.op = (char *) palloc(state.lenop);
*(state.curop) = '\0';
/* parse query & make polish notation (postfix, but in reverse order) */
makepol(&state);
if (!state.num)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Empty query.")));
if (LTXTQUERY_TOO_BIG(state.num, state.sumlen))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("ltxtquery is too large")));
commonlen = COMPUTESIZE(state.num, state.sumlen);
query = (ltxtquery *) palloc(commonlen);
SET_VARSIZE(query, commonlen);
query->size = state.num;
ptr = GETQUERY(query);
/* set item in polish notation */
for (i = 0; i < state.num; i++)
{
ptr[i].type = state.str->type;
ptr[i].val = state.str->val;
ptr[i].distance = state.str->distance;
ptr[i].length = state.str->length;
ptr[i].flag = state.str->flag;
tmp = state.str->next;
pfree(state.str);
state.str = tmp;
}
/* set user-friendly operand view */
memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen);
pfree(state.op);
/* set left operand's position for every operator */
pos = 0;
findoprnd(ptr, &pos);
return query;
}
/*
* in without morphology
*/
Datum
ltxtq_in(PG_FUNCTION_ARGS)
{
PG_RETURN_POINTER(queryin((char *) PG_GETARG_POINTER(0)));
}
/*
* out function
*/
typedef struct
{
ITEM *curpol;
char *buf;
char *cur;
char *op;
int32 buflen;
} INFIX;
#define RESIZEBUF(inf,addsize) \
while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) \
{ \
int32 len = (inf)->cur - (inf)->buf; \
(inf)->buflen *= 2; \
(inf)->buf = (char*) repalloc( (void*)(inf)->buf, (inf)->buflen ); \
(inf)->cur = (inf)->buf + len; \
}
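/*
 * RESIZEBUF doubles the buffer until addsize more bytes (plus a trailing
 * NUL) fit, preserving the current write position; growth by doubling
 * keeps the total repalloc copying linear in the final output size.
 */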
/*
* recursive walk on tree and print it in
* infix (human-readable) view
*/
static void
infix(INFIX *in, bool first)
{
if (in->curpol->type == VAL)
{
char *op = in->op + in->curpol->distance;
RESIZEBUF(in, in->curpol->length * 2 + 5);
while (*op)
{
*(in->cur) = *op;
op++;
in->cur++;
}
if (in->curpol->flag & LVAR_SUBLEXEME)
{
*(in->cur) = '%';
in->cur++;
}
if (in->curpol->flag & LVAR_INCASE)
{
*(in->cur) = '@';
in->cur++;
}
if (in->curpol->flag & LVAR_ANYEND)
{
*(in->cur) = '*';
in->cur++;
}
*(in->cur) = '\0';
in->curpol++;
}
else if (in->curpol->val == (int32) '!')
{
bool isopr = false;
RESIZEBUF(in, 1);
*(in->cur) = '!';
in->cur++;
*(in->cur) = '\0';
in->curpol++;
if (in->curpol->type == OPR)
{
isopr = true;
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
in->cur = strchr(in->cur, '\0');
}
infix(in, isopr);
if (isopr)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
in->cur = strchr(in->cur, '\0');
}
}
else
{
int32 op = in->curpol->val;
INFIX nrm;
in->curpol++;
if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
in->cur = strchr(in->cur, '\0');
}
nrm.curpol = in->curpol;
nrm.op = in->op;
nrm.buflen = 16;
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
/* get right operand */
infix(&nrm, false);
/* get & print left operand */
in->curpol = nrm.curpol;
infix(in, false);
/* print operator & right operand */
RESIZEBUF(in, 3 + (nrm.cur - nrm.buf));
sprintf(in->cur, " %c %s", op, nrm.buf);
in->cur = strchr(in->cur, '\0');
pfree(nrm.buf);
if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
in->cur = strchr(in->cur, '\0');
}
}
}
Datum
ltxtq_out(PG_FUNCTION_ARGS)
{
ltxtquery *query = PG_GETARG_LTXTQUERY(0);
INFIX nrm;
if (query->size == 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Empty query.")));
nrm.curpol = GETQUERY(query);
nrm.buflen = 32;
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
*(nrm.cur) = '\0';
nrm.op = GETOPERAND(query);
infix(&nrm, true);
PG_FREE_IF_COPY(query, 0);
PG_RETURN_POINTER(nrm.buf);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2020_7 |
crossvul-cpp_data_bad_5755_1 | /*
* BRIEF MODULE DESCRIPTION
* Au1100 LCD Driver.
*
* Rewritten for 2.6 by Embedded Alley Solutions
* <source@embeddedalley.com>, based on submissions by
* Karl Lessard <klessard@sunrisetelecom.com>
* <c.pellegrin@exadron.com>
*
* PM support added by Rodolfo Giometti <giometti@linux.it>
* Cursor enable/disable by Rodolfo Giometti <giometti@linux.it>
*
* Copyright 2002 MontaVista Software
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
* Copyright 2002 Alchemy Semiconductor
* Author: Alchemy Semiconductor
*
* Based on:
* linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
* Created 28 Dec 1997 by Geert Uytterhoeven
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/mach-au1x00/au1000.h>
#define DEBUG 0
#include "au1100fb.h"
#define DRIVER_NAME "au1100fb"
#define DRIVER_DESC "LCD controller driver for AU1100 processors"
#define to_au1100fb_device(_info) \
(_info ? container_of(_info, struct au1100fb_device, info) : NULL);
/* Bitfields format supported by the controller. Note that the order of formats
* SHOULD be the same as in the LCD_CONTROL_SBPPF field, so we can retrieve the
* right pixel format by doing rgb_bitfields[LCD_CONTROL_SBPPF_XXX >> LCD_CONTROL_SBPPF]
*/
struct fb_bitfield rgb_bitfields[][4] =
{
/* Red, Green, Blue, Transp */
{ { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
{ { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
{ { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } },
{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } },
{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } },
/* The last is used to describe 12bpp format */
{ { 8, 4, 0 }, { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } },
};
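/* Each triple above is { offset, length, msb_right } as defined by
 * struct fb_bitfield; e.g. the first row describes RGB 655 and the
 * second RGB 565, in LCD_CONTROL_SBPPF order as noted above.
 */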
static struct fb_fix_screeninfo au1100fb_fix = {
.id = "AU1100 FB",
.xpanstep = 1,
.ypanstep = 1,
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
};
static struct fb_var_screeninfo au1100fb_var = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.vmode = FB_VMODE_NONINTERLACED,
};
/* fb_blank
* Blank the screen. Depending on the mode, the screen will be
* activated with the backlight color, or deactivated
*/
static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
print_dbg("fb_blank %d %p", blank_mode, fbi);
switch (blank_mode) {
case VESA_NO_BLANKING:
/* Turn on panel */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
au_sync();
break;
case VESA_VSYNC_SUSPEND:
case VESA_HSYNC_SUSPEND:
case VESA_POWERDOWN:
/* Turn off panel */
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
au_sync();
break;
default:
break;
}
return 0;
}
/*
* Set hardware with var settings. This will enable the controller with a specific
* mode, normally validated with the fb_check_var method
*/
int au1100fb_setmode(struct au1100fb_device *fbdev)
{
struct fb_info *info = &fbdev->info;
u32 words;
int index;
if (!fbdev)
return -EINVAL;
/* Update var-dependent FB info */
if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) {
if (info->var.bits_per_pixel <= 8) {
/* palettized */
info->var.red.offset = 0;
info->var.red.length = info->var.bits_per_pixel;
info->var.red.msb_right = 0;
info->var.green.offset = 0;
info->var.green.length = info->var.bits_per_pixel;
info->var.green.msb_right = 0;
info->var.blue.offset = 0;
info->var.blue.length = info->var.bits_per_pixel;
info->var.blue.msb_right = 0;
info->var.transp.offset = 0;
info->var.transp.length = 0;
info->var.transp.msb_right = 0;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
info->fix.line_length = info->var.xres_virtual /
(8/info->var.bits_per_pixel);
} else {
/* non-palettized */
index = (fbdev->panel->control_base & LCD_CONTROL_SBPPF_MASK) >> LCD_CONTROL_SBPPF_BIT;
info->var.red = rgb_bitfields[index][0];
info->var.green = rgb_bitfields[index][1];
info->var.blue = rgb_bitfields[index][2];
info->var.transp = rgb_bitfields[index][3];
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */
}
} else {
/* mono */
info->fix.visual = FB_VISUAL_MONO10;
info->fix.line_length = info->var.xres_virtual / 8;
}
info->screen_size = info->fix.line_length * info->var.yres_virtual;
info->var.rotate = ((fbdev->panel->control_base&LCD_CONTROL_SM_MASK) \
>> LCD_CONTROL_SM_BIT) * 90;
/* Determine BPP mode and format */
fbdev->regs->lcd_control = fbdev->panel->control_base;
fbdev->regs->lcd_horztiming = fbdev->panel->horztiming;
fbdev->regs->lcd_verttiming = fbdev->panel->verttiming;
fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base;
fbdev->regs->lcd_intenable = 0;
fbdev->regs->lcd_intstatus = 0;
fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys);
if (panel_is_dual(fbdev->panel)) {
/* Second panel displays the second half of the screen if possible,
* otherwise it displays the same as the first panel */
if (info->var.yres_virtual >= (info->var.yres << 1)) {
fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(fbdev->fb_phys +
(info->fix.line_length *
(info->var.yres_virtual >> 1)));
} else {
fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(fbdev->fb_phys);
}
}
words = info->fix.line_length / sizeof(u32);
if (!info->var.rotate || (info->var.rotate == 180)) {
words *= info->var.yres_virtual;
if (info->var.rotate /* 180 */) {
words -= (words % 8); /* should be divisible by 8 */
}
}
fbdev->regs->lcd_words = LCD_WRD_WRDS_N(words);
fbdev->regs->lcd_pwmdiv = 0;
fbdev->regs->lcd_pwmhi = 0;
/* Resume controller */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
mdelay(10);
au1100fb_fb_blank(VESA_NO_BLANKING, info);
return 0;
}
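/*
 * Illustrative note (numbers are hypothetical, not from the original
 * driver): for a 640x480 panel at 16bpp, line_length = 640 << 1 = 1280
 * bytes, so words = 1280 / sizeof(u32) = 320 per line; with no rotation
 * the controller is programmed with 320 * 480 = 153600 words per frame.
 */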
/* fb_setcolreg
* Set color in LCD palette.
*/
int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fbi)
{
struct au1100fb_device *fbdev;
u32 *palette;
u32 value;
fbdev = to_au1100fb_device(fbi);
palette = fbdev->regs->lcd_pallettebase;
if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;
if (fbi->var.grayscale) {
/* Convert color to grayscale */
red = green = blue =
(19595 * red + 38470 * green + 7471 * blue) >> 16;
}
if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
/* Place color in the pseudopalette */
if (regno >= 16) /* pseudo_palette has only 16 entries */
return -EINVAL;
palette = (u32*)fbi->pseudo_palette;
red >>= (16 - fbi->var.red.length);
green >>= (16 - fbi->var.green.length);
blue >>= (16 - fbi->var.blue.length);
value = (red << fbi->var.red.offset) |
(green << fbi->var.green.offset)|
(blue << fbi->var.blue.offset);
value &= 0xFFFF;
} else if (panel_is_active(fbdev->panel)) {
/* COLOR TFT PALETTIZED (use RGB 565) */
value = (red & 0xF800)|((green >> 5) & 0x07E0)|((blue >> 11) & 0x001F);
value &= 0xFFFF;
} else if (panel_is_color(fbdev->panel)) {
/* COLOR STN MODE */
value = (((panel_swap_rgb(fbdev->panel) ? blue : red) >> 12) & 0x000F) |
((green >> 8) & 0x00F0) |
(((panel_swap_rgb(fbdev->panel) ? red : blue) >> 4) & 0x0F00);
value &= 0xFFF;
} else {
/* MONOCHROME MODE */
value = (green >> 12) & 0x000F;
value &= 0xF;
}
palette[regno] = value;
return 0;
}
/* fb_pan_display
* Pan display in x and/or y as specified
*/
int au1100fb_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi)
{
struct au1100fb_device *fbdev;
int dy;
fbdev = to_au1100fb_device(fbi);
print_dbg("fb_pan_display %p %p", var, fbi);
if (!var || !fbdev) {
return -EINVAL;
}
if (var->xoffset - fbi->var.xoffset) {
/* No support for X panning for now! */
return -EINVAL;
}
print_dbg("fb_pan_display 2 %p %p", var, fbi);
dy = var->yoffset - fbi->var.yoffset;
if (dy) {
u32 dmaaddr;
print_dbg("Panning screen of %d lines", dy);
dmaaddr = fbdev->regs->lcd_dmaaddr0;
dmaaddr += (fbi->fix.line_length * dy);
/* TODO: Wait for current frame to finish */
fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(dmaaddr);
if (panel_is_dual(fbdev->panel)) {
dmaaddr = fbdev->regs->lcd_dmaaddr1;
dmaaddr += (fbi->fix.line_length * dy);
fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(dmaaddr);
}
}
print_dbg("fb_pan_display 3 %p %p", var, fbi);
return 0;
}
/* fb_rotate
* Rotate the display by this angle. This doesn't seem to be used by the
* core, but our hardware supports it, so why not implement it...
*/
void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
print_dbg("fb_rotate %p %d", fbi, angle);
if (fbdev && (angle > 0) && !(angle % 90)) {
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
fbdev->regs->lcd_control &= ~(LCD_CONTROL_SM_MASK);
fbdev->regs->lcd_control |= ((angle/90) << LCD_CONTROL_SM_BIT);
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
}
}
/* fb_mmap
* Map video memory in user space. We don't use the generic fb_mmap method mainly
* to allow the use of the TLB streaming flag (CCA=6)
*/
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev;
unsigned int len;
unsigned long start=0, off;
fbdev = to_au1100fb_device(fbi);
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= (6 << 9); /* CCA=6 */
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) {
return -EAGAIN;
}
return 0;
}
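/*
 * Illustrative sketch of the bounds math in au1100fb_fb_mmap (hypothetical
 * numbers): with fb_phys = 0x0C000000 and fb_len = 0x40000, start =
 * 0x0C000000 and len = 0x40000; a request with vm_pgoff = 1 maps from
 * physical address 0x0C001000 and is rejected unless
 * (vm_end - vm_start) + 0x1000 <= 0x40000.
 */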
static struct fb_ops au1100fb_ops =
{
.owner = THIS_MODULE,
.fb_setcolreg = au1100fb_fb_setcolreg,
.fb_blank = au1100fb_fb_blank,
.fb_pan_display = au1100fb_fb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_rotate = au1100fb_fb_rotate,
.fb_mmap = au1100fb_fb_mmap,
};
/*-------------------------------------------------------------------------*/
static int au1100fb_setup(struct au1100fb_device *fbdev)
{
char *this_opt, *options;
int num_panels = ARRAY_SIZE(known_lcd_panels);
if (num_panels <= 0) {
print_err("No LCD panels supported by driver!");
return -ENODEV;
}
if (fb_get_options(DRIVER_NAME, &options))
return -ENODEV;
if (!options)
return -ENODEV;
while ((this_opt = strsep(&options, ",")) != NULL) {
/* Panel option */
if (!strncmp(this_opt, "panel:", 6)) {
int i;
this_opt += 6;
for (i = 0; i < num_panels; i++) {
if (!strncmp(this_opt, known_lcd_panels[i].name,
strlen(this_opt))) {
fbdev->panel = &known_lcd_panels[i];
fbdev->panel_idx = i;
break;
}
}
if (i >= num_panels) {
print_warn("Panel '%s' not supported!", this_opt);
return -ENODEV;
}
}
/* Unsupported option */
else
print_warn("Unsupported option \"%s\"", this_opt);
}
print_info("Panel=%s", fbdev->panel->name);
return 0;
}
static int au1100fb_drv_probe(struct platform_device *dev)
{
struct au1100fb_device *fbdev = NULL;
struct resource *regs_res;
unsigned long page;
u32 sys_clksrc;
/* Allocate new device private */
fbdev = devm_kzalloc(&dev->dev, sizeof(struct au1100fb_device),
GFP_KERNEL);
if (!fbdev) {
print_err("fail to allocate device private record");
return -ENOMEM;
}
if (au1100fb_setup(fbdev))
goto failed;
platform_set_drvdata(dev, (void *)fbdev);
/* Allocate region for our registers and map them */
regs_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!regs_res) {
print_err("fail to retrieve registers resource");
return -EFAULT;
}
au1100fb_fix.mmio_start = regs_res->start;
au1100fb_fix.mmio_len = resource_size(regs_res);
if (!devm_request_mem_region(&dev->dev,
au1100fb_fix.mmio_start,
au1100fb_fix.mmio_len,
DRIVER_NAME)) {
print_err("fail to lock memory region at 0x%08lx",
au1100fb_fix.mmio_start);
return -EBUSY;
}
fbdev->regs = (struct au1100fb_regs*)KSEG1ADDR(au1100fb_fix.mmio_start);
print_dbg("Register memory map at %p", fbdev->regs);
print_dbg("phys=0x%08x, size=%d", fbdev->regs_phys, fbdev->regs_len);
/* Allocate the framebuffer to the maximum screen size * nbr of video buffers */
fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
fbdev->fb_mem = dmam_alloc_coherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
return -ENOMEM;
}
au1100fb_fix.smem_start = fbdev->fb_phys;
au1100fb_fix.smem_len = fbdev->fb_len;
/*
* Set page reserved so that mmap will work. This is necessary
* since we'll be remapping normal memory.
*/
for (page = (unsigned long)fbdev->fb_mem;
page < PAGE_ALIGN((unsigned long)fbdev->fb_mem + fbdev->fb_len);
page += PAGE_SIZE) {
#ifdef CONFIG_DMA_NONCOHERENT
SetPageReserved(virt_to_page(CAC_ADDR((void *)page)));
#else
SetPageReserved(virt_to_page(page));
#endif
}
print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Setup LCD clock to AUX (48 MHz) */
sys_clksrc = au_readl(SYS_CLKSRC) & ~(SYS_CS_ML_MASK | SYS_CS_DL | SYS_CS_CL);
au_writel((sys_clksrc | (1 << SYS_CS_ML_BIT)), SYS_CLKSRC);
/* load the panel info into the var struct */
au1100fb_var.bits_per_pixel = fbdev->panel->bpp;
au1100fb_var.xres = fbdev->panel->xres;
au1100fb_var.xres_virtual = au1100fb_var.xres;
au1100fb_var.yres = fbdev->panel->yres;
au1100fb_var.yres_virtual = au1100fb_var.yres;
fbdev->info.screen_base = fbdev->fb_mem;
fbdev->info.fbops = &au1100fb_ops;
fbdev->info.fix = au1100fb_fix;
fbdev->info.pseudo_palette =
devm_kzalloc(&dev->dev, sizeof(u32) * 16, GFP_KERNEL);
if (!fbdev->info.pseudo_palette)
return -ENOMEM;
if (fb_alloc_cmap(&fbdev->info.cmap, AU1100_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
print_err("Fail to allocate colormap (%d entries)",
AU1100_LCD_NBR_PALETTE_ENTRIES);
return -EFAULT;
}
fbdev->info.var = au1100fb_var;
/* Set h/w registers */
au1100fb_setmode(fbdev);
/* Register new framebuffer */
if (register_framebuffer(&fbdev->info) < 0) {
print_err("cannot register new framebuffer");
goto failed;
}
return 0;
failed:
if (fbdev->fb_mem) {
dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem,
fbdev->fb_phys);
}
if (fbdev->info.cmap.len != 0) {
fb_dealloc_cmap(&fbdev->info.cmap);
}
return -ENODEV;
}
int au1100fb_drv_remove(struct platform_device *dev)
{
struct au1100fb_device *fbdev = NULL;
if (!dev)
return -ENODEV;
fbdev = (struct au1100fb_device *) platform_get_drvdata(dev);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
#endif
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
/* Clean up all probe data */
unregister_framebuffer(&fbdev->info);
fb_dealloc_cmap(&fbdev->info.cmap);
return 0;
}
#ifdef CONFIG_PM
static u32 sys_clksrc;
static struct au1100fb_regs fbregs;
int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state)
{
struct au1100fb_device *fbdev = platform_get_drvdata(dev);
if (!fbdev)
return 0;
/* Save the clock source state */
sys_clksrc = au_readl(SYS_CLKSRC);
/* Blank the LCD */
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
/* Stop LCD clocking */
au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC);
memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
return 0;
}
int au1100fb_drv_resume(struct platform_device *dev)
{
struct au1100fb_device *fbdev = platform_get_drvdata(dev);
if (!fbdev)
return 0;
memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs));
/* Restart LCD clocking */
au_writel(sys_clksrc, SYS_CLKSRC);
/* Unblank the LCD */
au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info);
return 0;
}
#else
#define au1100fb_drv_suspend NULL
#define au1100fb_drv_resume NULL
#endif
static struct platform_driver au1100fb_driver = {
.driver = {
.name = "au1100-lcd",
.owner = THIS_MODULE,
},
.probe = au1100fb_drv_probe,
.remove = au1100fb_drv_remove,
.suspend = au1100fb_drv_suspend,
.resume = au1100fb_drv_resume,
};
static int __init au1100fb_load(void)
{
return platform_driver_register(&au1100fb_driver);
}
static void __exit au1100fb_unload(void)
{
platform_driver_unregister(&au1100fb_driver);
}
module_init(au1100fb_load);
module_exit(au1100fb_unload);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5755_1 |
crossvul-cpp_data_good_4814_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W PPPP GGGG %
% W W P P G %
% W W W PPPP G GGG %
% WW WW P G G %
% W W P GGG %
% %
% %
% Read WordPerfect Image Format %
% %
% Software Design %
% Jaroslav Fojtik %
% June 2000 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/constitute.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/pixel-accessor.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/module.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
typedef struct
{
unsigned char Red;
unsigned char Green;
unsigned char Blue;
} RGB_Record;
/* Default palette for WPG level 1 */
static const RGB_Record WPG1_Palette[256]={
{ 0, 0, 0}, { 0, 0,168},
{ 0,168, 0}, { 0,168,168},
{168, 0, 0}, {168, 0,168},
{168, 84, 0}, {168,168,168},
{ 84, 84, 84}, { 84, 84,252},
{ 84,252, 84}, { 84,252,252},
{252, 84, 84}, {252, 84,252},
{252,252, 84}, {252,252,252}, /*16*/
{ 0, 0, 0}, { 20, 20, 20},
{ 32, 32, 32}, { 44, 44, 44},
{ 56, 56, 56}, { 68, 68, 68},
{ 80, 80, 80}, { 96, 96, 96},
{112,112,112}, {128,128,128},
{144,144,144}, {160,160,160},
{180,180,180}, {200,200,200},
{224,224,224}, {252,252,252}, /*32*/
{ 0, 0,252}, { 64, 0,252},
{124, 0,252}, {188, 0,252},
{252, 0,252}, {252, 0,188},
{252, 0,124}, {252, 0, 64},
{252, 0, 0}, {252, 64, 0},
{252,124, 0}, {252,188, 0},
{252,252, 0}, {188,252, 0},
{124,252, 0}, { 64,252, 0}, /*48*/
{ 0,252, 0}, { 0,252, 64},
{ 0,252,124}, { 0,252,188},
{ 0,252,252}, { 0,188,252},
{ 0,124,252}, { 0, 64,252},
{124,124,252}, {156,124,252},
{188,124,252}, {220,124,252},
{252,124,252}, {252,124,220},
{252,124,188}, {252,124,156}, /*64*/
{252,124,124}, {252,156,124},
{252,188,124}, {252,220,124},
{252,252,124}, {220,252,124},
{188,252,124}, {156,252,124},
{124,252,124}, {124,252,156},
{124,252,188}, {124,252,220},
{124,252,252}, {124,220,252},
{124,188,252}, {124,156,252}, /*80*/
{180,180,252}, {196,180,252},
{216,180,252}, {232,180,252},
{252,180,252}, {252,180,232},
{252,180,216}, {252,180,196},
{252,180,180}, {252,196,180},
{252,216,180}, {252,232,180},
{252,252,180}, {232,252,180},
{216,252,180}, {196,252,180}, /*96*/
{180,220,180}, {180,252,196},
{180,252,216}, {180,252,232},
{180,252,252}, {180,232,252},
{180,216,252}, {180,196,252},
{0,0,112}, {28,0,112},
{56,0,112}, {84,0,112},
{112,0,112}, {112,0,84},
{112,0,56}, {112,0,28}, /*112*/
{112,0,0}, {112,28,0},
{112,56,0}, {112,84,0},
{112,112,0}, {84,112,0},
{56,112,0}, {28,112,0},
{0,112,0}, {0,112,28},
{0,112,56}, {0,112,84},
{0,112,112}, {0,84,112},
{0,56,112}, {0,28,112}, /*128*/
{56,56,112}, {68,56,112},
{84,56,112}, {96,56,112},
{112,56,112}, {112,56,96},
{112,56,84}, {112,56,68},
{112,56,56}, {112,68,56},
{112,84,56}, {112,96,56},
{112,112,56}, {96,112,56},
{84,112,56}, {68,112,56}, /*144*/
{56,112,56}, {56,112,69},
{56,112,84}, {56,112,96},
{56,112,112}, {56,96,112},
{56,84,112}, {56,68,112},
{80,80,112}, {88,80,112},
{96,80,112}, {104,80,112},
{112,80,112}, {112,80,104},
{112,80,96}, {112,80,88}, /*160*/
{112,80,80}, {112,88,80},
{112,96,80}, {112,104,80},
{112,112,80}, {104,112,80},
{96,112,80}, {88,112,80},
{80,112,80}, {80,112,88},
{80,112,96}, {80,112,104},
{80,112,112}, {80,114,112},
{80,96,112}, {80,88,112}, /*176*/
{0,0,64}, {16,0,64},
{32,0,64}, {48,0,64},
{64,0,64}, {64,0,48},
{64,0,32}, {64,0,16},
{64,0,0}, {64,16,0},
{64,32,0}, {64,48,0},
{64,64,0}, {48,64,0},
{32,64,0}, {16,64,0}, /*192*/
{0,64,0}, {0,64,16},
{0,64,32}, {0,64,48},
{0,64,64}, {0,48,64},
{0,32,64}, {0,16,64},
{32,32,64}, {40,32,64},
{48,32,64}, {56,32,64},
{64,32,64}, {64,32,56},
{64,32,48}, {64,32,40}, /*208*/
{64,32,32}, {64,40,32},
{64,48,32}, {64,56,32},
{64,64,32}, {56,64,32},
{48,64,32}, {40,64,32},
{32,64,32}, {32,64,40},
{32,64,48}, {32,64,56},
{32,64,64}, {32,56,64},
{32,48,64}, {32,40,64}, /*224*/
{44,44,64}, {48,44,64},
{52,44,64}, {60,44,64},
{64,44,64}, {64,44,60},
{64,44,52}, {64,44,48},
{64,44,44}, {64,48,44},
{64,52,44}, {64,60,44},
{64,64,44}, {60,64,44},
{52,64,44}, {48,64,44}, /*240*/
{44,64,44}, {44,64,48},
{44,64,52}, {44,64,60},
{44,64,64}, {44,60,64},
{44,55,64}, {44,48,64},
{0,0,0}, {0,0,0},
{0,0,0}, {0,0,0},
{0,0,0}, {0,0,0},
{0,0,0}, {0,0,0} /*256*/
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W P G %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWPG() returns True if the image format type, identified by the magick
% string, is WPG.
%
% The format of the IsWPG method is:
%
% unsigned int IsWPG(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o status: Method IsWPG returns True if the image format type is WPG.
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static unsigned int IsWPG(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (memcmp(magick,"\377WPC",4) == 0)
return(MagickTrue);
return(MagickFalse);
}
static void Rd_WP_DWORD(Image *image,size_t *d)
{
unsigned char
b;
b=ReadBlobByte(image);
*d=b;
if (b < 0xFFU)
return;
b=ReadBlobByte(image);
*d=(size_t) b;
b=ReadBlobByte(image);
*d+=(size_t) b*256L;
if (*d < 0x8000)
return;
*d=(*d & 0x7FFF) << 16;
b=ReadBlobByte(image);
*d+=(size_t) b;
b=ReadBlobByte(image);
*d+=(size_t) b*256L;
return;
}
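/*
 * Documentation-only sketch (not used by this coder): the same
 * variable-length WPG quantity decoded from a plain memory buffer,
 * mirroring Rd_WP_DWORD() above. One byte encodes 0..254; the 0xFF escape
 * introduces a 16-bit little-endian value; a set top bit there promotes it
 * to a 32-bit value, high word first, then low word.
 */
static size_t demo_decode_wp_dword(const unsigned char *p)
{
size_t d;
d=(size_t) p[0];
if (d < 0xFF)
return(d); /* single-byte form */
d=(size_t) p[1]+(size_t) p[2]*256; /* 16-bit form */
if (d < 0x8000)
return(d);
d=(d & 0x7FFF) << 16; /* 32-bit form: high word first */
d+=(size_t) p[3]+(size_t) p[4]*256;
return(d);
}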
static void InsertRow(unsigned char *p,ssize_t y,Image *image, int bpp)
{
ExceptionInfo
*exception;
int
bit;
ssize_t
x;
register PixelPacket
*q;
IndexPacket
index;
register IndexPacket
*indexes;
exception=(&image->exception);
switch (bpp)
{
case 1: /* Convert bitmap scanline. */
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
for (bit=0; bit < 8; bit++)
{
index=((*p) & (0x80 >> bit) ? 0x01 : 0x00);
SetPixelIndex(indexes+x+bit,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=0; bit < (ssize_t) (image->columns % 8); bit++)
{
index=((*p) & (0x80 >> bit) ? 0x01 : 0x00);
SetPixelIndex(indexes+x+bit,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
p++;
}
if (!SyncAuthenticPixels(image,exception))
break;
break;
}
case 2: /* Convert PseudoColor scanline. */
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < ((ssize_t) image->columns-3); x+=4)
{
index=ConstrainColormapIndex(image,(*p >> 6) & 0x3);
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
index=ConstrainColormapIndex(image,(*p >> 4) & 0x3);
SetPixelIndex(indexes+x+1,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
index=ConstrainColormapIndex(image,(*p >> 2) & 0x3);
SetPixelIndex(indexes+x+2,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
index=ConstrainColormapIndex(image,(*p) & 0x3);
SetPixelIndex(indexes+x+3,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
p++;
}
if ((image->columns % 4) != 0)
{
index=ConstrainColormapIndex(image,(*p >> 6) & 0x3);
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
if ((image->columns % 4) > 1)
{
index=ConstrainColormapIndex(image,(*p >> 4) & 0x3);
SetPixelIndex(indexes+x+1,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
if ((image->columns % 4) > 2)
{
index=ConstrainColormapIndex(image,(*p >> 2) & 0x3);
SetPixelIndex(indexes+x+2,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
}
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
break;
}
case 4: /* Convert PseudoColor scanline. */
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f);
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
index=ConstrainColormapIndex(image,(*p) & 0x0f);
SetPixelIndex(indexes+x+1,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
p++;
q++;
}
if ((image->columns % 2) != 0)
{
index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f);
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
break;
}
case 8: /* Convert PseudoColor scanline. */
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL) break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=ConstrainColormapIndex(image,*p);
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
break;
case 24: /* Convert DirectColor scanline. */
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleCharToQuantum(*p++));
SetPixelGreen(q,ScaleCharToQuantum(*p++));
SetPixelBlue(q,ScaleCharToQuantum(*p++));
q++;
}
if (!SyncAuthenticPixels(image,exception))
break;
break;
}
}
/* Helper for WPG1 raster reader. */
#define InsertByte(b) \
{ \
BImgBuff[x]=b; \
x++; \
if((ssize_t) x>=ldblk) \
{ \
InsertRow(BImgBuff,(ssize_t) y,image,bpp); \
x=0; \
y++; \
} \
}
/* WPG1 raster reader. */
static int UnpackWPGRaster(Image *image,int bpp)
{
int
x,
y,
i;
unsigned char
bbuf,
*BImgBuff,
RunCount;
ssize_t
ldblk;
x=0;
y=0;
ldblk=(ssize_t) ((bpp*image->columns+7)/8);
BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk,
8*sizeof(*BImgBuff));
if(BImgBuff==NULL) return(-2);
while(y<(ssize_t) image->rows)
{
int
c;
c=ReadBlobByte(image);
if (c == EOF)
break;
bbuf=(unsigned char) c;
RunCount=bbuf & 0x7F;
if(bbuf & 0x80)
{
if(RunCount) /* repeat next byte runcount * */
{
bbuf=ReadBlobByte(image);
for(i=0;i<(int) RunCount;i++) InsertByte(bbuf);
}
else { /* read next byte as RunCount; repeat 0xFF runcount* */
c=ReadBlobByte(image);
if (c < 0)
break;
RunCount=(unsigned char) c;
for(i=0;i<(int) RunCount;i++) InsertByte(0xFF);
}
}
else {
if(RunCount) /* the next RunCount bytes are read directly */
{
for(i=0;i < (int) RunCount;i++)
{
bbuf=ReadBlobByte(image);
InsertByte(bbuf);
}
}
else { /* repeat previous line runcount* */
c=ReadBlobByte(image);
if (c < 0)
break;
RunCount=(unsigned char) c;
if(x) { /* attempt to duplicate row from x position: */
/* I do not know what to do here */
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
return(-3);
}
for(i=0;i < (int) RunCount;i++)
{
x=0;
y++; /* Here I need to duplicate previous row RUNCOUNT* */
if(y<2) continue;
if(y>(ssize_t) image->rows)
{
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
return(-4);
}
InsertRow(BImgBuff,y-1,image,bpp);
}
}
}
if (EOFBlob(image) != MagickFalse)
break;
}
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
return(y < (ssize_t) image->rows ? -5 : 0);
}
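/*
 * Worked example of the WPG1 RLE stream handled above (illustrative bytes,
 * not from a real file): 0x83 0xAA expands to AA AA AA (high bit set, run
 * of three copies of the next byte); 0x80 0x05 expands to five 0xFF bytes
 * (run count 0, so the next byte gives the 0xFF repeat count); 0x02 0x12
 * 0x34 copies the two literal bytes 12 34; and 0x00 0x02 at the start of a
 * row repeats the previous row twice.
 */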
/* Helper for WPG2 reader. */
#define InsertByte6(b) \
{ \
DisableMSCWarning(4310) \
if(XorMe)\
BImgBuff[x] = (unsigned char)~b;\
else\
BImgBuff[x] = b;\
RestoreMSCWarning \
x++; \
if((ssize_t) x >= ldblk) \
{ \
InsertRow(BImgBuff,(ssize_t) y,image,bpp); \
x=0; \
y++; \
} \
}
/* WPG2 raster reader. */
static int UnpackWPG2Raster(Image *image,int bpp)
{
int XorMe = 0;
int
RunCount;
size_t
x,
y;
ssize_t
i,
ldblk;
unsigned int
SampleSize=1;
unsigned char
bbuf,
*BImgBuff,
SampleBuffer[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
x=0;
y=0;
ldblk=(ssize_t) ((bpp*image->columns+7)/8);
BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk,
sizeof(*BImgBuff));
if(BImgBuff==NULL)
return(-2);
while( y< image->rows)
{
bbuf=ReadBlobByte(image);
switch(bbuf)
{
case 0x7D:
SampleSize=ReadBlobByte(image); /* DSZ */
if(SampleSize>8)
{
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
return(-2);
}
if(SampleSize<1)
{
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
return(-2);
}
break;
case 0x7E:
(void) FormatLocaleFile(stderr,
"\nUnsupported WPG token XOR, please report!");
XorMe=!XorMe;
break;
case 0x7F:
RunCount=ReadBlobByte(image); /* BLK */
if (RunCount < 0)
break;
for(i=0; i < SampleSize*(RunCount+1); i++)
{
InsertByte6(0);
}
break;
case 0xFD:
RunCount=ReadBlobByte(image); /* EXT */
if (RunCount < 0)
break;
for(i=0; i<= RunCount;i++)
for(bbuf=0; bbuf < SampleSize; bbuf++)
InsertByte6(SampleBuffer[bbuf]);
break;
case 0xFE:
RunCount=ReadBlobByte(image); /* RST */
if (RunCount < 0)
break;
if(x!=0)
{
(void) FormatLocaleFile(stderr,
"\nUnsupported WPG2 unaligned token RST x=%.20g, please report!\n"
,(double) x);
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
return(-3);
}
{
/* duplicate the previous row RunCount x */
for(i=0;i<=RunCount;i++)
{
InsertRow(BImgBuff,(ssize_t) (image->rows >= y ? y : image->rows-1),
image,bpp);
y++;
}
}
break;
case 0xFF:
RunCount=ReadBlobByte(image); /* WHT */
if (RunCount < 0)
break;
for (i=0; i < SampleSize*(RunCount+1); i++)
{
InsertByte6(0xFF);
}
break;
default:
RunCount=bbuf & 0x7F;
if(bbuf & 0x80) /* REP */
{
for(i=0; i < SampleSize; i++)
SampleBuffer[i]=ReadBlobByte(image);
for(i=0;i<=RunCount;i++)
for(bbuf=0;bbuf<SampleSize;bbuf++)
InsertByte6(SampleBuffer[bbuf]);
}
else { /* NRP */
for(i=0; i< SampleSize*(RunCount+1);i++)
{
bbuf=ReadBlobByte(image);
InsertByte6(bbuf);
}
}
}
if (EOFBlob(image) != MagickFalse)
break;
}
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
return(0);
}
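/*
 * Worked example of the WPG2 token stream above (illustrative bytes, not
 * from a real file), assuming the default SampleSize of 1: 0x7F 0x02 (BLK)
 * inserts (2+1) = 3 zero bytes; 0xFF 0x00 (WHT) inserts a single 0xFF
 * byte; 0x85 0xAB (REP, run count 5) inserts the sample byte AB six times;
 * and 0x02 0x11 0x22 0x33 (NRP, run count 2) copies the three literal
 * bytes that follow.
 */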
typedef float tCTM[3][3];
static unsigned LoadWPG2Flags(Image *image,char Precision,float *Angle,tCTM *CTM)
{
const unsigned char TPR=1,TRN=2,SKW=4,SCL=8,ROT=0x10,OID=0x20,LCK=0x80;
ssize_t x;
unsigned DenX;
unsigned Flags;
(void) memset(*CTM,0,sizeof(*CTM)); /*CTM.erase();CTM.resize(3,3);*/
(*CTM)[0][0]=1;
(*CTM)[1][1]=1;
(*CTM)[2][2]=1;
Flags=ReadBlobLSBShort(image);
if(Flags & LCK) (void) ReadBlobLSBLong(image); /*Edit lock*/
if(Flags & OID)
{
if(Precision==0)
{(void) ReadBlobLSBShort(image);} /*ObjectID*/
else
{(void) ReadBlobLSBLong(image);} /*ObjectID (Double precision)*/
}
if(Flags & ROT)
{
x=ReadBlobLSBLong(image); /*Rot Angle*/
if(Angle) *Angle=x/65536.0;
}
if(Flags & (ROT|SCL))
{
x=ReadBlobLSBLong(image); /*Sx*cos()*/
(*CTM)[0][0] = (float)x/0x10000;
x=ReadBlobLSBLong(image); /*Sy*cos()*/
(*CTM)[1][1] = (float)x/0x10000;
}
if(Flags & (ROT|SKW))
{
x=ReadBlobLSBLong(image); /*Kx*sin()*/
(*CTM)[1][0] = (float)x/0x10000;
x=ReadBlobLSBLong(image); /*Ky*sin()*/
(*CTM)[0][1] = (float)x/0x10000;
}
if(Flags & TRN)
{
x=ReadBlobLSBLong(image); DenX=ReadBlobLSBShort(image); /*Tx*/
if(x>=0) (*CTM)[0][2] = (float)x+(float)DenX/0x10000;
else (*CTM)[0][2] = (float)x-(float)DenX/0x10000;
x=ReadBlobLSBLong(image); DenX=ReadBlobLSBShort(image); /*Ty*/
if(x>=0) (*CTM)[1][2] = (float)x+(float)DenX/0x10000;
else (*CTM)[1][2] = (float)x-(float)DenX/0x10000;
}
if(Flags & TPR)
{
x=ReadBlobLSBShort(image); DenX=ReadBlobLSBShort(image); /*Px*/
(*CTM)[2][0] = x + (float)DenX/0x10000;
x=ReadBlobLSBShort(image); DenX=ReadBlobLSBShort(image); /*Py*/
(*CTM)[2][1] = x + (float)DenX/0x10000;
}
return(Flags);
}
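/*
 * Illustrative note on the 16.16 fixed-point convention used above
 * (hypothetical raw values): a transform word of 0x00018000 divided by
 * 0x10000 yields 1.5, and a rotation angle stored as 0x002D0000 yields
 * 45.0 degrees after the /65536.0 conversion.
 */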
static Image *ExtractPostscript(Image *image,const ImageInfo *image_info,
MagickOffsetType PS_Offset,ssize_t PS_Size,ExceptionInfo *exception)
{
char
postscript_file[MaxTextExtent];
const MagicInfo
*magic_info;
FILE
*ps_file;
ImageInfo
*clone_info;
Image
*image2;
unsigned char
magick[2*MaxTextExtent];
if ((clone_info=CloneImageInfo(image_info)) == NULL)
return(image);
clone_info->blob=(void *) NULL;
clone_info->length=0;
/* Obtain temporary file */
(void) AcquireUniqueFilename(postscript_file);
ps_file=fopen_utf8(postscript_file,"wb");
if (ps_file == (FILE *) NULL)
goto FINISH;
/* Copy postscript to temporary file */
(void) SeekBlob(image,PS_Offset,SEEK_SET);
(void) ReadBlob(image, 2*MaxTextExtent, magick);
(void) SeekBlob(image,PS_Offset,SEEK_SET);
while(PS_Size-- > 0)
{
(void) fputc(ReadBlobByte(image),ps_file);
}
(void) fclose(ps_file);
/* Detect file format - Check magic.mgk configuration file. */
magic_info=GetMagicInfo(magick,2*MaxTextExtent,exception);
if(magic_info == (const MagicInfo *) NULL) goto FINISH_UNL;
/* printf("Detected:%s \n",magic_info->name); */
if(exception->severity != UndefinedException) goto FINISH_UNL;
if(magic_info->name == (char *) NULL) goto FINISH_UNL;
(void) strncpy(clone_info->magick,magic_info->name,MaxTextExtent-1);
/* Read nested image */
/*FormatString(clone_info->filename,"%s:%s",magic_info->name,postscript_file);*/
FormatLocaleString(clone_info->filename,MaxTextExtent,"%s",postscript_file);
image2=ReadImage(clone_info,exception);
if (!image2)
goto FINISH_UNL;
/*
Replace current image with new image while copying base image
attributes.
*/
(void) CopyMagickString(image2->filename,image->filename,MaxTextExtent);
(void) CopyMagickString(image2->magick_filename,image->magick_filename,MaxTextExtent);
(void) CopyMagickString(image2->magick,image->magick,MaxTextExtent);
image2->depth=image->depth;
DestroyBlob(image2);
image2->blob=ReferenceBlob(image->blob);
if ((image->rows == 0) || (image->columns == 0))
DeleteImageFromList(&image);
AppendImageToList(&image,image2);
FINISH_UNL:
(void) RelinquishUniqueFileResource(postscript_file);
FINISH:
DestroyImageInfo(clone_info);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d W P G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method ReadWPGImage reads an WPG X image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadWPGImage method is:
%
% Image *ReadWPGImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Method ReadWPGImage returns a pointer to the image after
% reading. A null image is returned if there is a memory shortage or if
% the image cannot be read.
%
% o image_info: Specifies a pointer to a ImageInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadWPGImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
typedef struct
{
size_t FileId;
MagickOffsetType DataOffset;
unsigned int ProductType;
unsigned int FileType;
unsigned char MajorVersion;
unsigned char MinorVersion;
unsigned int EncryptKey;
unsigned int Reserved;
} WPGHeader;
typedef struct
{
unsigned char RecType;
size_t RecordLength;
} WPGRecord;
typedef struct
{
unsigned char Class;
unsigned char RecType;
size_t Extension;
size_t RecordLength;
} WPG2Record;
typedef struct
{
unsigned HorizontalUnits;
unsigned VerticalUnits;
unsigned char PosSizePrecision;
} WPG2Start;
typedef struct
{
unsigned int Width;
unsigned int Height;
unsigned int Depth;
unsigned int HorzRes;
unsigned int VertRes;
} WPGBitmapType1;
typedef struct
{
unsigned int Width;
unsigned int Height;
unsigned char Depth;
unsigned char Compression;
} WPG2BitmapType1;
typedef struct
{
unsigned int RotAngle;
unsigned int LowLeftX;
unsigned int LowLeftY;
unsigned int UpRightX;
unsigned int UpRightY;
unsigned int Width;
unsigned int Height;
unsigned int Depth;
unsigned int HorzRes;
unsigned int VertRes;
} WPGBitmapType2;
typedef struct
{
unsigned int StartIndex;
unsigned int NumOfEntries;
} WPGColorMapRec;
/*
typedef struct {
size_t PS_unknown1;
unsigned int PS_unknown2;
unsigned int PS_unknown3;
} WPGPSl1Record;
*/
Image
*image;
unsigned int
status;
WPGHeader
Header;
WPGRecord
Rec;
WPG2Record
Rec2;
WPG2Start StartWPG;
WPGBitmapType1
BitmapHeader1;
WPG2BitmapType1
Bitmap2Header1;
WPGBitmapType2
BitmapHeader2;
WPGColorMapRec
WPG_Palette;
int
i,
bpp,
WPG2Flags;
ssize_t
ldblk;
size_t
one;
unsigned char
*BImgBuff;
tCTM CTM; /*current transform matrix*/
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
one=1;
image=AcquireImage(image_info);
image->depth=8;
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read WPG image.
*/
Header.FileId=ReadBlobLSBLong(image);
Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image);
Header.ProductType=ReadBlobLSBShort(image);
Header.FileType=ReadBlobLSBShort(image);
Header.MajorVersion=ReadBlobByte(image);
Header.MinorVersion=ReadBlobByte(image);
Header.EncryptKey=ReadBlobLSBShort(image);
Header.Reserved=ReadBlobLSBShort(image);
if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (Header.EncryptKey!=0)
ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported");
image->columns = 1;
image->rows = 1;
image->colors = 0;
bpp=0;
BitmapHeader2.RotAngle=0;
Rec2.RecordLength = 0;
switch(Header.FileType)
{
case 1: /* WPG level 1 */
while(!EOFBlob(image)) /* object parser loop */
{
(void) SeekBlob(image,Header.DataOffset,SEEK_SET);
if(EOFBlob(image))
break;
Rec.RecType=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rd_WP_DWORD(image,&Rec.RecordLength);
if(EOFBlob(image))
break;
Header.DataOffset=TellBlob(image)+Rec.RecordLength;
switch(Rec.RecType)
{
case 0x0B: /* bitmap type 1 */
BitmapHeader1.Width=ReadBlobLSBShort(image);
BitmapHeader1.Height=ReadBlobLSBShort(image);
if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
BitmapHeader1.Depth=ReadBlobLSBShort(image);
BitmapHeader1.HorzRes=ReadBlobLSBShort(image);
BitmapHeader1.VertRes=ReadBlobLSBShort(image);
if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes)
{
image->units=PixelsPerCentimeterResolution;
image->x_resolution=BitmapHeader1.HorzRes/470.0;
image->y_resolution=BitmapHeader1.VertRes/470.0;
}
image->columns=BitmapHeader1.Width;
image->rows=BitmapHeader1.Height;
bpp=BitmapHeader1.Depth;
goto UnpackRaster;
case 0x0E: /*Color palette */
WPG_Palette.StartIndex=ReadBlobLSBShort(image);
WPG_Palette.NumOfEntries=ReadBlobLSBShort(image);
if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) >
(Rec.RecordLength-2-2) / 3)
ThrowReaderException(CorruptImageError,"InvalidColormapIndex");
image->colors=WPG_Palette.NumOfEntries;
if (!AcquireImageColormap(image,image->colors))
goto NoMemory;
for (i=WPG_Palette.StartIndex;
i < (int)WPG_Palette.NumOfEntries; i++)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
}
break;
case 0x11: /* Start PS l1 */
if(Rec.RecordLength > 8)
image=ExtractPostscript(image,image_info,
TellBlob(image)+8, /* skip PS header in the wpg */
(ssize_t) Rec.RecordLength-8,exception);
break;
case 0x14: /* bitmap type 2 */
BitmapHeader2.RotAngle=ReadBlobLSBShort(image);
BitmapHeader2.LowLeftX=ReadBlobLSBShort(image);
BitmapHeader2.LowLeftY=ReadBlobLSBShort(image);
BitmapHeader2.UpRightX=ReadBlobLSBShort(image);
BitmapHeader2.UpRightY=ReadBlobLSBShort(image);
BitmapHeader2.Width=ReadBlobLSBShort(image);
BitmapHeader2.Height=ReadBlobLSBShort(image);
if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
BitmapHeader2.Depth=ReadBlobLSBShort(image);
BitmapHeader2.HorzRes=ReadBlobLSBShort(image);
BitmapHeader2.VertRes=ReadBlobLSBShort(image);
image->units=PixelsPerCentimeterResolution;
image->page.width=(unsigned int)
((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0);
image->page.height=(unsigned int)
((BitmapHeader2.LowLeftY-BitmapHeader2.UpRightY)/470.0);
image->page.x=(int) (BitmapHeader2.LowLeftX/470.0);
image->page.y=(int) (BitmapHeader2.LowLeftY/470.0);
if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes)
{
image->x_resolution=BitmapHeader2.HorzRes/470.0;
image->y_resolution=BitmapHeader2.VertRes/470.0;
}
image->columns=BitmapHeader2.Width;
image->rows=BitmapHeader2.Height;
bpp=BitmapHeader2.Depth;
UnpackRaster:
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
break;
if ((image->colors == 0) && (bpp != 24))
{
image->colors=one << bpp;
if (!AcquireImageColormap(image,image->colors))
{
NoMemory:
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
/* printf("Load default colormap \n"); */
for (i=0; (i < (int) image->colors) && (i < 256); i++)
{
image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red);
image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green);
image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue);
}
}
else
{
if (bpp < 24)
if ( (image->colors < (one << bpp)) && (bpp != 24) )
image->colormap=(PixelPacket *) ResizeQuantumMemory(
image->colormap,(size_t) (one << bpp),
sizeof(*image->colormap));
}
if (bpp == 1)
{
if(image->colormap[0].red==0 &&
image->colormap[0].green==0 &&
image->colormap[0].blue==0 &&
image->colormap[1].red==0 &&
image->colormap[1].green==0 &&
image->colormap[1].blue==0)
{ /* fix crippled monochrome palette */
image->colormap[1].red =
image->colormap[1].green =
image->colormap[1].blue = QuantumRange;
}
}
if(UnpackWPGRaster(image,bpp) < 0)
/* The raster cannot be unpacked */
{
DecompressionFailed:
ThrowReaderException(CoderError,"UnableToDecompressImage");
}
if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping)
{
/* flop command */
if(BitmapHeader2.RotAngle & 0x8000)
{
Image
*flop_image;
flop_image = FlopImage(image, exception);
if (flop_image != (Image *) NULL) {
DuplicateBlob(flop_image,image);
ReplaceImageInList(&image,flop_image);
}
}
/* flip command */
if(BitmapHeader2.RotAngle & 0x2000)
{
Image
*flip_image;
flip_image = FlipImage(image, exception);
if (flip_image != (Image *) NULL) {
DuplicateBlob(flip_image,image);
ReplaceImageInList(&image,flip_image);
}
}
/* rotate command */
if(BitmapHeader2.RotAngle & 0x0FFF)
{
Image
*rotate_image;
rotate_image=RotateImage(image,(BitmapHeader2.RotAngle &
0x0FFF), exception);
if (rotate_image != (Image *) NULL) {
DuplicateBlob(rotate_image,image);
ReplaceImageInList(&image,rotate_image);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image);
image->depth=8;
if (image->next == (Image *) NULL)
goto Finish;
image=SyncNextImageInList(image);
image->columns=image->rows=1;
image->colors=0;
break;
case 0x1B: /* Postscript l2 */
if(Rec.RecordLength>0x3C)
image=ExtractPostscript(image,image_info,
TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */
(ssize_t) Rec.RecordLength-0x3C,exception);
break;
}
}
break;
case 2: /* WPG level 2 */
(void) memset(CTM,0,sizeof(CTM));
StartWPG.PosSizePrecision = 0;
while(!EOFBlob(image)) /* object parser loop */
{
(void) SeekBlob(image,Header.DataOffset,SEEK_SET);
if(EOFBlob(image))
break;
Rec2.Class=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rec2.RecType=(i=ReadBlobByte(image));
if(i==EOF)
break;
Rd_WP_DWORD(image,&Rec2.Extension);
Rd_WP_DWORD(image,&Rec2.RecordLength);
if(EOFBlob(image))
break;
Header.DataOffset=TellBlob(image)+Rec2.RecordLength;
switch(Rec2.RecType)
{
case 1:
StartWPG.HorizontalUnits=ReadBlobLSBShort(image);
StartWPG.VerticalUnits=ReadBlobLSBShort(image);
StartWPG.PosSizePrecision=ReadBlobByte(image);
break;
case 0x0C: /* Color palette */
WPG_Palette.StartIndex=ReadBlobLSBShort(image);
WPG_Palette.NumOfEntries=ReadBlobLSBShort(image);
if ((WPG_Palette.NumOfEntries-WPG_Palette.StartIndex) >
(Rec2.RecordLength-2-2) / 3)
ThrowReaderException(CorruptImageError,"InvalidColormapIndex");
image->colors=WPG_Palette.NumOfEntries;
if (AcquireImageColormap(image,image->colors) == MagickFalse)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
for (i=WPG_Palette.StartIndex;
i < (int)WPG_Palette.NumOfEntries; i++)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
(void) ReadBlobByte(image); /*Opacity??*/
}
break;
case 0x0E:
Bitmap2Header1.Width=ReadBlobLSBShort(image);
Bitmap2Header1.Height=ReadBlobLSBShort(image);
if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
Bitmap2Header1.Depth=ReadBlobByte(image);
Bitmap2Header1.Compression=ReadBlobByte(image);
if(Bitmap2Header1.Compression > 1)
continue; /*Unknown compression method */
switch(Bitmap2Header1.Depth)
{
case 1:
bpp=1;
break;
case 2:
bpp=2;
break;
case 3:
bpp=4;
break;
case 4:
bpp=8;
break;
case 8:
bpp=24;
break;
default:
continue; /*Ignore raster with unknown depth*/
}
image->columns=Bitmap2Header1.Width;
image->rows=Bitmap2Header1.Height;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
break;
if ((image->colors == 0) && (bpp != 24))
{
size_t
one;
one=1;
image->colors=one << bpp;
if (!AcquireImageColormap(image,image->colors))
goto NoMemory;
}
else
{
if(bpp < 24)
if( image->colors<(one << bpp) && bpp!=24 )
image->colormap=(PixelPacket *) ResizeQuantumMemory(
image->colormap,(size_t) (one << bpp),
sizeof(*image->colormap));
}
switch(Bitmap2Header1.Compression)
{
case 0: /*Uncompressed raster*/
{
ldblk=(ssize_t) ((bpp*image->columns+7)/8);
BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t)
ldblk+1,sizeof(*BImgBuff));
if (BImgBuff == (unsigned char *) NULL)
goto NoMemory;
for(i=0; i< (ssize_t) image->rows; i++)
{
(void) ReadBlob(image,ldblk,BImgBuff);
InsertRow(BImgBuff,i,image,bpp);
}
if(BImgBuff)
BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);
break;
}
case 1: /*RLE for WPG2 */
{
if( UnpackWPG2Raster(image,bpp) < 0)
goto DecompressionFailed;
break;
}
}
if(CTM[0][0]<0 && !image_info->ping)
{ /*?? RotAngle=360-RotAngle;*/
Image
*flop_image;
flop_image = FlopImage(image, exception);
if (flop_image != (Image *) NULL) {
DuplicateBlob(flop_image,image);
ReplaceImageInList(&image,flop_image);
}
/* Try to change CTM according to Flip - I am not sure, must be checked.
Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0;
Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0;
Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll);
Tx(1,2)=0; Tx(2,2)=1; */
}
if(CTM[1][1]<0 && !image_info->ping)
{ /*?? RotAngle=360-RotAngle;*/
Image
*flip_image;
flip_image = FlipImage(image, exception);
if (flip_image != (Image *) NULL) {
DuplicateBlob(flip_image,image);
ReplaceImageInList(&image,flip_image);
}
/* Try to change CTM according to Flip - I am not sure, must be checked.
float_matrix Tx(3,3);
Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0;
Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0;
Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll);
Tx(2,2)=1; */
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image);
image->depth=8;
if (image->next == (Image *) NULL)
goto Finish;
image=SyncNextImageInList(image);
image->columns=image->rows=1;
image->colors=0;
break;
case 0x12: /* Postscript WPG2*/
i=ReadBlobLSBShort(image);
if(Rec2.RecordLength > (unsigned int) i)
image=ExtractPostscript(image,image_info,
TellBlob(image)+i, /*skip PS header in the wpg2*/
(ssize_t) (Rec2.RecordLength-i-2),exception);
break;
case 0x1B: /*bitmap rectangle*/
WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM);
(void) WPG2Flags;
break;
}
}
break;
default:
{
ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported");
}
}
Finish:
(void) CloseBlob(image);
{
Image
*p;
ssize_t
scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers.
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=(size_t) scene++;
}
if (image == (Image *) NULL)
ThrowReaderException(CorruptImageError,
"ImageFileDoesNotContainAnyImageData");
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r W P G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method RegisterWPGImage adds attributes for the WPG image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterWPGImage method is:
%
% size_t RegisterWPGImage(void)
%
*/
ModuleExport size_t RegisterWPGImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("WPG");
entry->decoder=(DecodeImageHandler *) ReadWPGImage;
entry->magick=(IsImageFormatHandler *) IsWPG;
entry->description=AcquireString("Word Perfect Graphics");
entry->module=ConstantString("WPG");
entry->seekable_stream=MagickTrue;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r W P G I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Method UnregisterWPGImage removes format registrations made by the
% WPG module from the list of supported formats.
%
% The format of the UnregisterWPGImage method is:
%
% UnregisterWPGImage(void)
%
*/
ModuleExport void UnregisterWPGImage(void)
{
(void) UnregisterMagickInfo("WPG");
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_4814_0 |
crossvul-cpp_data_bad_3527_4 | /*
* linux/net/sunrpc/xdr.c
*
* Generic XDR support.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
/*
* XDR functions for basic NFS types
*/
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
unsigned int quadlen = XDR_QUADLEN(obj->len);
p[quadlen] = 0; /* zero trailing bytes */
*p++ = cpu_to_be32(obj->len);
memcpy(p, obj->data, obj->len);
return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
unsigned int len;
if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
return NULL;
obj->len = len;
obj->data = (u8 *) p;
return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
/**
* xdr_encode_opaque_fixed - Encode fixed length opaque data
* @p: pointer to current position in XDR buffer.
* @ptr: pointer to data to encode (or NULL)
* @nbytes: size of data.
*
* Copy the array of data of length nbytes at ptr to the XDR buffer
* at position p, then align to the next 32-bit boundary by padding
* with zero bytes (see RFC1832).
* Note: if ptr is NULL, only the padding is performed.
*
* Returns the updated current XDR buffer position
*
*/
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
if (likely(nbytes != 0)) {
unsigned int quadlen = XDR_QUADLEN(nbytes);
unsigned int padding = (quadlen << 2) - nbytes;
if (ptr != NULL)
memcpy(p, ptr, nbytes);
if (padding != 0)
memset((char *)p + nbytes, 0, padding);
p += quadlen;
}
return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
/**
* xdr_encode_opaque - Encode variable length opaque data
* @p: pointer to current position in XDR buffer.
* @ptr: pointer to data to encode (or NULL)
* @nbytes: size of data.
*
* Returns the updated current XDR buffer position
*/
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
*p++ = cpu_to_be32(nbytes);
return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
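/*
 * Illustrative example (not part of the original file): encoding the
 * 5-byte opaque "hello" with xdr_encode_opaque() emits the length word
 * 00 00 00 05 followed by 68 65 6c 6c 6f and three zero pad bytes, so
 * the returned pointer has advanced by 12 bytes (3 quads), as required
 * by RFC 1832.
 */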
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
unsigned int *lenp, unsigned int maxlen)
{
u32 len;
len = be32_to_cpu(*p++);
if (len > maxlen)
return NULL;
*lenp = len;
*sp = (char *) p;
return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
/**
* xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
* @buf: XDR buffer where string resides
* @len: length of string, in bytes
*
*/
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
char *kaddr;
kaddr = kmap_atomic(buf->pages[0], KM_USER0);
kaddr[buf->page_base + len] = '\0';
kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
unsigned int len)
{
struct kvec *tail = xdr->tail;
u32 *p;
xdr->pages = pages;
xdr->page_base = base;
xdr->page_len = len;
p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
tail->iov_base = p;
tail->iov_len = 0;
if (len & 3) {
unsigned int pad = 4 - (len & 3);
*p = 0;
tail->iov_base = (char *)p + (len & 3);
tail->iov_len = pad;
len += pad;
}
xdr->buflen += len;
xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
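/*
 * Illustrative example (hypothetical numbers): calling xdr_encode_pages()
 * with len = 10 leaves (10 & 3) = 2 trailing bytes, so pad = 2; the tail
 * kvec then points 2 bytes into the zeroed quad and carries the 2 pad
 * bytes, and both buflen and len grow by the rounded total of 12.
 */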
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
{
struct kvec *head = xdr->head;
struct kvec *tail = xdr->tail;
char *buf = (char *)head->iov_base;
unsigned int buflen = head->iov_len;
head->iov_len = offset;
xdr->pages = pages;
xdr->page_base = base;
xdr->page_len = len;
tail->iov_base = buf + offset;
tail->iov_len = buflen - offset;
xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
/*
* Helper routines for doing 'memmove' like operations on a struct xdr_buf
*
* _shift_data_right_pages
* @pages: vector of pages containing both the source and dest memory area.
* @pgto_base: page vector address of destination
* @pgfrom_base: page vector address of source
* @len: number of bytes to copy
*
* Note: the addresses pgto_base and pgfrom_base are both calculated in
* the same way:
* if a memory area starts at byte 'base' in page 'pages[i]',
* then its address is given as (i << PAGE_CACHE_SHIFT) + base
* Also note: pgfrom_base must be < pgto_base, but the memory areas
* they point to may overlap.
*/
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
size_t pgfrom_base, size_t len)
{
struct page **pgfrom, **pgto;
char *vfrom, *vto;
size_t copy;
BUG_ON(pgto_base <= pgfrom_base);
pgto_base += len;
pgfrom_base += len;
pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
pgto_base &= ~PAGE_CACHE_MASK;
pgfrom_base &= ~PAGE_CACHE_MASK;
do {
/* Are any pointers crossing a page boundary? */
if (pgto_base == 0) {
pgto_base = PAGE_CACHE_SIZE;
pgto--;
}
if (pgfrom_base == 0) {
pgfrom_base = PAGE_CACHE_SIZE;
pgfrom--;
}
copy = len;
if (copy > pgto_base)
copy = pgto_base;
if (copy > pgfrom_base)
copy = pgfrom_base;
pgto_base -= copy;
pgfrom_base -= copy;
vto = kmap_atomic(*pgto, KM_USER0);
vfrom = kmap_atomic(*pgfrom, KM_USER1);
memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
flush_dcache_page(*pgto);
kunmap_atomic(vfrom, KM_USER1);
kunmap_atomic(vto, KM_USER0);
} while ((len -= copy) != 0);
}
/*
* _copy_to_pages
* @pages: array of pages
* @pgbase: page vector address of destination
* @p: pointer to source data
* @len: length
*
* Copies data from an arbitrary memory location into an array of pages.
* The copy is assumed to be non-overlapping.
*/
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
struct page **pgto;
char *vto;
size_t copy;
pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
pgbase &= ~PAGE_CACHE_MASK;
for (;;) {
copy = PAGE_CACHE_SIZE - pgbase;
if (copy > len)
copy = len;
vto = kmap_atomic(*pgto, KM_USER0);
memcpy(vto + pgbase, p, copy);
kunmap_atomic(vto, KM_USER0);
len -= copy;
if (len == 0)
break;
pgbase += copy;
if (pgbase == PAGE_CACHE_SIZE) {
flush_dcache_page(*pgto);
pgbase = 0;
pgto++;
}
p += copy;
}
flush_dcache_page(*pgto);
}
/*
* _copy_from_pages
* @p: pointer to destination
* @pages: array of pages
* @pgbase: offset of source data
* @len: length
*
* Copies data into an arbitrary memory location from an array of pages.
* The copy is assumed to be non-overlapping.
*/
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
struct page **pgfrom;
char *vfrom;
size_t copy;
pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
pgbase &= ~PAGE_CACHE_MASK;
do {
copy = PAGE_CACHE_SIZE - pgbase;
if (copy > len)
copy = len;
vfrom = kmap_atomic(*pgfrom, KM_USER0);
memcpy(p, vfrom + pgbase, copy);
kunmap_atomic(vfrom, KM_USER0);
pgbase += copy;
if (pgbase == PAGE_CACHE_SIZE) {
pgbase = 0;
pgfrom++;
}
p += copy;
} while ((len -= copy) != 0);
}
/*
* xdr_shrink_bufhead
* @buf: xdr_buf
* @len: bytes to remove from buf->head[0]
*
* Shrinks XDR buffer's header kvec buf->head[0] by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the inlined pages and/or the tail.
*/
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
struct kvec *head, *tail;
size_t copy, offs;
unsigned int pglen = buf->page_len;
tail = buf->tail;
head = buf->head;
BUG_ON (len > head->iov_len);
/* Shift the tail first */
if (tail->iov_len != 0) {
if (tail->iov_len > len) {
copy = tail->iov_len - len;
memmove((char *)tail->iov_base + len,
tail->iov_base, copy);
}
/* Copy from the inlined pages into the tail */
copy = len;
if (copy > pglen)
copy = pglen;
offs = len - copy;
if (offs >= tail->iov_len)
copy = 0;
else if (copy > tail->iov_len - offs)
copy = tail->iov_len - offs;
if (copy != 0)
_copy_from_pages((char *)tail->iov_base + offs,
buf->pages,
buf->page_base + pglen + offs - len,
copy);
/* Do we also need to copy data from the head into the tail ? */
if (len > pglen) {
offs = copy = len - pglen;
if (copy > tail->iov_len)
copy = tail->iov_len;
memcpy(tail->iov_base,
(char *)head->iov_base +
head->iov_len - offs,
copy);
}
}
/* Now handle pages */
if (pglen != 0) {
if (pglen > len)
_shift_data_right_pages(buf->pages,
buf->page_base + len,
buf->page_base,
pglen - len);
copy = len;
if (len > pglen)
copy = pglen;
_copy_to_pages(buf->pages, buf->page_base,
(char *)head->iov_base + head->iov_len - len,
copy);
}
head->iov_len -= len;
buf->buflen -= len;
/* Have we truncated the message? */
if (buf->len > buf->buflen)
buf->len = buf->buflen;
}
/*
* xdr_shrink_pagelen
* @buf: xdr_buf
* @len: bytes to remove from buf->pages
*
* Shrinks XDR buffer's page array buf->pages by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the tail.
*/
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
struct kvec *tail;
size_t copy;
unsigned int pglen = buf->page_len;
unsigned int tailbuf_len;
tail = buf->tail;
BUG_ON (len > pglen);
tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
/* Shift the tail first */
if (tailbuf_len != 0) {
unsigned int free_space = tailbuf_len - tail->iov_len;
if (len < free_space)
free_space = len;
tail->iov_len += free_space;
copy = len;
if (tail->iov_len > len) {
char *p = (char *)tail->iov_base + len;
memmove(p, tail->iov_base, tail->iov_len - len);
} else
copy = tail->iov_len;
/* Copy from the inlined pages into the tail */
_copy_from_pages((char *)tail->iov_base,
buf->pages, buf->page_base + pglen - len,
copy);
}
buf->page_len -= len;
buf->buflen -= len;
/* Have we truncated the message? */
if (buf->len > buf->buflen)
buf->len = buf->buflen;
}
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
* xdr_init_encode - Initialize a struct xdr_stream for sending data.
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer in which to encode data
* @p: current pointer inside XDR buffer
*
* Note: at the moment the RPC client only passes the length of our
* scratch buffer in the xdr_buf's header kvec. Previously this
* meant we needed to call xdr_adjust_iovec() after encoding the
* data. With the new scheme, the xdr_stream manages the details
* of the buffer length, and takes care of adjusting the kvec
* length for us.
*/
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
struct kvec *iov = buf->head;
int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
BUG_ON(scratch_len < 0);
xdr->buf = buf;
xdr->iov = iov;
xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
BUG_ON(iov->iov_len > scratch_len);
if (p != xdr->p && p != NULL) {
size_t len;
BUG_ON(p < xdr->p || p > xdr->end);
len = (char *)p - (char *)xdr->p;
xdr->p = p;
buf->len += len;
iov->iov_len += len;
}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
/**
* xdr_reserve_space - Reserve buffer space for sending
* @xdr: pointer to xdr_stream
* @nbytes: number of bytes to reserve
*
* Checks that we have enough buffer space to encode 'nbytes' more
* bytes of data. If so, update the total xdr_buf length, and
* adjust the length of the current kvec.
*/
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p = xdr->p;
__be32 *q;
/* align nbytes on the next 32-bit boundary */
nbytes += 3;
nbytes &= ~3;
q = p + (nbytes >> 2);
if (unlikely(q > xdr->end || q < p))
return NULL;
xdr->p = q;
xdr->iov->iov_len += nbytes;
xdr->buf->len += nbytes;
return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
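/*
 * Example (a minimal sketch, not taken from any caller in this file):
 * a typical encoder reserves a slot and writes through the returned
 * pointer; the error code used on failure is a hypothetical choice:
 *
 *	__be32 *p = xdr_reserve_space(xdr, 4);
 *
 *	if (p == NULL)
 *		return -EMSGSIZE;
 *	*p = cpu_to_be32(count);
 */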
/**
* xdr_write_pages - Insert a list of pages into an XDR buffer for sending
* @xdr: pointer to xdr_stream
* @pages: list of pages
* @base: offset of first byte
* @len: length of data in bytes
*
*/
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
struct kvec *iov = buf->tail;
buf->pages = pages;
buf->page_base = base;
buf->page_len = len;
iov->iov_base = (char *)xdr->p;
iov->iov_len = 0;
xdr->iov = iov;
if (len & 3) {
unsigned int pad = 4 - (len & 3);
BUG_ON(xdr->p >= xdr->end);
iov->iov_base = (char *)xdr->p + (len & 3);
iov->iov_len += pad;
len += pad;
*xdr->p++ = 0;
}
buf->buflen += len;
buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
__be32 *p, unsigned int len)
{
if (len > iov->iov_len)
len = iov->iov_len;
if (p == NULL)
p = (__be32*)iov->iov_base;
xdr->p = p;
xdr->end = (__be32*)(iov->iov_base + len);
xdr->iov = iov;
xdr->page_ptr = NULL;
}
static int xdr_set_page_base(struct xdr_stream *xdr,
unsigned int base, unsigned int len)
{
unsigned int pgnr;
unsigned int maxlen;
unsigned int pgoff;
unsigned int pgend;
void *kaddr;
maxlen = xdr->buf->page_len;
if (base >= maxlen)
return -EINVAL;
maxlen -= base;
if (len > maxlen)
len = maxlen;
base += xdr->buf->page_base;
pgnr = base >> PAGE_SHIFT;
xdr->page_ptr = &xdr->buf->pages[pgnr];
kaddr = page_address(*xdr->page_ptr);
pgoff = base & ~PAGE_MASK;
xdr->p = (__be32*)(kaddr + pgoff);
pgend = pgoff + len;
if (pgend > PAGE_SIZE)
pgend = PAGE_SIZE;
xdr->end = (__be32*)(kaddr + pgend);
xdr->iov = NULL;
return 0;
}
static void xdr_set_next_page(struct xdr_stream *xdr)
{
unsigned int newbase;
newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
newbase -= xdr->buf->page_base;
if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
}
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
if (xdr->page_ptr != NULL)
xdr_set_next_page(xdr);
else if (xdr->iov == xdr->buf->head) {
if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
}
return xdr->p != xdr->end;
}
/**
* xdr_init_decode - Initialize an xdr_stream for decoding data.
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer from which to decode data
* @p: current pointer inside XDR buffer
*/
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
xdr->buf = buf;
xdr->scratch.iov_base = NULL;
xdr->scratch.iov_len = 0;
if (buf->head[0].iov_len != 0)
xdr_set_iov(xdr, buf->head, p, buf->len);
else if (buf->page_len != 0)
xdr_set_page_base(xdr, 0, buf->len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer into which to decode data
* @pages: list of pages to decode into
* @len: length in bytes of buffer in pages
*/
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len)
{
memset(buf, 0, sizeof(*buf));
buf->pages = pages;
buf->page_len = len;
buf->buflen = len;
buf->len = len;
xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p = xdr->p;
__be32 *q = p + XDR_QUADLEN(nbytes);
if (unlikely(q > xdr->end || q < p))
return NULL;
xdr->p = q;
return p;
}
/**
* xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
* @xdr: pointer to xdr_stream struct
* @buf: pointer to an empty buffer
* @buflen: size of 'buf'
*
* The scratch buffer is used when decoding from an array of pages.
* If an xdr_inline_decode() call spans across page boundaries, then
* we copy the data into the scratch buffer in order to allow linear
* access.
*/
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
xdr->scratch.iov_base = buf;
xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
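/*
 * Example (a minimal sketch): a decoder reading from a page-backed stream
 * typically attaches a small scratch buffer right after initialization, so
 * that xdr_inline_decode() can service requests that straddle a page
 * boundary. The buffer size below is an arbitrary choice:
 *
 *	char scratch[128];
 *
 *	xdr_init_decode(xdr, buf, p);
 *	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
 */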
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p;
void *cpdest = xdr->scratch.iov_base;
size_t cplen = (char *)xdr->end - (char *)xdr->p;
if (nbytes > xdr->scratch.iov_len)
return NULL;
memcpy(cpdest, xdr->p, cplen);
cpdest += cplen;
nbytes -= cplen;
if (!xdr_set_next_buffer(xdr))
return NULL;
p = __xdr_inline_decode(xdr, nbytes);
if (p == NULL)
return NULL;
memcpy(cpdest, p, nbytes);
return xdr->scratch.iov_base;
}
/**
* xdr_inline_decode - Retrieve XDR data to decode
* @xdr: pointer to xdr_stream struct
* @nbytes: number of bytes of data to decode
*
* Check if the input buffer is long enough to enable us to decode
* 'nbytes' more bytes of data starting at the current position.
 * If so, return a pointer to the data (copying it into the scratch
 * buffer when it straddles buffers) and advance the current position;
 * otherwise return NULL.
*/
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p;
if (nbytes == 0)
return xdr->p;
if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
return NULL;
p = __xdr_inline_decode(xdr, nbytes);
if (p != NULL)
return p;
return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
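/*
 * Example (a minimal sketch, not from a real caller): decoding a 32-bit
 * length followed by that many opaque bytes. The error value used on short
 * input is a hypothetical choice:
 *
 *	__be32 *p = xdr_inline_decode(xdr, 4);
 *	u32 len;
 *
 *	if (p == NULL)
 *		return -EIO;
 *	len = be32_to_cpup(p);
 *	p = xdr_inline_decode(xdr, len);
 *	if (p == NULL)
 *		return -EIO;
 */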
/**
* xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
* @xdr: pointer to xdr_stream struct
* @len: number of bytes of page data
*
* Moves data beyond the current pointer position from the XDR head[] buffer
* into the page list. Any data that lies beyond current position + "len"
* bytes is moved into the XDR tail[].
*/
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
struct kvec *iov;
ssize_t shift;
unsigned int end;
int padding;
/* Realign pages to current pointer position */
iov = buf->head;
shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
if (shift > 0)
xdr_shrink_bufhead(buf, shift);
/* Truncate page data and move it into the tail */
if (buf->page_len > len)
xdr_shrink_pagelen(buf, buf->page_len - len);
padding = (XDR_QUADLEN(len) << 2) - len;
xdr->iov = iov = buf->tail;
/* Compute remaining message length. */
end = iov->iov_len;
shift = buf->buflen - buf->len;
if (shift < end)
end -= shift;
else if (shift > 0)
end = 0;
/*
* Position current pointer at beginning of tail, and
* set remaining message length.
*/
xdr->p = (__be32 *)((char *)iov->iov_base + padding);
xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
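/*
 * Example (a minimal sketch): a reply decoder that expects 'count' bytes of
 * page data would typically call
 *
 *	xdr_read_pages(xdr, count);
 *
 * and then keep decoding the trailing fields, since the stream is left
 * positioned at the beginning of the tail, past the XDR padding.
 */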
/**
* xdr_enter_page - decode data from the XDR page
* @xdr: pointer to xdr_stream struct
* @len: number of bytes of page data
*
* Moves data beyond the current pointer position from the XDR head[] buffer
* into the page list. Any data that lies beyond current position + "len"
* bytes is moved into the XDR tail[]. The current pointer is then
* repositioned at the beginning of the first XDR page.
*/
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
xdr_read_pages(xdr, len);
/*
 * Position current pointer at beginning of the first page,
 * and set remaining message length.
*/
xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
buf->head[0] = *iov;
buf->tail[0] = empty_iov;
buf->page_len = 0;
buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
unsigned int base, unsigned int len)
{
subbuf->buflen = subbuf->len = len;
if (base < buf->head[0].iov_len) {
subbuf->head[0].iov_base = buf->head[0].iov_base + base;
subbuf->head[0].iov_len = min_t(unsigned int, len,
buf->head[0].iov_len - base);
len -= subbuf->head[0].iov_len;
base = 0;
} else {
subbuf->head[0].iov_base = NULL;
subbuf->head[0].iov_len = 0;
base -= buf->head[0].iov_len;
}
if (base < buf->page_len) {
subbuf->page_len = min(buf->page_len - base, len);
base += buf->page_base;
subbuf->page_base = base & ~PAGE_CACHE_MASK;
subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
len -= subbuf->page_len;
base = 0;
} else {
base -= buf->page_len;
subbuf->page_len = 0;
}
if (base < buf->tail[0].iov_len) {
subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
subbuf->tail[0].iov_len = min_t(unsigned int, len,
buf->tail[0].iov_len - base);
len -= subbuf->tail[0].iov_len;
base = 0;
} else {
subbuf->tail[0].iov_base = NULL;
subbuf->tail[0].iov_len = 0;
base -= buf->tail[0].iov_len;
}
if (base || len)
return -1;
return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
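/*
 * Example (a minimal sketch): carving out bytes 16..47 of a buffer,
 * wherever they happen to live (head, pages or tail). The error handling
 * shown is a hypothetical choice:
 *
 *	struct xdr_buf sub;
 *
 *	if (xdr_buf_subsegment(buf, &sub, 16, 32) != 0)
 *		return -EINVAL;
 */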
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
unsigned int this_len;
this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
memcpy(obj, subbuf->head[0].iov_base, this_len);
len -= this_len;
obj += this_len;
this_len = min_t(unsigned int, len, subbuf->page_len);
if (this_len)
_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
len -= this_len;
obj += this_len;
this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
memcpy(obj, subbuf->tail[0].iov_base, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
struct xdr_buf subbuf;
int status;
status = xdr_buf_subsegment(buf, &subbuf, base, len);
if (status != 0)
return status;
__read_bytes_from_xdr_buf(&subbuf, obj, len);
return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
unsigned int this_len;
this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
memcpy(subbuf->head[0].iov_base, obj, this_len);
len -= this_len;
obj += this_len;
this_len = min_t(unsigned int, len, subbuf->page_len);
if (this_len)
_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
len -= this_len;
obj += this_len;
this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
memcpy(subbuf->tail[0].iov_base, obj, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
struct xdr_buf subbuf;
int status;
status = xdr_buf_subsegment(buf, &subbuf, base, len);
if (status != 0)
return status;
__write_bytes_to_xdr_buf(&subbuf, obj, len);
return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
__be32 raw;
int status;
status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
if (status)
return status;
*obj = be32_to_cpu(raw);
return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);
int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
__be32 raw = cpu_to_be32(obj);
return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
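/*
 * Example (a minimal sketch): incrementing the 32-bit word at byte offset 4
 * of a buffer; the two helpers above take care of the byte-order conversion:
 *
 *	u32 word;
 *
 *	if (xdr_decode_word(buf, 4, &word) == 0)
 *		xdr_encode_word(buf, 4, word + 1);
 */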
/* If the netobj starting offset bytes from the start of xdr_buf is contained
* entirely in the head or the tail, set object to point to it; otherwise
* try to find space for it at the end of the tail, copy it there, and
* set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
struct xdr_buf subbuf;
if (xdr_decode_word(buf, offset, &obj->len))
return -EFAULT;
if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
return -EFAULT;
/* Is the obj contained entirely in the head? */
obj->data = subbuf.head[0].iov_base;
if (subbuf.head[0].iov_len == obj->len)
return 0;
/* ..or is the obj contained entirely in the tail? */
obj->data = subbuf.tail[0].iov_base;
if (subbuf.tail[0].iov_len == obj->len)
return 0;
/* use end of tail as storage for obj:
* (We don't copy to the beginning because then we'd have
* to worry about doing a potentially overlapping copy.
* This assumes the object is at most half the length of the
* tail.) */
if (obj->len > buf->buflen - buf->len)
return -ENOMEM;
if (buf->tail[0].iov_len != 0)
obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
else
obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc, int encode)
{
char *elem = NULL, *c;
unsigned int copied = 0, todo, avail_here;
struct page **ppages = NULL;
int err;
if (encode) {
if (xdr_encode_word(buf, base, desc->array_len) != 0)
return -EINVAL;
} else {
if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
desc->array_len > desc->array_maxlen ||
(unsigned long) base + 4 + desc->array_len *
desc->elem_size > buf->len)
return -EINVAL;
}
base += 4;
if (!desc->xcode)
return 0;
todo = desc->array_len * desc->elem_size;
/* process head */
if (todo && base < buf->head->iov_len) {
c = buf->head->iov_base + base;
avail_here = min_t(unsigned int, todo,
buf->head->iov_len - base);
todo -= avail_here;
while (avail_here >= desc->elem_size) {
err = desc->xcode(desc, c);
if (err)
goto out;
c += desc->elem_size;
avail_here -= desc->elem_size;
}
if (avail_here) {
if (!elem) {
elem = kmalloc(desc->elem_size, GFP_KERNEL);
err = -ENOMEM;
if (!elem)
goto out;
}
if (encode) {
err = desc->xcode(desc, elem);
if (err)
goto out;
memcpy(c, elem, avail_here);
} else
memcpy(elem, c, avail_here);
copied = avail_here;
}
base = buf->head->iov_len; /* align to start of pages */
}
/* process pages array */
base -= buf->head->iov_len;
if (todo && base < buf->page_len) {
unsigned int avail_page;
avail_here = min(todo, buf->page_len - base);
todo -= avail_here;
base += buf->page_base;
ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
base &= ~PAGE_CACHE_MASK;
avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
avail_here);
c = kmap(*ppages) + base;
while (avail_here) {
avail_here -= avail_page;
if (copied || avail_page < desc->elem_size) {
unsigned int l = min(avail_page,
desc->elem_size - copied);
if (!elem) {
elem = kmalloc(desc->elem_size,
GFP_KERNEL);
err = -ENOMEM;
if (!elem)
goto out;
}
if (encode) {
if (!copied) {
err = desc->xcode(desc, elem);
if (err)
goto out;
}
memcpy(c, elem + copied, l);
copied += l;
if (copied == desc->elem_size)
copied = 0;
} else {
memcpy(elem + copied, c, l);
copied += l;
if (copied == desc->elem_size) {
err = desc->xcode(desc, elem);
if (err)
goto out;
copied = 0;
}
}
avail_page -= l;
c += l;
}
while (avail_page >= desc->elem_size) {
err = desc->xcode(desc, c);
if (err)
goto out;
c += desc->elem_size;
avail_page -= desc->elem_size;
}
if (avail_page) {
unsigned int l = min(avail_page,
desc->elem_size - copied);
if (!elem) {
elem = kmalloc(desc->elem_size,
GFP_KERNEL);
err = -ENOMEM;
if (!elem)
goto out;
}
if (encode) {
if (!copied) {
err = desc->xcode(desc, elem);
if (err)
goto out;
}
memcpy(c, elem + copied, l);
copied += l;
if (copied == desc->elem_size)
copied = 0;
} else {
memcpy(elem + copied, c, l);
copied += l;
if (copied == desc->elem_size) {
err = desc->xcode(desc, elem);
if (err)
goto out;
copied = 0;
}
}
}
if (avail_here) {
kunmap(*ppages);
ppages++;
c = kmap(*ppages);
}
avail_page = min(avail_here,
(unsigned int) PAGE_CACHE_SIZE);
}
base = buf->page_len; /* align to start of tail */
}
/* process tail */
base -= buf->page_len;
if (todo) {
c = buf->tail->iov_base + base;
if (copied) {
unsigned int l = desc->elem_size - copied;
if (encode)
memcpy(c, elem + copied, l);
else {
memcpy(elem + copied, c, l);
err = desc->xcode(desc, elem);
if (err)
goto out;
}
todo -= l;
c += l;
}
while (todo) {
err = desc->xcode(desc, c);
if (err)
goto out;
c += desc->elem_size;
todo -= desc->elem_size;
}
}
err = 0;
out:
kfree(elem);
if (ppages)
kunmap(*ppages);
return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc)
{
if (base >= buf->len)
return -EINVAL;
return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);
int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc)
{
if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
buf->head->iov_len + buf->page_len + buf->tail->iov_len)
return -EINVAL;
return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
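/*
 * Example (a minimal sketch): decoding an array of 8-byte elements with a
 * caller-supplied per-element callback; 'my_elem_decoder' is a made-up name
 * for a function matching the desc->xcode hook used above:
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size    = 8,
 *		.array_maxlen = 64,
 *		.xcode        = my_elem_decoder,
 *	};
 *
 *	err = xdr_decode_array2(buf, offset, &desc);
 */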
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
int (*actor)(struct scatterlist *, void *), void *data)
{
int i, ret = 0;
unsigned page_len, thislen, page_offset;
struct scatterlist sg[1];
sg_init_table(sg, 1);
if (offset >= buf->head[0].iov_len) {
offset -= buf->head[0].iov_len;
} else {
thislen = buf->head[0].iov_len - offset;
if (thislen > len)
thislen = len;
sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
ret = actor(sg, data);
if (ret)
goto out;
offset = 0;
len -= thislen;
}
if (len == 0)
goto out;
if (offset >= buf->page_len) {
offset -= buf->page_len;
} else {
page_len = buf->page_len - offset;
if (page_len > len)
page_len = len;
len -= page_len;
page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
thislen = PAGE_CACHE_SIZE - page_offset;
do {
if (thislen > page_len)
thislen = page_len;
sg_set_page(sg, buf->pages[i], thislen, page_offset);
ret = actor(sg, data);
if (ret)
goto out;
page_len -= thislen;
i++;
page_offset = 0;
thislen = PAGE_CACHE_SIZE;
} while (page_len != 0);
offset = 0;
}
if (len == 0)
goto out;
if (offset < buf->tail[0].iov_len) {
thislen = buf->tail[0].iov_len - offset;
if (thislen > len)
thislen = len;
sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
ret = actor(sg, data);
len -= thislen;
}
if (len != 0)
ret = -EINVAL;
out:
return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
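/*
 * Example (a minimal sketch): because xdr_process_buf() presents the buffer
 * as a series of single-entry scatterlists, a digest over bytes
 * [offset, offset + len) can be written as an actor callback. The names
 * below are hypothetical:
 *
 *	static int sum_actor(struct scatterlist *sg, void *data)
 *	{
 *		... update a running digest in *data from sg ...
 *		return 0;
 *	}
 *
 *	err = xdr_process_buf(buf, offset, len, sum_actor, &ctx);
 */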
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3527_4 |
crossvul-cpp_data_bad_2020_8 | /*-------------------------------------------------------------------------
*
* geo_ops.c
* 2D geometric operations
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/utils/adt/geo_ops.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <math.h>
#include <limits.h>
#include <float.h>
#include <ctype.h>
#include "libpq/pqformat.h"
#include "utils/builtins.h"
#include "utils/geo_decls.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/*
* Internal routines
*/
enum path_delim { PATH_NONE, PATH_OPEN, PATH_CLOSED };
static int point_inside(Point *p, int npts, Point *plist);
static int lseg_crossing(double x, double y, double px, double py);
static BOX *box_construct(double x1, double x2, double y1, double y2);
static BOX *box_copy(BOX *box);
static BOX *box_fill(BOX *result, double x1, double x2, double y1, double y2);
static bool box_ov(BOX *box1, BOX *box2);
static double box_ht(BOX *box);
static double box_wd(BOX *box);
static double circle_ar(CIRCLE *circle);
static CIRCLE *circle_copy(CIRCLE *circle);
static LINE *line_construct_pm(Point *pt, double m);
static void line_construct_pts(LINE *line, Point *pt1, Point *pt2);
static bool lseg_intersect_internal(LSEG *l1, LSEG *l2);
static double lseg_dt(LSEG *l1, LSEG *l2);
static bool on_ps_internal(Point *pt, LSEG *lseg);
static void make_bound_box(POLYGON *poly);
static bool plist_same(int npts, Point *p1, Point *p2);
static Point *point_construct(double x, double y);
static Point *point_copy(Point *pt);
static int single_decode(char *str, float8 *x, char **ss);
static int single_encode(float8 x, char *str);
static int pair_decode(char *str, float8 *x, float8 *y, char **s);
static int pair_encode(float8 x, float8 y, char *str);
static int pair_count(char *s, char delim);
static int path_decode(int opentype, int npts, char *str, int *isopen, char **ss, Point *p);
static char *path_encode(enum path_delim path_delim, int npts, Point *pt);
static void statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2);
static double box_ar(BOX *box);
static void box_cn(Point *center, BOX *box);
static Point *interpt_sl(LSEG *lseg, LINE *line);
static bool has_interpt_sl(LSEG *lseg, LINE *line);
static double dist_pl_internal(Point *pt, LINE *line);
static double dist_ps_internal(Point *pt, LSEG *lseg);
static Point *line_interpt_internal(LINE *l1, LINE *l2);
static bool lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start);
static Point *lseg_interpt_internal(LSEG *l1, LSEG *l2);
/*
* Delimiters for input and output strings.
* LDELIM, RDELIM, and DELIM are left, right, and separator delimiters, respectively.
* LDELIM_EP, RDELIM_EP are left and right delimiters for paths with endpoints.
*/
#define LDELIM '('
#define RDELIM ')'
#define DELIM ','
#define LDELIM_EP '['
#define RDELIM_EP ']'
#define LDELIM_C '<'
#define RDELIM_C '>'
/* Maximum number of characters printed by pair_encode() */
/* ...+3+7 : 3 accounts for extra_float_digits max value */
#define P_MAXLEN (2*(DBL_DIG+3+7)+1)
/*
* Geometric data types are composed of points.
* This code tries to support a common format throughout the data types,
* to allow for more predictable usage and data type conversion.
* The fundamental unit is the point. Other units are line segments,
* open paths, boxes, closed paths, and polygons (which should be considered
* non-intersecting closed paths).
*
* Data representation is as follows:
* point: (x,y)
* line segment: [(x1,y1),(x2,y2)]
* box: (x1,y1),(x2,y2)
* open path: [(x1,y1),...,(xn,yn)]
* closed path: ((x1,y1),...,(xn,yn))
* polygon: ((x1,y1),...,(xn,yn))
*
* For boxes, the points are opposite corners with the first point at the top right.
* For closed paths and polygons, the points should be reordered to allow
* fast and correct equality comparisons.
*
* XXX perhaps points in complex shapes should be reordered internally
* to allow faster internal operations, but should keep track of input order
* and restore that order for text output - tgl 97/01/16
*/
static int
single_decode(char *str, float8 *x, char **s)
{
char *cp;
if (!PointerIsValid(str))
return FALSE;
while (isspace((unsigned char) *str))
str++;
*x = strtod(str, &cp);
#ifdef GEODEBUG
printf("single_decode- (%x) try decoding %s to %g\n", (cp - str), str, *x);
#endif
if (cp <= str)
return FALSE;
while (isspace((unsigned char) *cp))
cp++;
if (s != NULL)
*s = cp;
return TRUE;
} /* single_decode() */
static int
single_encode(float8 x, char *str)
{
int ndig = DBL_DIG + extra_float_digits;
if (ndig < 1)
ndig = 1;
sprintf(str, "%.*g", ndig, x);
return TRUE;
} /* single_encode() */
static int
pair_decode(char *str, float8 *x, float8 *y, char **s)
{
int has_delim;
char *cp;
if (!PointerIsValid(str))
return FALSE;
while (isspace((unsigned char) *str))
str++;
if ((has_delim = (*str == LDELIM)))
str++;
while (isspace((unsigned char) *str))
str++;
*x = strtod(str, &cp);
if (cp <= str)
return FALSE;
while (isspace((unsigned char) *cp))
cp++;
if (*cp++ != DELIM)
return FALSE;
while (isspace((unsigned char) *cp))
cp++;
*y = strtod(cp, &str);
if (str <= cp)
return FALSE;
while (isspace((unsigned char) *str))
str++;
if (has_delim)
{
if (*str != RDELIM)
return FALSE;
str++;
while (isspace((unsigned char) *str))
str++;
}
if (s != NULL)
*s = str;
return TRUE;
}
static int
pair_encode(float8 x, float8 y, char *str)
{
int ndig = DBL_DIG + extra_float_digits;
if (ndig < 1)
ndig = 1;
sprintf(str, "%.*g,%.*g", ndig, x, ndig, y);
return TRUE;
}
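/*
 * For example, with the default extra_float_digits = 0 on a platform where
 * DBL_DIG is 15, pair_encode(1.0 / 3.0, 2.0, str) produces the string
 * "0.333333333333333,2".
 */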
static int
path_decode(int opentype, int npts, char *str, int *isopen, char **ss, Point *p)
{
int depth = 0;
char *s,
*cp;
int i;
s = str;
while (isspace((unsigned char) *s))
s++;
if ((*isopen = (*s == LDELIM_EP)))
{
/* no open delimiter allowed? */
if (!opentype)
return FALSE;
depth++;
s++;
while (isspace((unsigned char) *s))
s++;
}
else if (*s == LDELIM)
{
cp = (s + 1);
while (isspace((unsigned char) *cp))
cp++;
if (*cp == LDELIM)
{
#ifdef NOT_USED
/* nested delimiters with only one point? */
if (npts <= 1)
return FALSE;
#endif
depth++;
s = cp;
}
else if (strrchr(s, LDELIM) == s)
{
depth++;
s = cp;
}
}
for (i = 0; i < npts; i++)
{
if (!pair_decode(s, &(p->x), &(p->y), &s))
return FALSE;
if (*s == DELIM)
s++;
p++;
}
while (depth > 0)
{
if ((*s == RDELIM)
|| ((*s == RDELIM_EP) && (*isopen) && (depth == 1)))
{
depth--;
s++;
while (isspace((unsigned char) *s))
s++;
}
else
return FALSE;
}
*ss = s;
return TRUE;
} /* path_decode() */
static char *
path_encode(enum path_delim path_delim, int npts, Point *pt)
{
int size = npts * (P_MAXLEN + 3) + 2;
char *result;
char *cp;
int i;
/* Check for integer overflow */
if ((size - 2) / npts != (P_MAXLEN + 3))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
result = palloc(size);
cp = result;
switch (path_delim)
{
case PATH_CLOSED:
*cp++ = LDELIM;
break;
case PATH_OPEN:
*cp++ = LDELIM_EP;
break;
case PATH_NONE:
break;
}
for (i = 0; i < npts; i++)
{
*cp++ = LDELIM;
if (!pair_encode(pt->x, pt->y, cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not format \"path\" value")));
cp += strlen(cp);
*cp++ = RDELIM;
*cp++ = DELIM;
pt++;
}
cp--;
switch (path_delim)
{
case PATH_CLOSED:
*cp++ = RDELIM;
break;
case PATH_OPEN:
*cp++ = RDELIM_EP;
break;
case PATH_NONE:
break;
}
*cp = '\0';
return result;
} /* path_encode() */
/*-------------------------------------------------------------
* pair_count - count the number of points
* allow the following notation:
* '((1,2),(3,4))'
* '(1,3,2,4)'
* require an odd number of delim characters in the string
*-------------------------------------------------------------*/
static int
pair_count(char *s, char delim)
{
int ndelim = 0;
while ((s = strchr(s, delim)) != NULL)
{
ndelim++;
s++;
}
return (ndelim % 2) ? ((ndelim + 1) / 2) : -1;
}
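/*
 * Worked example: "((1,2),(3,4))" contains three commas, so pair_count()
 * reports (3 + 1) / 2 = 2 points, while "(1,2,3)" contains an even number
 * of commas and is rejected with -1.
 */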
/***********************************************************************
**
** Routines for two-dimensional boxes.
**
***********************************************************************/
/*----------------------------------------------------------
* Formatting and conversion routines.
*---------------------------------------------------------*/
/* box_in - convert a string to internal form.
*
* External format: (two corners of box)
* "(f8, f8), (f8, f8)"
* also supports the older style "(f8, f8, f8, f8)"
*/
Datum
box_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
BOX *box = (BOX *) palloc(sizeof(BOX));
int isopen;
char *s;
double x,
y;
if ((!path_decode(FALSE, 2, str, &isopen, &s, &(box->high)))
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type box: \"%s\"", str)));
/* reorder corners if necessary... */
if (box->high.x < box->low.x)
{
x = box->high.x;
box->high.x = box->low.x;
box->low.x = x;
}
if (box->high.y < box->low.y)
{
y = box->high.y;
box->high.y = box->low.y;
box->low.y = y;
}
PG_RETURN_BOX_P(box);
}
/* box_out - convert a box to external form.
*/
Datum
box_out(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_CSTRING(path_encode(PATH_NONE, 2, &(box->high)));
}
/*
* box_recv - converts external binary format to box
*/
Datum
box_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
BOX *box;
double x,
y;
box = (BOX *) palloc(sizeof(BOX));
box->high.x = pq_getmsgfloat8(buf);
box->high.y = pq_getmsgfloat8(buf);
box->low.x = pq_getmsgfloat8(buf);
box->low.y = pq_getmsgfloat8(buf);
/* reorder corners if necessary... */
if (box->high.x < box->low.x)
{
x = box->high.x;
box->high.x = box->low.x;
box->low.x = x;
}
if (box->high.y < box->low.y)
{
y = box->high.y;
box->high.y = box->low.y;
box->low.y = y;
}
PG_RETURN_BOX_P(box);
}
/*
* box_send - converts box to binary format
*/
Datum
box_send(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, box->high.x);
pq_sendfloat8(&buf, box->high.y);
pq_sendfloat8(&buf, box->low.x);
pq_sendfloat8(&buf, box->low.y);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/* box_construct - fill in a new box.
*/
static BOX *
box_construct(double x1, double x2, double y1, double y2)
{
BOX *result = (BOX *) palloc(sizeof(BOX));
return box_fill(result, x1, x2, y1, y2);
}
/* box_fill - fill in a given box struct
*/
static BOX *
box_fill(BOX *result, double x1, double x2, double y1, double y2)
{
if (x1 > x2)
{
result->high.x = x1;
result->low.x = x2;
}
else
{
result->high.x = x2;
result->low.x = x1;
}
if (y1 > y2)
{
result->high.y = y1;
result->low.y = y2;
}
else
{
result->high.y = y2;
result->low.y = y1;
}
return result;
}
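/*
 * For example, box_fill(result, 2, 0, 1, 3) normalizes the corners so that
 * result->high = (2,3) and result->low = (0,1), whatever order the
 * coordinates were supplied in.
 */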
/* box_copy - copy a box
*/
static BOX *
box_copy(BOX *box)
{
BOX *result = (BOX *) palloc(sizeof(BOX));
memcpy((char *) result, (char *) box, sizeof(BOX));
return result;
}
/*----------------------------------------------------------
* Relational operators for BOXes.
* <, >, <=, >=, and == are based on box area.
*---------------------------------------------------------*/
/* box_same - are two boxes identical?
*/
Datum
box_same(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPeq(box1->high.x, box2->high.x) &&
FPeq(box1->low.x, box2->low.x) &&
FPeq(box1->high.y, box2->high.y) &&
FPeq(box1->low.y, box2->low.y));
}
/* box_overlap - does box1 overlap box2?
*/
Datum
box_overlap(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(box_ov(box1, box2));
}
static bool
box_ov(BOX *box1, BOX *box2)
{
return (FPle(box1->low.x, box2->high.x) &&
FPle(box2->low.x, box1->high.x) &&
FPle(box1->low.y, box2->high.y) &&
FPle(box2->low.y, box1->high.y));
}
/* box_left - is box1 strictly left of box2?
*/
Datum
box_left(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPlt(box1->high.x, box2->low.x));
}
/* box_overleft - is the right edge of box1 at or left of
* the right edge of box2?
*
* This is "less than or equal" for the end of a time range,
* when time ranges are stored as rectangles.
*/
Datum
box_overleft(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box1->high.x, box2->high.x));
}
/* box_right - is box1 strictly right of box2?
*/
Datum
box_right(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPgt(box1->low.x, box2->high.x));
}
/* box_overright - is the left edge of box1 at or right of
* the left edge of box2?
*
* This is "greater than or equal" for time ranges, when time ranges
* are stored as rectangles.
*/
Datum
box_overright(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box1->low.x, box2->low.x));
}
/* box_below - is box1 strictly below box2?
*/
Datum
box_below(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPlt(box1->high.y, box2->low.y));
}
/* box_overbelow - is the upper edge of box1 at or below
* the upper edge of box2?
*/
Datum
box_overbelow(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box1->high.y, box2->high.y));
}
/* box_above - is box1 strictly above box2?
*/
Datum
box_above(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPgt(box1->low.y, box2->high.y));
}
/* box_overabove - is the lower edge of box1 at or above
* the lower edge of box2?
*/
Datum
box_overabove(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box1->low.y, box2->low.y));
}
/* box_contained - is box1 contained by box2?
*/
Datum
box_contained(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box1->high.x, box2->high.x) &&
FPge(box1->low.x, box2->low.x) &&
FPle(box1->high.y, box2->high.y) &&
FPge(box1->low.y, box2->low.y));
}
/* box_contain - does box1 contain box2?
*/
Datum
box_contain(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box1->high.x, box2->high.x) &&
FPle(box1->low.x, box2->low.x) &&
FPge(box1->high.y, box2->high.y) &&
FPle(box1->low.y, box2->low.y));
}
/* box_positionop -
* is box1 entirely {above,below} box2?
*
* box_below_eq and box_above_eq are obsolete versions that (probably
* erroneously) accept the equal-boundaries case. Since these are not
* in sync with the box_left and box_right code, they are deprecated and
* not supported in the PG 8.1 rtree operator class extension.
*/
Datum
box_below_eq(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box1->high.y, box2->low.y));
}
Datum
box_above_eq(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box1->low.y, box2->high.y));
}
/* box_relop - is area(box1) relop area(box2), within
* our accuracy constraint?
*/
Datum
box_lt(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPlt(box_ar(box1), box_ar(box2)));
}
Datum
box_gt(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPgt(box_ar(box1), box_ar(box2)));
}
Datum
box_eq(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPeq(box_ar(box1), box_ar(box2)));
}
Datum
box_le(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box_ar(box1), box_ar(box2)));
}
Datum
box_ge(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box_ar(box1), box_ar(box2)));
}
/*----------------------------------------------------------
* "Arithmetic" operators on boxes.
*---------------------------------------------------------*/
/* box_area - returns the area of the box.
*/
Datum
box_area(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_FLOAT8(box_ar(box));
}
/* box_width - returns the width of the box
* (horizontal magnitude).
*/
Datum
box_width(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_FLOAT8(box->high.x - box->low.x);
}
/* box_height - returns the height of the box
* (vertical magnitude).
*/
Datum
box_height(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_FLOAT8(box->high.y - box->low.y);
}
/* box_distance - returns the distance between the
* center points of two boxes.
*/
Datum
box_distance(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
Point a,
b;
box_cn(&a, box1);
box_cn(&b, box2);
PG_RETURN_FLOAT8(HYPOT(a.x - b.x, a.y - b.y));
}
/* box_center - returns the center point of the box.
*/
Datum
box_center(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *result = (Point *) palloc(sizeof(Point));
box_cn(result, box);
PG_RETURN_POINT_P(result);
}
/* box_ar - returns the area of the box.
*/
static double
box_ar(BOX *box)
{
return box_wd(box) * box_ht(box);
}
/* box_cn - stores the centerpoint of the box into *center.
*/
static void
box_cn(Point *center, BOX *box)
{
center->x = (box->high.x + box->low.x) / 2.0;
center->y = (box->high.y + box->low.y) / 2.0;
}
/* box_wd - returns the width (length) of the box
* (horizontal magnitude).
*/
static double
box_wd(BOX *box)
{
return box->high.x - box->low.x;
}
/* box_ht - returns the height of the box
* (vertical magnitude).
*/
static double
box_ht(BOX *box)
{
return box->high.y - box->low.y;
}
/*----------------------------------------------------------
* Funky operations.
*---------------------------------------------------------*/
/* box_intersect -
* returns the overlapping portion of two boxes,
* or NULL if they do not intersect.
*/
Datum
box_intersect(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
BOX *result;
if (!box_ov(box1, box2))
PG_RETURN_NULL();
result = (BOX *) palloc(sizeof(BOX));
result->high.x = Min(box1->high.x, box2->high.x);
result->low.x = Max(box1->low.x, box2->low.x);
result->high.y = Min(box1->high.y, box2->high.y);
result->low.y = Max(box1->low.y, box2->low.y);
PG_RETURN_BOX_P(result);
}
/* box_diagonal -
* returns a line segment which happens to be the
* positive-slope diagonal of "box".
*/
Datum
box_diagonal(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
LSEG *result = (LSEG *) palloc(sizeof(LSEG));
statlseg_construct(result, &box->high, &box->low);
PG_RETURN_LSEG_P(result);
}
/***********************************************************************
**
** Routines for 2D lines.
**
***********************************************************************/
static bool
line_decode(const char *str, LINE *line)
{
char *tail;
while (isspace((unsigned char) *str))
str++;
if (*str++ != '{')
return false;
line->A = strtod(str, &tail);
if (tail <= str)
return false;
str = tail;
while (isspace((unsigned char) *str))
str++;
if (*str++ != DELIM)
return false;
line->B = strtod(str, &tail);
if (tail <= str)
return false;
str = tail;
while (isspace((unsigned char) *str))
str++;
if (*str++ != DELIM)
return false;
line->C = strtod(str, &tail);
if (tail <= str)
return false;
str = tail;
while (isspace((unsigned char) *str))
str++;
if (*str++ != '}')
return false;
while (isspace((unsigned char) *str))
str++;
if (*str)
return false;
return true;
}
Datum
line_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
LINE *line;
LSEG lseg;
int isopen;
char *s;
line = (LINE *) palloc(sizeof(LINE));
if (path_decode(TRUE, 2, str, &isopen, &s, &(lseg.p[0])) && *s == '\0')
{
if (FPeq(lseg.p[0].x, lseg.p[1].x) && FPeq(lseg.p[0].y, lseg.p[1].y))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid line specification: must be two distinct points")));
line_construct_pts(line, &lseg.p[0], &lseg.p[1]);
}
else if (line_decode(str, line))
{
if (FPzero(line->A) && FPzero(line->B))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid line specification: A and B cannot both be zero")));
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type line: \"%s\"", str)));
PG_RETURN_LINE_P(line);
}
Datum
line_out(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
int ndig = DBL_DIG + extra_float_digits;
if (ndig < 1)
ndig = 1;
PG_RETURN_CSTRING(psprintf("{%.*g,%.*g,%.*g}", ndig, line->A, ndig, line->B, ndig, line->C));
}
/*
* line_recv - converts external binary format to line
*/
Datum
line_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
LINE *line;
line = (LINE *) palloc(sizeof(LINE));
line->A = pq_getmsgfloat8(buf);
line->B = pq_getmsgfloat8(buf);
line->C = pq_getmsgfloat8(buf);
PG_RETURN_LINE_P(line);
}
/*
* line_send - converts line to binary format
*/
Datum
line_send(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, line->A);
pq_sendfloat8(&buf, line->B);
pq_sendfloat8(&buf, line->C);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*----------------------------------------------------------
* Conversion routines from one line formula to internal.
* Internal form: Ax+By+C=0
*---------------------------------------------------------*/
/* line_construct_pm()
* point-slope
*/
static LINE *
line_construct_pm(Point *pt, double m)
{
LINE *result = (LINE *) palloc(sizeof(LINE));
if (m == DBL_MAX)
{
/* vertical - use "x = C" */
result->A = -1;
result->B = 0;
result->C = pt->x;
}
else
{
/* use "mx - y + yinter = 0" */
result->A = m;
result->B = -1.0;
result->C = pt->y - m * pt->x;
}
return result;
}
/*
* Fill already-allocated LINE struct from two points on the line
*/
static void
line_construct_pts(LINE *line, Point *pt1, Point *pt2)
{
if (FPeq(pt1->x, pt2->x))
{ /* vertical */
/* use "x = C" */
line->A = -1;
line->B = 0;
line->C = pt1->x;
#ifdef GEODEBUG
printf("line_construct_pts- line is vertical\n");
#endif
}
else if (FPeq(pt1->y, pt2->y))
{ /* horizontal */
/* use "y = C" */
line->A = 0;
line->B = -1;
line->C = pt1->y;
#ifdef GEODEBUG
printf("line_construct_pts- line is horizontal\n");
#endif
}
else
{
/* use "mx - y + yinter = 0" */
line->A = (pt2->y - pt1->y) / (pt2->x - pt1->x);
line->B = -1.0;
line->C = pt1->y - line->A * pt1->x;
/* on some platforms, the preceding expression tends to produce -0 */
if (line->C == 0.0)
line->C = 0.0;
#ifdef GEODEBUG
printf("line_construct_pts- line is neither vertical nor horizontal (diffs x=%.*g, y=%.*g\n",
DBL_DIG, (pt2->x - pt1->x), DBL_DIG, (pt2->y - pt1->y));
#endif
}
}
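/*
 * Worked example: the line through (0,0) and (2,1) is neither vertical nor
 * horizontal, so the general branch yields A = (1 - 0) / (2 - 0) = 0.5,
 * B = -1 and C = 0 - 0.5 * 0 = 0, i.e. the formula 0.5x - y = 0.
 */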
/* line_construct_pp()
* two points
*/
Datum
line_construct_pp(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
LINE *result = (LINE *) palloc(sizeof(LINE));
line_construct_pts(result, pt1, pt2);
PG_RETURN_LINE_P(result);
}
/*----------------------------------------------------------
* Relative position routines.
*---------------------------------------------------------*/
Datum
line_intersect(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(!DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
LinePGetDatum(l2))));
}
Datum
line_parallel(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
if (FPzero(l1->B))
PG_RETURN_BOOL(FPzero(l2->B));
PG_RETURN_BOOL(FPeq(l2->A, l1->A * (l2->B / l1->B)));
}
Datum
line_perp(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
if (FPzero(l1->A))
PG_RETURN_BOOL(FPzero(l2->B));
else if (FPzero(l1->B))
PG_RETURN_BOOL(FPzero(l2->A));
PG_RETURN_BOOL(FPeq(((l1->A * l2->B) / (l1->B * l2->A)), -1.0));
}
Datum
line_vertical(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
PG_RETURN_BOOL(FPzero(line->B));
}
Datum
line_horizontal(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
PG_RETURN_BOOL(FPzero(line->A));
}
Datum
line_eq(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
double k;
if (!FPzero(l2->A))
k = l1->A / l2->A;
else if (!FPzero(l2->B))
k = l1->B / l2->B;
else if (!FPzero(l2->C))
k = l1->C / l2->C;
else
k = 1.0;
PG_RETURN_BOOL(FPeq(l1->A, k * l2->A) &&
FPeq(l1->B, k * l2->B) &&
FPeq(l1->C, k * l2->C));
}
/*----------------------------------------------------------
* Line arithmetic routines.
*---------------------------------------------------------*/
/* line_distance()
* Distance between two lines.
*/
Datum
line_distance(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
float8 result;
Point *tmp;
if (!DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
LinePGetDatum(l2))))
PG_RETURN_FLOAT8(0.0);
if (FPzero(l1->B)) /* vertical? */
PG_RETURN_FLOAT8(fabs(l1->C - l2->C));
tmp = point_construct(0.0, l1->C);
result = dist_pl_internal(tmp, l2);
PG_RETURN_FLOAT8(result);
}
/* line_interpt()
* Point where two lines l1, l2 intersect (if any)
*/
Datum
line_interpt(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
Point *result;
result = line_interpt_internal(l1, l2);
if (result == NULL)
PG_RETURN_NULL();
PG_RETURN_POINT_P(result);
}
/*
* Internal version of line_interpt
*
* returns a NULL pointer if no intersection point
*/
static Point *
line_interpt_internal(LINE *l1, LINE *l2)
{
Point *result;
double x,
y;
/*
* NOTE: if the lines are identical then we will find they are parallel
* and report "no intersection". This is a little weird, but since
* there's no *unique* intersection, maybe it's appropriate behavior.
*/
if (DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
LinePGetDatum(l2))))
return NULL;
if (FPzero(l1->B)) /* l1 vertical? */
{
x = l1->C;
y = (l2->A * x + l2->C);
}
else if (FPzero(l2->B)) /* l2 vertical? */
{
x = l2->C;
y = (l1->A * x + l1->C);
}
else
{
x = (l1->C - l2->C) / (l2->A - l1->A);
y = (l1->A * x + l1->C);
}
result = point_construct(x, y);
#ifdef GEODEBUG
printf("line_interpt- lines are A=%.*g, B=%.*g, C=%.*g, A=%.*g, B=%.*g, C=%.*g\n",
DBL_DIG, l1->A, DBL_DIG, l1->B, DBL_DIG, l1->C, DBL_DIG, l2->A, DBL_DIG, l2->B, DBL_DIG, l2->C);
printf("line_interpt- lines intersect at (%.*g,%.*g)\n", DBL_DIG, x, DBL_DIG, y);
#endif
return result;
}
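/*
 * Worked example: for l1 = {0.5, -1, 0} (the line y = 0.5x) and
 * l2 = {-1, 0, 2} (the vertical line x = 2), the "l2 vertical" branch sets
 * x = l2->C = 2 and y = l1->A * x + l1->C = 1, so the intersection point
 * is (2,1).
 */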
/***********************************************************************
**
** Routines for 2D paths (sequences of line segments, also
** called `polylines').
**
** This is not a general package for geometric paths,
** which of course include polygons; the emphasis here
** is on (for example) usefulness in wire layout.
**
***********************************************************************/
/*----------------------------------------------------------
* String to path / path to string conversion.
* External format:
* "((xcoord, ycoord),... )"
* "[(xcoord, ycoord),... ]"
* "(xcoord, ycoord),... "
* "[xcoord, ycoord,... ]"
* Also support older format:
* "(closed, npts, xcoord, ycoord,... )"
*---------------------------------------------------------*/
Datum
path_area(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
double area = 0.0;
int i,
j;
if (!path->closed)
PG_RETURN_NULL();
for (i = 0; i < path->npts; i++)
{
j = (i + 1) % path->npts;
area += path->p[i].x * path->p[j].y;
area -= path->p[i].y * path->p[j].x;
}
area *= 0.5;
PG_RETURN_FLOAT8(area < 0.0 ? -area : area);
}
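/*
 * path_area() above is the standard "shoelace" formula: for the closed
 * square (0,0),(2,0),(2,2),(0,2) the cross terms sum to 0 + 4 + 4 + 0 = 8,
 * and halving gives the expected area of 4.
 */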
Datum
path_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
PATH *path;
int isopen;
char *s;
int npts;
int size;
int depth = 0;
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type path: \"%s\"", str)));
s = str;
while (isspace((unsigned char) *s))
s++;
/* skip single leading paren */
if ((*s == LDELIM) && (strrchr(s, LDELIM) == s))
{
s++;
depth++;
}
/* Check for integer overflow in the size computation, as in path_recv() */
if (npts >= (int) ((INT_MAX - offsetof(PATH, p[0])) / sizeof(path->p[0])))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
size = offsetof(PATH, p[0]) + sizeof(path->p[0]) * npts;
path = (PATH *) palloc(size);
SET_VARSIZE(path, size);
path->npts = npts;
if ((!path_decode(TRUE, npts, s, &isopen, &s, &(path->p[0])))
&& (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type path: \"%s\"", str)));
path->closed = (!isopen);
/* prevent instability in unused pad bytes */
path->dummy = 0;
PG_RETURN_PATH_P(path);
}
Datum
path_out(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
PG_RETURN_CSTRING(path_encode(path->closed ? PATH_CLOSED : PATH_OPEN, path->npts, path->p));
}
/*
* path_recv - converts external binary format to path
*
* External representation is closed flag (a boolean byte), int32 number
* of points, and the points.
*/
Datum
path_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
PATH *path;
int closed;
int32 npts;
int32 i;
int size;
closed = pq_getmsgbyte(buf);
npts = pq_getmsgint(buf, sizeof(int32));
if (npts <= 0 || npts >= (int32) ((INT_MAX - offsetof(PATH, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("invalid number of points in external \"path\" value")));
size = offsetof(PATH, p[0]) + sizeof(path->p[0]) * npts;
path = (PATH *) palloc(size);
SET_VARSIZE(path, size);
path->npts = npts;
path->closed = (closed ? 1 : 0);
/* prevent instability in unused pad bytes */
path->dummy = 0;
for (i = 0; i < npts; i++)
{
path->p[i].x = pq_getmsgfloat8(buf);
path->p[i].y = pq_getmsgfloat8(buf);
}
PG_RETURN_PATH_P(path);
}
/*
* path_send - converts path to binary format
*/
Datum
path_send(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
StringInfoData buf;
int32 i;
pq_begintypsend(&buf);
pq_sendbyte(&buf, path->closed ? 1 : 0);
pq_sendint(&buf, path->npts, sizeof(int32));
for (i = 0; i < path->npts; i++)
{
pq_sendfloat8(&buf, path->p[i].x);
pq_sendfloat8(&buf, path->p[i].y);
}
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*----------------------------------------------------------
* Relational operators.
* These are based on the path cardinality,
* as stupid as that sounds.
*
* Better relops and access methods coming soon.
*---------------------------------------------------------*/
Datum
path_n_lt(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts < p2->npts);
}
Datum
path_n_gt(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts > p2->npts);
}
Datum
path_n_eq(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts == p2->npts);
}
Datum
path_n_le(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts <= p2->npts);
}
Datum
path_n_ge(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts >= p2->npts);
}
/*----------------------------------------------------------
* Conversion operators.
*---------------------------------------------------------*/
Datum
path_isclosed(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
PG_RETURN_BOOL(path->closed);
}
Datum
path_isopen(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
PG_RETURN_BOOL(!path->closed);
}
Datum
path_npoints(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
PG_RETURN_INT32(path->npts);
}
Datum
path_close(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
path->closed = TRUE;
PG_RETURN_PATH_P(path);
}
Datum
path_open(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
path->closed = FALSE;
PG_RETURN_PATH_P(path);
}
/* path_inter -
* Does p1 intersect p2 at any point?
* Use bounding boxes for a quick (O(n)) check, then do a
* O(n^2) iterative edge check.
*/
Datum
path_inter(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
BOX b1,
b2;
int i,
j;
LSEG seg1,
seg2;
if (p1->npts <= 0 || p2->npts <= 0)
PG_RETURN_BOOL(false);
b1.high.x = b1.low.x = p1->p[0].x;
b1.high.y = b1.low.y = p1->p[0].y;
for (i = 1; i < p1->npts; i++)
{
b1.high.x = Max(p1->p[i].x, b1.high.x);
b1.high.y = Max(p1->p[i].y, b1.high.y);
b1.low.x = Min(p1->p[i].x, b1.low.x);
b1.low.y = Min(p1->p[i].y, b1.low.y);
}
b2.high.x = b2.low.x = p2->p[0].x;
b2.high.y = b2.low.y = p2->p[0].y;
for (i = 1; i < p2->npts; i++)
{
b2.high.x = Max(p2->p[i].x, b2.high.x);
b2.high.y = Max(p2->p[i].y, b2.high.y);
b2.low.x = Min(p2->p[i].x, b2.low.x);
b2.low.y = Min(p2->p[i].y, b2.low.y);
}
if (!box_ov(&b1, &b2))
PG_RETURN_BOOL(false);
/* pairwise check lseg intersections */
for (i = 0; i < p1->npts; i++)
{
int iprev;
if (i > 0)
iprev = i - 1;
else
{
if (!p1->closed)
continue;
iprev = p1->npts - 1; /* include the closure segment */
}
for (j = 0; j < p2->npts; j++)
{
int jprev;
if (j > 0)
jprev = j - 1;
else
{
if (!p2->closed)
continue;
jprev = p2->npts - 1; /* include the closure segment */
}
statlseg_construct(&seg1, &p1->p[iprev], &p1->p[i]);
statlseg_construct(&seg2, &p2->p[jprev], &p2->p[j]);
if (lseg_intersect_internal(&seg1, &seg2))
PG_RETURN_BOOL(true);
}
}
/* if we dropped through, no two segs intersected */
PG_RETURN_BOOL(false);
}
/* path_distance()
* This essentially does a cartesian product of the lsegs in the
* two paths, and finds the min distance between any two lsegs
*/
Datum
path_distance(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
float8 min = 0.0; /* initialize to keep compiler quiet */
bool have_min = false;
float8 tmp;
int i,
j;
LSEG seg1,
seg2;
for (i = 0; i < p1->npts; i++)
{
int iprev;
if (i > 0)
iprev = i - 1;
else
{
if (!p1->closed)
continue;
iprev = p1->npts - 1; /* include the closure segment */
}
for (j = 0; j < p2->npts; j++)
{
int jprev;
if (j > 0)
jprev = j - 1;
else
{
if (!p2->closed)
continue;
jprev = p2->npts - 1; /* include the closure segment */
}
statlseg_construct(&seg1, &p1->p[iprev], &p1->p[i]);
statlseg_construct(&seg2, &p2->p[jprev], &p2->p[j]);
tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance,
LsegPGetDatum(&seg1),
LsegPGetDatum(&seg2)));
if (!have_min || tmp < min)
{
min = tmp;
have_min = true;
}
}
}
if (!have_min)
PG_RETURN_NULL();
PG_RETURN_FLOAT8(min);
}
/*----------------------------------------------------------
* "Arithmetic" operations.
*---------------------------------------------------------*/
Datum
path_length(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
float8 result = 0.0;
int i;
for (i = 0; i < path->npts; i++)
{
int iprev;
if (i > 0)
iprev = i - 1;
else
{
if (!path->closed)
continue;
iprev = path->npts - 1; /* include the closure segment */
}
result += point_dt(&path->p[iprev], &path->p[i]);
}
PG_RETURN_FLOAT8(result);
}
/***********************************************************************
**
** Routines for 2D points.
**
***********************************************************************/
/*----------------------------------------------------------
* String to point, point to string conversion.
* External format:
* "(x,y)"
* "x,y"
*---------------------------------------------------------*/
Datum
point_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
Point *point;
double x,
y;
char *s;
if (!pair_decode(str, &x, &y, &s) || (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type point: \"%s\"", str)));
point = (Point *) palloc(sizeof(Point));
point->x = x;
point->y = y;
PG_RETURN_POINT_P(point);
}
Datum
point_out(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
PG_RETURN_CSTRING(path_encode(PATH_NONE, 1, pt));
}
/*
* point_recv - converts external binary format to point
*/
Datum
point_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Point *point;
point = (Point *) palloc(sizeof(Point));
point->x = pq_getmsgfloat8(buf);
point->y = pq_getmsgfloat8(buf);
PG_RETURN_POINT_P(point);
}
/*
* point_send - converts point to binary format
*/
Datum
point_send(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, pt->x);
pq_sendfloat8(&buf, pt->y);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
static Point *
point_construct(double x, double y)
{
Point *result = (Point *) palloc(sizeof(Point));
result->x = x;
result->y = y;
return result;
}
static Point *
point_copy(Point *pt)
{
Point *result;
if (!PointerIsValid(pt))
return NULL;
result = (Point *) palloc(sizeof(Point));
result->x = pt->x;
result->y = pt->y;
return result;
}
/*----------------------------------------------------------
* Relational operators for Points.
* Since we do have a sense of coordinates being
* "equal" to a given accuracy (point_vert, point_horiz),
* the other ops must preserve that sense. This means
* that results may, strictly speaking, be a lie (unless
* EPSILON = 0.0).
*---------------------------------------------------------*/
Datum
point_left(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPlt(pt1->x, pt2->x));
}
Datum
point_right(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPgt(pt1->x, pt2->x));
}
Datum
point_above(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPgt(pt1->y, pt2->y));
}
Datum
point_below(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPlt(pt1->y, pt2->y));
}
Datum
point_vert(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPeq(pt1->x, pt2->x));
}
Datum
point_horiz(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPeq(pt1->y, pt2->y));
}
Datum
point_eq(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPeq(pt1->x, pt2->x) && FPeq(pt1->y, pt2->y));
}
Datum
point_ne(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPne(pt1->x, pt2->x) || FPne(pt1->y, pt2->y));
}
/*----------------------------------------------------------
* "Arithmetic" operators on points.
*---------------------------------------------------------*/
Datum
point_distance(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_FLOAT8(HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
}
double
point_dt(Point *pt1, Point *pt2)
{
#ifdef GEODEBUG
printf("point_dt- segment (%f,%f),(%f,%f) length is %f\n",
pt1->x, pt1->y, pt2->x, pt2->y, HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
#endif
return HYPOT(pt1->x - pt2->x, pt1->y - pt2->y);
}
Datum
point_slope(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_FLOAT8(point_sl(pt1, pt2));
}
double
point_sl(Point *pt1, Point *pt2)
{
return (FPeq(pt1->x, pt2->x)
? (double) DBL_MAX
: (pt1->y - pt2->y) / (pt1->x - pt2->x));
}
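/*
 * Note on the slope convention (illustrative): a vertical segment has no
 * finite slope, so point_sl() returns DBL_MAX as a sentinel rather than
 * dividing by zero; e.g. for pt1 = (2,0) and pt2 = (2,5), the FPeq test on
 * the x coordinates fires and DBL_MAX comes back.  Callers such as
 * lseg_perp() below test explicitly for that sentinel.
 */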
/***********************************************************************
**
** Routines for 2D line segments.
**
***********************************************************************/
/*----------------------------------------------------------
* String to lseg, lseg to string conversion.
* External forms: "[(x1, y1), (x2, y2)]"
* "(x1, y1), (x2, y2)"
* "x1, y1, x2, y2"
* closed form ok "((x1, y1), (x2, y2))"
* (old form) "(x1, y1, x2, y2)"
*---------------------------------------------------------*/
Datum
lseg_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
LSEG *lseg;
int isopen;
char *s;
lseg = (LSEG *) palloc(sizeof(LSEG));
if ((!path_decode(TRUE, 2, str, &isopen, &s, &(lseg->p[0])))
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type lseg: \"%s\"", str)));
#ifdef NOT_USED
lseg->m = point_sl(&lseg->p[0], &lseg->p[1]);
#endif
PG_RETURN_LSEG_P(lseg);
}
Datum
lseg_out(PG_FUNCTION_ARGS)
{
LSEG *ls = PG_GETARG_LSEG_P(0);
PG_RETURN_CSTRING(path_encode(PATH_OPEN, 2, (Point *) &(ls->p[0])));
}
/*
* lseg_recv - converts external binary format to lseg
*/
Datum
lseg_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
LSEG *lseg;
lseg = (LSEG *) palloc(sizeof(LSEG));
lseg->p[0].x = pq_getmsgfloat8(buf);
lseg->p[0].y = pq_getmsgfloat8(buf);
lseg->p[1].x = pq_getmsgfloat8(buf);
lseg->p[1].y = pq_getmsgfloat8(buf);
#ifdef NOT_USED
lseg->m = point_sl(&lseg->p[0], &lseg->p[1]);
#endif
PG_RETURN_LSEG_P(lseg);
}
/*
* lseg_send - converts lseg to binary format
*/
Datum
lseg_send(PG_FUNCTION_ARGS)
{
LSEG *ls = PG_GETARG_LSEG_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, ls->p[0].x);
pq_sendfloat8(&buf, ls->p[0].y);
pq_sendfloat8(&buf, ls->p[1].x);
pq_sendfloat8(&buf, ls->p[1].y);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/* lseg_construct -
* form a LSEG from two Points.
*/
Datum
lseg_construct(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
LSEG *result = (LSEG *) palloc(sizeof(LSEG));
result->p[0].x = pt1->x;
result->p[0].y = pt1->y;
result->p[1].x = pt2->x;
result->p[1].y = pt2->y;
#ifdef NOT_USED
result->m = point_sl(pt1, pt2);
#endif
PG_RETURN_LSEG_P(result);
}
/* like lseg_construct, but assume space already allocated */
static void
statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2)
{
lseg->p[0].x = pt1->x;
lseg->p[0].y = pt1->y;
lseg->p[1].x = pt2->x;
lseg->p[1].y = pt2->y;
#ifdef NOT_USED
lseg->m = point_sl(pt1, pt2);
#endif
}
Datum
lseg_length(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
PG_RETURN_FLOAT8(point_dt(&lseg->p[0], &lseg->p[1]));
}
/*----------------------------------------------------------
* Relative position routines.
*---------------------------------------------------------*/
/*
** find intersection of the two lines, and see if it falls on
** both segments.
*/
Datum
lseg_intersect(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(lseg_intersect_internal(l1, l2));
}
static bool
lseg_intersect_internal(LSEG *l1, LSEG *l2)
{
LINE ln;
Point *interpt;
bool retval;
line_construct_pts(&ln, &l2->p[0], &l2->p[1]);
interpt = interpt_sl(l1, &ln);
if (interpt != NULL && on_ps_internal(interpt, l2))
retval = true; /* interpt on l1 and l2 */
else
retval = false;
return retval;
}
Datum
lseg_parallel(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
#ifdef NOT_USED
PG_RETURN_BOOL(FPeq(l1->m, l2->m));
#endif
PG_RETURN_BOOL(FPeq(point_sl(&l1->p[0], &l1->p[1]),
point_sl(&l2->p[0], &l2->p[1])));
}
/* lseg_perp()
* Determine if two line segments are perpendicular.
*
* This code did not get the correct answer for
* '((0,0),(0,1))'::lseg ?-| '((0,0),(1,0))'::lseg
* So, modified it to check explicitly for slope of vertical line
* returned by point_sl() and the results seem better.
* - thomas 1998-01-31
*/
Datum
lseg_perp(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
double m1,
m2;
m1 = point_sl(&(l1->p[0]), &(l1->p[1]));
m2 = point_sl(&(l2->p[0]), &(l2->p[1]));
#ifdef GEODEBUG
printf("lseg_perp- slopes are %g and %g\n", m1, m2);
#endif
if (FPzero(m1))
PG_RETURN_BOOL(FPeq(m2, DBL_MAX));
else if (FPzero(m2))
PG_RETURN_BOOL(FPeq(m1, DBL_MAX));
PG_RETURN_BOOL(FPeq(m1 / m2, -1.0));
}
Datum
lseg_vertical(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
PG_RETURN_BOOL(FPeq(lseg->p[0].x, lseg->p[1].x));
}
Datum
lseg_horizontal(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
PG_RETURN_BOOL(FPeq(lseg->p[0].y, lseg->p[1].y));
}
Datum
lseg_eq(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPeq(l1->p[0].x, l2->p[0].x) &&
FPeq(l1->p[0].y, l2->p[0].y) &&
FPeq(l1->p[1].x, l2->p[1].x) &&
FPeq(l1->p[1].y, l2->p[1].y));
}
Datum
lseg_ne(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(!FPeq(l1->p[0].x, l2->p[0].x) ||
!FPeq(l1->p[0].y, l2->p[0].y) ||
!FPeq(l1->p[1].x, l2->p[1].x) ||
!FPeq(l1->p[1].y, l2->p[1].y));
}
Datum
lseg_lt(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPlt(point_dt(&l1->p[0], &l1->p[1]),
point_dt(&l2->p[0], &l2->p[1])));
}
Datum
lseg_le(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPle(point_dt(&l1->p[0], &l1->p[1]),
point_dt(&l2->p[0], &l2->p[1])));
}
Datum
lseg_gt(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPgt(point_dt(&l1->p[0], &l1->p[1]),
point_dt(&l2->p[0], &l2->p[1])));
}
Datum
lseg_ge(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPge(point_dt(&l1->p[0], &l1->p[1]),
point_dt(&l2->p[0], &l2->p[1])));
}
/*----------------------------------------------------------
* Line arithmetic routines.
*---------------------------------------------------------*/
/* lseg_distance -
* If two segments don't intersect, then the closest
* point will be from one of the endpoints to the other
* segment.
*/
Datum
lseg_distance(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_FLOAT8(lseg_dt(l1, l2));
}
/* lseg_dt()
* Distance between two line segments.
* Must check both sets of endpoints to ensure minimum distance is found.
* - thomas 1998-02-01
*/
static double
lseg_dt(LSEG *l1, LSEG *l2)
{
double result,
d;
if (lseg_intersect_internal(l1, l2))
return 0.0;
d = dist_ps_internal(&l1->p[0], l2);
result = d;
d = dist_ps_internal(&l1->p[1], l2);
result = Min(result, d);
d = dist_ps_internal(&l2->p[0], l1);
result = Min(result, d);
d = dist_ps_internal(&l2->p[1], l1);
result = Min(result, d);
return result;
}
Datum
lseg_center(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = (lseg->p[0].x + lseg->p[1].x) / 2.0;
result->y = (lseg->p[0].y + lseg->p[1].y) / 2.0;
PG_RETURN_POINT_P(result);
}
static Point *
lseg_interpt_internal(LSEG *l1, LSEG *l2)
{
Point *result;
LINE tmp1,
tmp2;
/*
* Find the intersection of the appropriate lines, if any.
*/
line_construct_pts(&tmp1, &l1->p[0], &l1->p[1]);
line_construct_pts(&tmp2, &l2->p[0], &l2->p[1]);
result = line_interpt_internal(&tmp1, &tmp2);
if (!PointerIsValid(result))
return NULL;
/*
* If the line intersection point isn't within l1 (or equivalently l2),
* there is no valid segment intersection point at all.
*/
if (!on_ps_internal(result, l1) ||
!on_ps_internal(result, l2))
{
pfree(result);
return NULL;
}
/*
* If there is an intersection, then check explicitly for matching
* endpoints since there may be rounding effects with annoying lsb
* residue. - tgl 1997-07-09
*/
if ((FPeq(l1->p[0].x, l2->p[0].x) && FPeq(l1->p[0].y, l2->p[0].y)) ||
(FPeq(l1->p[0].x, l2->p[1].x) && FPeq(l1->p[0].y, l2->p[1].y)))
{
result->x = l1->p[0].x;
result->y = l1->p[0].y;
}
else if ((FPeq(l1->p[1].x, l2->p[0].x) && FPeq(l1->p[1].y, l2->p[0].y)) ||
(FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
{
result->x = l1->p[1].x;
result->y = l1->p[1].y;
}
return result;
}
/* lseg_interpt -
* Find the intersection point of two segments (if any).
*/
Datum
lseg_interpt(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
Point *result;
result = lseg_interpt_internal(l1, l2);
if (!PointerIsValid(result))
PG_RETURN_NULL();
PG_RETURN_POINT_P(result);
}
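/*
 * Illustrative usage (values worked by hand, not from the source):
 *		SELECT '[(0,0),(2,2)]'::lseg # '[(0,2),(2,0)]'::lseg;
 * yields (1,1), the crossing of the two diagonals; non-intersecting
 * segments yield NULL via the PG_RETURN_NULL() path above.
 */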
/***********************************************************************
**
** Routines for position comparisons of differently-typed
** 2D objects.
**
***********************************************************************/
/*---------------------------------------------------------------------
* dist_
* Minimum distance from one object to another.
*-------------------------------------------------------------------*/
Datum
dist_pl(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_FLOAT8(dist_pl_internal(pt, line));
}
static double
dist_pl_internal(Point *pt, LINE *line)
{
return fabs((line->A * pt->x + line->B * pt->y + line->C) /
HYPOT(line->A, line->B));
}
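/*
 * Worked example (illustrative): with a LINE stored as Ax + By + C = 0,
 * say x + y - 1 = 0 (A = 1, B = 1, C = -1), the distance from the origin
 * is |1*0 + 1*0 - 1| / hypot(1, 1) = 1 / sqrt(2) ~= 0.7071.
 */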
Datum
dist_ps(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LSEG *lseg = PG_GETARG_LSEG_P(1);
PG_RETURN_FLOAT8(dist_ps_internal(pt, lseg));
}
static double
dist_ps_internal(Point *pt, LSEG *lseg)
{
double m; /* slope of perp. */
LINE *ln;
double result,
tmpdist;
Point *ip;
/*
* Construct a line perpendicular to the input segment and through the
* input point
*/
if (lseg->p[1].x == lseg->p[0].x)
m = 0;
else if (lseg->p[1].y == lseg->p[0].y)
m = (double) DBL_MAX; /* slope is infinite */
else
m = (lseg->p[0].x - lseg->p[1].x) / (lseg->p[1].y - lseg->p[0].y);
ln = line_construct_pm(pt, m);
#ifdef GEODEBUG
printf("dist_ps- line is A=%g B=%g C=%g from (point) slope (%f,%f) %g\n",
ln->A, ln->B, ln->C, pt->x, pt->y, m);
#endif
/*
* Calculate distance to the line segment or to the nearest endpoint of
* the segment.
*/
/* intersection is on the line segment? */
if ((ip = interpt_sl(lseg, ln)) != NULL)
{
/* yes, so use distance to the intersection point */
result = point_dt(pt, ip);
#ifdef GEODEBUG
printf("dist_ps- distance is %f to intersection point is (%f,%f)\n",
result, ip->x, ip->y);
#endif
}
else
{
/* no, so use distance to the nearer endpoint */
result = point_dt(pt, &lseg->p[0]);
tmpdist = point_dt(pt, &lseg->p[1]);
if (tmpdist < result)
result = tmpdist;
}
return result;
}
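/*
 * Note on the slope arithmetic above (illustrative): for a segment with
 * slope (y2 - y1) / (x2 - x1), the perpendicular slope is the negated
 * reciprocal (x1 - x2) / (y2 - y1), which is what the general branch
 * computes.  The two special cases avoid dividing by zero: a vertical
 * segment gets a horizontal perpendicular (m = 0), and a horizontal
 * segment gets a vertical perpendicular (the DBL_MAX sentinel).
 */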
/*
** Distance from a point to a path
*/
Datum
dist_ppath(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
PATH *path = PG_GETARG_PATH_P(1);
float8 result = 0.0; /* keep compiler quiet */
bool have_min = false;
float8 tmp;
int i;
LSEG lseg;
switch (path->npts)
{
case 0:
/* no points in path? then result is undefined... */
PG_RETURN_NULL();
case 1:
/* one point in path? then get distance between two points... */
result = point_dt(pt, &path->p[0]);
break;
default:
/* make sure the path makes sense... */
Assert(path->npts > 1);
/*
* the distance from a point to a path is the smallest distance
* from the point to any of its constituent segments.
*/
for (i = 0; i < path->npts; i++)
{
int iprev;
if (i > 0)
iprev = i - 1;
else
{
if (!path->closed)
continue;
iprev = path->npts - 1; /* include the closure segment */
}
statlseg_construct(&lseg, &path->p[iprev], &path->p[i]);
tmp = dist_ps_internal(pt, &lseg);
if (!have_min || tmp < result)
{
result = tmp;
have_min = true;
}
}
break;
}
PG_RETURN_FLOAT8(result);
}
Datum
dist_pb(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
BOX *box = PG_GETARG_BOX_P(1);
float8 result;
Point *near;
near = DatumGetPointP(DirectFunctionCall2(close_pb,
PointPGetDatum(pt),
BoxPGetDatum(box)));
result = point_dt(near, pt);
PG_RETURN_FLOAT8(result);
}
Datum
dist_sl(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
LINE *line = PG_GETARG_LINE_P(1);
float8 result,
d2;
if (has_interpt_sl(lseg, line))
result = 0.0;
else
{
		result = dist_pl_internal(&lseg->p[0], line);
		d2 = dist_pl_internal(&lseg->p[1], line);

		/*
		 * No intersection, so both endpoints lie on the same side of the
		 * line; the distance varies linearly along the segment, so the
		 * nearer endpoint gives the minimum distance.
		 */
		if (d2 < result)
			result = d2;
}
PG_RETURN_FLOAT8(result);
}
Datum
dist_sb(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
BOX *box = PG_GETARG_BOX_P(1);
Point *tmp;
Datum result;
tmp = DatumGetPointP(DirectFunctionCall2(close_sb,
LsegPGetDatum(lseg),
BoxPGetDatum(box)));
result = DirectFunctionCall2(dist_pb,
PointPGetDatum(tmp),
BoxPGetDatum(box));
PG_RETURN_DATUM(result);
}
Datum
dist_lb(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
#endif
/* need to think about this one for a while */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"dist_lb\" not implemented")));
PG_RETURN_NULL();
}
Datum
dist_cpoly(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
POLYGON *poly = PG_GETARG_POLYGON_P(1);
float8 result;
float8 d;
int i;
LSEG seg;
if (point_inside(&(circle->center), poly->npts, poly->p) != 0)
{
#ifdef GEODEBUG
printf("dist_cpoly- center inside of polygon\n");
#endif
PG_RETURN_FLOAT8(0.0);
}
/* initialize distance with segment between first and last points */
seg.p[0].x = poly->p[0].x;
seg.p[0].y = poly->p[0].y;
seg.p[1].x = poly->p[poly->npts - 1].x;
seg.p[1].y = poly->p[poly->npts - 1].y;
result = dist_ps_internal(&circle->center, &seg);
#ifdef GEODEBUG
printf("dist_cpoly- segment 0/n distance is %f\n", result);
#endif
/* check distances for other segments */
for (i = 0; (i < poly->npts - 1); i++)
{
seg.p[0].x = poly->p[i].x;
seg.p[0].y = poly->p[i].y;
seg.p[1].x = poly->p[i + 1].x;
seg.p[1].y = poly->p[i + 1].y;
d = dist_ps_internal(&circle->center, &seg);
#ifdef GEODEBUG
printf("dist_cpoly- segment %d distance is %f\n", (i + 1), d);
#endif
if (d < result)
result = d;
}
result -= circle->radius;
if (result < 0)
result = 0;
PG_RETURN_FLOAT8(result);
}
/*---------------------------------------------------------------------
* interpt_
* Intersection point of objects.
* We choose to ignore the "point" of intersection between
* lines and boxes, since there are typically two.
*-------------------------------------------------------------------*/
/* Get intersection point of lseg and line; returns NULL if no intersection */
static Point *
interpt_sl(LSEG *lseg, LINE *line)
{
LINE tmp;
Point *p;
line_construct_pts(&tmp, &lseg->p[0], &lseg->p[1]);
p = line_interpt_internal(&tmp, line);
#ifdef GEODEBUG
printf("interpt_sl- segment is (%.*g %.*g) (%.*g %.*g)\n",
DBL_DIG, lseg->p[0].x, DBL_DIG, lseg->p[0].y, DBL_DIG, lseg->p[1].x, DBL_DIG, lseg->p[1].y);
printf("interpt_sl- segment becomes line A=%.*g B=%.*g C=%.*g\n",
DBL_DIG, tmp.A, DBL_DIG, tmp.B, DBL_DIG, tmp.C);
#endif
if (PointerIsValid(p))
{
#ifdef GEODEBUG
printf("interpt_sl- intersection point is (%.*g %.*g)\n", DBL_DIG, p->x, DBL_DIG, p->y);
#endif
if (on_ps_internal(p, lseg))
{
#ifdef GEODEBUG
printf("interpt_sl- intersection point is on segment\n");
#endif
}
else
p = NULL;
}
return p;
}
/* variant: just indicate if intersection point exists */
static bool
has_interpt_sl(LSEG *lseg, LINE *line)
{
Point *tmp;
tmp = interpt_sl(lseg, line);
if (tmp)
return true;
return false;
}
/*---------------------------------------------------------------------
* close_
* Point of closest proximity between objects.
*-------------------------------------------------------------------*/
/* close_pl -
* The intersection point of a perpendicular of the line
* through the point.
*/
Datum
close_pl(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LINE *line = PG_GETARG_LINE_P(1);
Point *result;
LINE *tmp;
double invm;
result = (Point *) palloc(sizeof(Point));
if (FPzero(line->B)) /* vertical? */
{
result->x = line->C;
result->y = pt->y;
PG_RETURN_POINT_P(result);
}
if (FPzero(line->A)) /* horizontal? */
{
result->x = pt->x;
result->y = line->C;
PG_RETURN_POINT_P(result);
}
/* drop a perpendicular and find the intersection point */
/* invert and flip the sign on the slope to get a perpendicular */
invm = line->B / line->A;
tmp = line_construct_pm(pt, invm);
result = line_interpt_internal(tmp, line);
Assert(result != NULL);
PG_RETURN_POINT_P(result);
}
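/*
 * Slope sanity check for the code above (illustrative): the line
 * Ax + By + C = 0 has slope -A/B, so the perpendicular slope is the
 * negated reciprocal B/A -- exactly the invm used before dropping the
 * perpendicular through pt.
 */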
/* close_ps()
* Closest point on line segment to specified point.
* Take the closest endpoint if the point is left, right,
* above, or below the segment, otherwise find the intersection
* point of the segment and its perpendicular through the point.
*
* Some tricky code here, relying on boolean expressions
* evaluating to only zero or one to use as an array index.
* bug fixes by gthaker@atl.lmco.com; May 1, 1998
*/
Datum
close_ps(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LSEG *lseg = PG_GETARG_LSEG_P(1);
Point *result = NULL;
LINE *tmp;
double invm;
int xh,
yh;
#ifdef GEODEBUG
printf("close_sp:pt->x %f pt->y %f\nlseg(0).x %f lseg(0).y %f lseg(1).x %f lseg(1).y %f\n",
pt->x, pt->y, lseg->p[0].x, lseg->p[0].y,
lseg->p[1].x, lseg->p[1].y);
#endif
	/* xh (or yh) is the index of the upper x (or y) endpoint of lseg */
	/* !xh (or !yh) is the index of the lower x (or y) endpoint of lseg */
xh = lseg->p[0].x < lseg->p[1].x;
yh = lseg->p[0].y < lseg->p[1].y;
if (FPeq(lseg->p[0].x, lseg->p[1].x)) /* vertical? */
{
#ifdef GEODEBUG
printf("close_ps- segment is vertical\n");
#endif
/* first check if point is below or above the entire lseg. */
if (pt->y < lseg->p[!yh].y)
result = point_copy(&lseg->p[!yh]); /* below the lseg */
else if (pt->y > lseg->p[yh].y)
result = point_copy(&lseg->p[yh]); /* above the lseg */
if (result != NULL)
PG_RETURN_POINT_P(result);
		/* point lies alongside (to the left or right of) the vertical lseg. */
result = (Point *) palloc(sizeof(Point));
result->x = lseg->p[0].x;
result->y = pt->y;
PG_RETURN_POINT_P(result);
}
else if (FPeq(lseg->p[0].y, lseg->p[1].y)) /* horizontal? */
{
#ifdef GEODEBUG
printf("close_ps- segment is horizontal\n");
#endif
/* first check if point is left or right of the entire lseg. */
if (pt->x < lseg->p[!xh].x)
result = point_copy(&lseg->p[!xh]); /* left of the lseg */
else if (pt->x > lseg->p[xh].x)
result = point_copy(&lseg->p[xh]); /* right of the lseg */
if (result != NULL)
PG_RETURN_POINT_P(result);
		/* point lies along (above or below) the horizontal lseg. */
result = (Point *) palloc(sizeof(Point));
result->x = pt->x;
result->y = lseg->p[0].y;
PG_RETURN_POINT_P(result);
}
	/*
	 * Vertical and horizontal cases are done; now check whether the closest
	 * point is one of the endpoints or somewhere on the lseg.
	 */
invm = -1.0 / point_sl(&(lseg->p[0]), &(lseg->p[1]));
tmp = line_construct_pm(&lseg->p[!yh], invm); /* lower edge of the
* "band" */
if (pt->y < (tmp->A * pt->x + tmp->C))
{ /* we are below the lower edge */
result = point_copy(&lseg->p[!yh]); /* below the lseg, take lower
* end pt */
#ifdef GEODEBUG
printf("close_ps below: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
#endif
PG_RETURN_POINT_P(result);
}
tmp = line_construct_pm(&lseg->p[yh], invm); /* upper edge of the
* "band" */
if (pt->y > (tmp->A * pt->x + tmp->C))
	{							/* we are above the upper edge */
result = point_copy(&lseg->p[yh]); /* above the lseg, take higher
* end pt */
#ifdef GEODEBUG
printf("close_ps above: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
#endif
PG_RETURN_POINT_P(result);
}
	/*
	 * At this point the "normal" from pt will hit the lseg.  The closest
	 * point will be somewhere on the lseg.
	 */
tmp = line_construct_pm(pt, invm);
#ifdef GEODEBUG
printf("close_ps- tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
#endif
result = interpt_sl(lseg, tmp);
Assert(result != NULL);
#ifdef GEODEBUG
printf("close_ps- result.x %f result.y %f\n", result->x, result->y);
#endif
PG_RETURN_POINT_P(result);
}
/* close_lseg()
* Closest point to l1 on l2.
*/
Datum
close_lseg(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
Point *result = NULL;
Point point;
double dist;
double d;
d = dist_ps_internal(&l1->p[0], l2);
dist = d;
memcpy(&point, &l1->p[0], sizeof(Point));
if ((d = dist_ps_internal(&l1->p[1], l2)) < dist)
{
dist = d;
memcpy(&point, &l1->p[1], sizeof(Point));
}
if (dist_ps_internal(&l2->p[0], l1) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
PointPGetDatum(&l2->p[0]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
if (dist_ps_internal(&l2->p[1], l1) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
PointPGetDatum(&l2->p[1]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
if (result == NULL)
result = point_copy(&point);
PG_RETURN_POINT_P(result);
}
/* close_pb()
* Closest point on or in box to specified point.
*/
Datum
close_pb(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
BOX *box = PG_GETARG_BOX_P(1);
LSEG lseg,
seg;
Point point;
double dist,
d;
if (DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(pt),
BoxPGetDatum(box))))
PG_RETURN_POINT_P(pt);
/* pairwise check lseg distances */
point.x = box->low.x;
point.y = box->high.y;
statlseg_construct(&lseg, &box->low, &point);
dist = dist_ps_internal(pt, &lseg);
statlseg_construct(&seg, &box->high, &point);
if ((d = dist_ps_internal(pt, &seg)) < dist)
{
dist = d;
memcpy(&lseg, &seg, sizeof(lseg));
}
point.x = box->high.x;
point.y = box->low.y;
statlseg_construct(&seg, &box->low, &point);
if ((d = dist_ps_internal(pt, &seg)) < dist)
{
dist = d;
memcpy(&lseg, &seg, sizeof(lseg));
}
statlseg_construct(&seg, &box->high, &point);
if ((d = dist_ps_internal(pt, &seg)) < dist)
{
dist = d;
memcpy(&lseg, &seg, sizeof(lseg));
}
PG_RETURN_DATUM(DirectFunctionCall2(close_ps,
PointPGetDatum(pt),
LsegPGetDatum(&lseg)));
}
/* close_sl()
* Closest point on line to line segment.
*
* XXX THIS CODE IS WRONG
* The code is actually calculating the point on the line segment
* which is backwards from the routine naming convention.
* Copied code to new routine close_ls() but haven't fixed this one yet.
* - thomas 1998-01-31
*/
Datum
close_sl(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
LSEG *lseg = PG_GETARG_LSEG_P(0);
LINE *line = PG_GETARG_LINE_P(1);
Point *result;
float8 d1,
d2;
result = interpt_sl(lseg, line);
if (result)
PG_RETURN_POINT_P(result);
d1 = dist_pl_internal(&lseg->p[0], line);
d2 = dist_pl_internal(&lseg->p[1], line);
if (d1 < d2)
result = point_copy(&lseg->p[0]);
else
result = point_copy(&lseg->p[1]);
PG_RETURN_POINT_P(result);
#endif
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"close_sl\" not implemented")));
PG_RETURN_NULL();
}
/* close_ls()
* Closest point on line segment to line.
*/
Datum
close_ls(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
LSEG *lseg = PG_GETARG_LSEG_P(1);
Point *result;
float8 d1,
d2;
result = interpt_sl(lseg, line);
if (result)
PG_RETURN_POINT_P(result);
d1 = dist_pl_internal(&lseg->p[0], line);
d2 = dist_pl_internal(&lseg->p[1], line);
if (d1 < d2)
result = point_copy(&lseg->p[0]);
else
result = point_copy(&lseg->p[1]);
PG_RETURN_POINT_P(result);
}
/* close_sb()
* Closest point on or in box to line segment.
*/
Datum
close_sb(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
BOX *box = PG_GETARG_BOX_P(1);
Point point;
LSEG bseg,
seg;
double dist,
d;
/* segment intersects box? then just return closest point to center */
if (DatumGetBool(DirectFunctionCall2(inter_sb,
LsegPGetDatum(lseg),
BoxPGetDatum(box))))
{
box_cn(&point, box);
PG_RETURN_DATUM(DirectFunctionCall2(close_ps,
PointPGetDatum(&point),
LsegPGetDatum(lseg)));
}
/* pairwise check lseg distances */
point.x = box->low.x;
point.y = box->high.y;
statlseg_construct(&bseg, &box->low, &point);
dist = lseg_dt(lseg, &bseg);
statlseg_construct(&seg, &box->high, &point);
if ((d = lseg_dt(lseg, &seg)) < dist)
{
dist = d;
memcpy(&bseg, &seg, sizeof(bseg));
}
point.x = box->high.x;
point.y = box->low.y;
statlseg_construct(&seg, &box->low, &point);
if ((d = lseg_dt(lseg, &seg)) < dist)
{
dist = d;
memcpy(&bseg, &seg, sizeof(bseg));
}
statlseg_construct(&seg, &box->high, &point);
if ((d = lseg_dt(lseg, &seg)) < dist)
{
dist = d;
memcpy(&bseg, &seg, sizeof(bseg));
}
/* OK, we now have the closest line segment on the box boundary */
PG_RETURN_DATUM(DirectFunctionCall2(close_lseg,
LsegPGetDatum(lseg),
LsegPGetDatum(&bseg)));
}
Datum
close_lb(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
#endif
/* think about this one for a while */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"close_lb\" not implemented")));
PG_RETURN_NULL();
}
/*---------------------------------------------------------------------
* on_
* Whether one object lies completely within another.
*-------------------------------------------------------------------*/
/* on_pl -
* Does the point satisfy the equation?
*/
Datum
on_pl(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(FPzero(line->A * pt->x + line->B * pt->y + line->C));
}
/* on_ps -
* Determine colinearity by detecting a triangle inequality.
* This algorithm seems to behave nicely even with lsb residues - tgl 1997-07-09
*/
Datum
on_ps(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LSEG *lseg = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(on_ps_internal(pt, lseg));
}
static bool
on_ps_internal(Point *pt, LSEG *lseg)
{
return FPeq(point_dt(pt, &lseg->p[0]) + point_dt(pt, &lseg->p[1]),
point_dt(&lseg->p[0], &lseg->p[1]));
}
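/*
 * Worked example of the triangle-inequality test (illustrative): for
 * pt = (1,1) on the segment (0,0)-(2,2), the two partial distances sum to
 * sqrt(2) + sqrt(2) = 2*sqrt(2), exactly the segment length, so FPeq
 * succeeds; for pt = (1,0) the sum is 1 + sqrt(5) > 2*sqrt(2) and the
 * test fails.
 */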
Datum
on_pb(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
BOX *box = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(pt->x <= box->high.x && pt->x >= box->low.x &&
pt->y <= box->high.y && pt->y >= box->low.y);
}
Datum
box_contain_pt(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *pt = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(pt->x <= box->high.x && pt->x >= box->low.x &&
pt->y <= box->high.y && pt->y >= box->low.y);
}
/* on_ppath -
* Whether a point lies within (on) a polyline.
* If open, we have to (groan) check each segment.
* (uses same algorithm as for point intersecting segment - tgl 1997-07-09)
* If closed, we use the old O(n) ray method for point-in-polygon.
* The ray is horizontal, from pt out to the right.
* Each segment that crosses the ray counts as an
* intersection; note that an endpoint or edge may touch
* but not cross.
* (we can do p-in-p in lg(n), but it takes preprocessing)
*/
Datum
on_ppath(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
PATH *path = PG_GETARG_PATH_P(1);
int i,
n;
double a,
b;
/*-- OPEN --*/
if (!path->closed)
{
n = path->npts - 1;
a = point_dt(pt, &path->p[0]);
for (i = 0; i < n; i++)
{
b = point_dt(pt, &path->p[i + 1]);
if (FPeq(a + b,
point_dt(&path->p[i], &path->p[i + 1])))
PG_RETURN_BOOL(true);
a = b;
}
PG_RETURN_BOOL(false);
}
/*-- CLOSED --*/
PG_RETURN_BOOL(point_inside(pt, path->npts, path->p) != 0);
}
Datum
on_sl(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pl,
PointPGetDatum(&lseg->p[0]),
LinePGetDatum(line))) &&
DatumGetBool(DirectFunctionCall2(on_pl,
PointPGetDatum(&lseg->p[1]),
LinePGetDatum(line))));
}
Datum
on_sb(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
BOX *box = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(&lseg->p[0]),
BoxPGetDatum(box))) &&
DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(&lseg->p[1]),
BoxPGetDatum(box))));
}
/*---------------------------------------------------------------------
* inter_
* Whether one object intersects another.
*-------------------------------------------------------------------*/
Datum
inter_sl(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(has_interpt_sl(lseg, line));
}
/* inter_sb()
* Do line segment and box intersect?
*
* Segment completely inside box counts as intersection.
* If you want only segments crossing box boundaries,
* try converting box to path first.
*
* Optimize for non-intersection by checking for box intersection first.
* - thomas 1998-01-30
*/
Datum
inter_sb(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
BOX *box = PG_GETARG_BOX_P(1);
BOX lbox;
LSEG bseg;
Point point;
lbox.low.x = Min(lseg->p[0].x, lseg->p[1].x);
lbox.low.y = Min(lseg->p[0].y, lseg->p[1].y);
lbox.high.x = Max(lseg->p[0].x, lseg->p[1].x);
lbox.high.y = Max(lseg->p[0].y, lseg->p[1].y);
/* nothing close to overlap? then not going to intersect */
if (!box_ov(&lbox, box))
PG_RETURN_BOOL(false);
/* an endpoint of segment is inside box? then clearly intersects */
if (DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(&lseg->p[0]),
BoxPGetDatum(box))) ||
DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(&lseg->p[1]),
BoxPGetDatum(box))))
PG_RETURN_BOOL(true);
/* pairwise check lseg intersections */
point.x = box->low.x;
point.y = box->high.y;
statlseg_construct(&bseg, &box->low, &point);
if (lseg_intersect_internal(&bseg, lseg))
PG_RETURN_BOOL(true);
statlseg_construct(&bseg, &box->high, &point);
if (lseg_intersect_internal(&bseg, lseg))
PG_RETURN_BOOL(true);
point.x = box->high.x;
point.y = box->low.y;
statlseg_construct(&bseg, &box->low, &point);
if (lseg_intersect_internal(&bseg, lseg))
PG_RETURN_BOOL(true);
statlseg_construct(&bseg, &box->high, &point);
if (lseg_intersect_internal(&bseg, lseg))
PG_RETURN_BOOL(true);
/* if we dropped through, no two segs intersected */
PG_RETURN_BOOL(false);
}
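/*
 * Illustrative case for the pairwise check (not from the source): the
 * segment [(-1,1),(3,1)] against the box (0,0),(2,2) has neither endpoint
 * inside the box, yet it crosses the left and right edges, so one of the
 * four lseg_intersect_internal() calls above reports true.
 */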
/* inter_lb()
* Do line and box intersect?
*/
Datum
inter_lb(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
LSEG bseg;
Point p1,
p2;
/* pairwise check lseg intersections */
p1.x = box->low.x;
p1.y = box->low.y;
p2.x = box->low.x;
p2.y = box->high.y;
statlseg_construct(&bseg, &p1, &p2);
if (has_interpt_sl(&bseg, line))
PG_RETURN_BOOL(true);
p1.x = box->high.x;
p1.y = box->high.y;
statlseg_construct(&bseg, &p1, &p2);
if (has_interpt_sl(&bseg, line))
PG_RETURN_BOOL(true);
p2.x = box->high.x;
p2.y = box->low.y;
statlseg_construct(&bseg, &p1, &p2);
if (has_interpt_sl(&bseg, line))
PG_RETURN_BOOL(true);
p1.x = box->low.x;
p1.y = box->low.y;
statlseg_construct(&bseg, &p1, &p2);
if (has_interpt_sl(&bseg, line))
PG_RETURN_BOOL(true);
/* if we dropped through, no intersection */
PG_RETURN_BOOL(false);
}
/*------------------------------------------------------------------
* The following routines define a data type and operator class for
* POLYGONS .... Part of which (the polygon's bounding box) is built on
* top of the BOX data type.
*
* make_bound_box - create the bounding box for the input polygon
*------------------------------------------------------------------*/
/*---------------------------------------------------------------------
* Make the smallest bounding box for the given polygon.
*---------------------------------------------------------------------*/
static void
make_bound_box(POLYGON *poly)
{
int i;
double x1,
y1,
x2,
y2;
if (poly->npts > 0)
{
x2 = x1 = poly->p[0].x;
y2 = y1 = poly->p[0].y;
for (i = 1; i < poly->npts; i++)
{
if (poly->p[i].x < x1)
x1 = poly->p[i].x;
if (poly->p[i].x > x2)
x2 = poly->p[i].x;
if (poly->p[i].y < y1)
y1 = poly->p[i].y;
if (poly->p[i].y > y2)
y2 = poly->p[i].y;
}
box_fill(&(poly->boundbox), x1, x2, y1, y2);
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot create bounding box for empty polygon")));
}
/*------------------------------------------------------------------
* poly_in - read in the polygon from a string specification
*
* External format:
* "((x0,y0),...,(xn,yn))"
* "x0,y0,...,xn,yn"
* also supports the older style "(x1,...,xn,y1,...yn)"
*------------------------------------------------------------------*/
Datum
poly_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
POLYGON *poly;
int npts;
int size;
int isopen;
char *s;
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type polygon: \"%s\"", str)));
	size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
SET_VARSIZE(poly, size);
poly->npts = npts;
if ((!path_decode(FALSE, npts, str, &isopen, &s, &(poly->p[0])))
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type polygon: \"%s\"", str)));
make_bound_box(poly);
PG_RETURN_POLYGON_P(poly);
}
/*---------------------------------------------------------------
* poly_out - convert internal POLYGON representation to the
* character string format "((f8,f8),...,(f8,f8))"
*---------------------------------------------------------------*/
Datum
poly_out(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
PG_RETURN_CSTRING(path_encode(PATH_CLOSED, poly->npts, poly->p));
}
/*
* poly_recv - converts external binary format to polygon
*
* External representation is int32 number of points, and the points.
* We recompute the bounding box on read, instead of trusting it to
* be valid. (Checking it would take just as long, so may as well
* omit it from external representation.)
*/
Datum
poly_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
POLYGON *poly;
int32 npts;
int32 i;
int size;
npts = pq_getmsgint(buf, sizeof(int32));
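	/*
	 * Guard against overflow in the size computation below: npts must be
	 * positive and small enough that offsetof(POLYGON, p[0]) plus
	 * npts * sizeof(Point) still fits in an int.
	 */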
if (npts <= 0 || npts >= (int32) ((INT_MAX - offsetof(POLYGON, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("invalid number of points in external \"polygon\" value")));
	size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
SET_VARSIZE(poly, size);
poly->npts = npts;
for (i = 0; i < npts; i++)
{
poly->p[i].x = pq_getmsgfloat8(buf);
poly->p[i].y = pq_getmsgfloat8(buf);
}
make_bound_box(poly);
PG_RETURN_POLYGON_P(poly);
}
/*
* poly_send - converts polygon to binary format
*/
Datum
poly_send(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
StringInfoData buf;
int32 i;
pq_begintypsend(&buf);
pq_sendint(&buf, poly->npts, sizeof(int32));
for (i = 0; i < poly->npts; i++)
{
pq_sendfloat8(&buf, poly->p[i].x);
pq_sendfloat8(&buf, poly->p[i].y);
}
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*-------------------------------------------------------
* Is polygon A strictly left of polygon B? i.e. is
* the right most point of A left of the left most point
* of B?
*-------------------------------------------------------*/
Datum
poly_left(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.x < polyb->boundbox.low.x;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A overlapping or left of polygon B? i.e. is
* the right most point of A at or left of the right most point
* of B?
*-------------------------------------------------------*/
Datum
poly_overleft(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.x <= polyb->boundbox.high.x;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A strictly right of polygon B? i.e. is
* the left most point of A right of the right most point
* of B?
*-------------------------------------------------------*/
Datum
poly_right(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.x > polyb->boundbox.high.x;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A overlapping or right of polygon B? i.e. is
* the left most point of A at or right of the left most point
* of B?
*-------------------------------------------------------*/
Datum
poly_overright(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.x >= polyb->boundbox.low.x;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A strictly below polygon B? i.e. is
* the upper most point of A below the lower most point
* of B?
*-------------------------------------------------------*/
Datum
poly_below(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.y < polyb->boundbox.low.y;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A overlapping or below polygon B? i.e. is
* the upper most point of A at or below the upper most point
* of B?
*-------------------------------------------------------*/
Datum
poly_overbelow(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.y <= polyb->boundbox.high.y;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A strictly above polygon B? i.e. is
* the lower most point of A above the upper most point
* of B?
*-------------------------------------------------------*/
Datum
poly_above(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.y > polyb->boundbox.high.y;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A overlapping or above polygon B? i.e. is
* the lower most point of A at or above the lower most point
* of B?
*-------------------------------------------------------*/
Datum
poly_overabove(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.y >= polyb->boundbox.low.y;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A the same as polygon B? i.e. are all the
* points the same?
* Check all points for matches in both forward and reverse
* direction since polygons are non-directional and are
* closed shapes.
*-------------------------------------------------------*/
Datum
poly_same(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
if (polya->npts != polyb->npts)
result = false;
else
result = plist_same(polya->npts, polya->p, polyb->p);
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-----------------------------------------------------------------
* Determine if polygon A overlaps polygon B
*-----------------------------------------------------------------*/
Datum
poly_overlap(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
/* Quick check by bounding box */
result = (polya->npts > 0 && polyb->npts > 0 &&
box_ov(&polya->boundbox, &polyb->boundbox)) ? true : false;
	/*
	 * Brute-force algorithm: try to find intersecting edges; if any exist,
	 * the polygons overlap.  Otherwise, check whether one polygon lies
	 * inside the other by testing a single point of each.
	 */
if (result)
{
int ia,
ib;
LSEG sa,
sb;
		/* Initialize the first point of polya's edge with its last point */
sa.p[0] = polya->p[polya->npts - 1];
result = false;
for (ia = 0; ia < polya->npts && result == false; ia++)
{
			/* Second point of polya's edge is the current one */
sa.p[1] = polya->p[ia];
			/* Initialize the first point of polyb's edge with its last point */
sb.p[0] = polyb->p[polyb->npts - 1];
for (ib = 0; ib < polyb->npts && result == false; ib++)
{
sb.p[1] = polyb->p[ib];
result = lseg_intersect_internal(&sa, &sb);
sb.p[0] = sb.p[1];
}
			/*
			 * move the current endpoint up to be the first point of the next
			 * edge
			 */
sa.p[0] = sa.p[1];
}
if (result == false)
{
result = (point_inside(polya->p, polyb->npts, polyb->p)
||
point_inside(polyb->p, polya->npts, polya->p));
}
}
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*
 * Tests a special kind of segment for being inside or outside a polygon.
 * "Special" means:
 * - point a must lie on segment s
 * - segment (a,b) must not be contained by s
 * Returns true if:
 * - segment (a,b) is collinear with s and (a,b) is inside the polygon
 * - segment (a,b) is not collinear with s.  Note: that does not mean
 *   the segment is inside the polygon!
 */
static bool
touched_lseg_inside_poly(Point *a, Point *b, LSEG *s, POLYGON *poly, int start)
{
/* point a is on s, b is not */
LSEG t;
t.p[0] = *a;
t.p[1] = *b;
#define POINTEQ(pt1, pt2) (FPeq((pt1)->x, (pt2)->x) && FPeq((pt1)->y, (pt2)->y))
if (POINTEQ(a, s->p))
{
if (on_ps_internal(s->p + 1, &t))
return lseg_inside_poly(b, s->p + 1, poly, start);
}
else if (POINTEQ(a, s->p + 1))
{
if (on_ps_internal(s->p, &t))
return lseg_inside_poly(b, s->p, poly, start);
}
else if (on_ps_internal(s->p, &t))
{
return lseg_inside_poly(b, s->p, poly, start);
}
else if (on_ps_internal(s->p + 1, &t))
{
return lseg_inside_poly(b, s->p + 1, poly, start);
}
return true; /* may be not true, but that will check later */
}
/*
 * Returns true if segment (a,b) is inside the polygon.  The "start"
 * argument is an optimization: the function checks the polygon's edges
 * beginning at edge number "start".
 */
static bool
lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start)
{
LSEG s,
t;
int i;
bool res = true,
intersection = false;
t.p[0] = *a;
t.p[1] = *b;
s.p[0] = poly->p[(start == 0) ? (poly->npts - 1) : (start - 1)];
for (i = start; i < poly->npts && res; i++)
{
Point *interpt;
s.p[1] = poly->p[i];
if (on_ps_internal(t.p, &s))
{
if (on_ps_internal(t.p + 1, &s))
return true; /* t is contained by s */
/* Y-cross */
res = touched_lseg_inside_poly(t.p, t.p + 1, &s, poly, i + 1);
}
else if (on_ps_internal(t.p + 1, &s))
{
/* Y-cross */
res = touched_lseg_inside_poly(t.p + 1, t.p, &s, poly, i + 1);
}
else if ((interpt = lseg_interpt_internal(&t, &s)) != NULL)
{
			/*
			 * the segments are X-crossing; go check each subsegment
			 */
intersection = true;
res = lseg_inside_poly(t.p, interpt, poly, i + 1);
if (res)
res = lseg_inside_poly(t.p + 1, interpt, poly, i + 1);
pfree(interpt);
}
s.p[0] = s.p[1];
}
if (res && !intersection)
{
Point p;
		/*
		 * If no X-crossing was found, check the central point of the tested
		 * segment; otherwise we have already checked all the subsegments
		 * above.
		 */
p.x = (t.p[0].x + t.p[1].x) / 2.0;
p.y = (t.p[0].y + t.p[1].y) / 2.0;
res = point_inside(&p, poly->npts, poly->p);
}
return res;
}
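/*
 * Why the final midpoint test is sufficient (illustrative reasoning, not
 * from the source): if the tested segment never X-crosses a polygon edge,
 * it lies entirely on one side of the boundary, so any interior point of
 * the segment -- the midpoint being the convenient choice -- decides
 * inside vs. outside for the whole segment.
 */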
/*-----------------------------------------------------------------
* Determine if polygon A contains polygon B.
*-----------------------------------------------------------------*/
Datum
poly_contain(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
/*
* Quick check to see if bounding box is contained.
*/
if (polya->npts > 0 && polyb->npts > 0 &&
DatumGetBool(DirectFunctionCall2(box_contain,
BoxPGetDatum(&polya->boundbox),
BoxPGetDatum(&polyb->boundbox))))
{
int i;
LSEG s;
s.p[0] = polyb->p[polyb->npts - 1];
result = true;
for (i = 0; i < polyb->npts && result; i++)
{
s.p[1] = polyb->p[i];
result = lseg_inside_poly(s.p, s.p + 1, polya, 0);
s.p[0] = s.p[1];
}
}
else
{
result = false;
}
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-----------------------------------------------------------------
* Determine if polygon A is contained by polygon B
*-----------------------------------------------------------------*/
Datum
poly_contained(PG_FUNCTION_ARGS)
{
Datum polya = PG_GETARG_DATUM(0);
Datum polyb = PG_GETARG_DATUM(1);
/* Just switch the arguments and pass it off to poly_contain */
PG_RETURN_DATUM(DirectFunctionCall2(poly_contain, polyb, polya));
}
Datum
poly_contain_pt(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
Point *p = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(point_inside(p, poly->npts, poly->p) != 0);
}
Datum
pt_contained_poly(PG_FUNCTION_ARGS)
{
Point *p = PG_GETARG_POINT_P(0);
POLYGON *poly = PG_GETARG_POLYGON_P(1);
PG_RETURN_BOOL(point_inside(p, poly->npts, poly->p) != 0);
}
Datum
poly_distance(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
#endif
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"poly_distance\" not implemented")));
PG_RETURN_NULL();
}
/***********************************************************************
**
** Routines for 2D points.
**
***********************************************************************/
Datum
construct_point(PG_FUNCTION_ARGS)
{
float8 x = PG_GETARG_FLOAT8(0);
float8 y = PG_GETARG_FLOAT8(1);
PG_RETURN_POINT_P(point_construct(x, y));
}
Datum
point_add(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = (p1->x + p2->x);
result->y = (p1->y + p2->y);
PG_RETURN_POINT_P(result);
}
Datum
point_sub(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = (p1->x - p2->x);
result->y = (p1->y - p2->y);
PG_RETURN_POINT_P(result);
}
Datum
point_mul(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = (p1->x * p2->x) - (p1->y * p2->y);
result->y = (p1->x * p2->y) + (p1->y * p2->x);
PG_RETURN_POINT_P(result);
}
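/*
 * Illustrative arithmetic: point_mul treats points as complex numbers,
 * (a,b) * (c,d) = (ac - bd, ad + bc); e.g. (1,2) * (3,4) = (-5, 10).
 * Multiplying by a unit-length point (cos t, sin t) therefore rotates by
 * angle t, which is how path_mul_pt() below gets its "rotation" behavior.
 */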
Datum
point_div(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
Point *result;
double div;
result = (Point *) palloc(sizeof(Point));
div = (p2->x * p2->x) + (p2->y * p2->y);
if (div == 0.0)
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
result->x = ((p1->x * p2->x) + (p1->y * p2->y)) / div;
result->y = ((p2->x * p1->y) - (p2->y * p1->x)) / div;
PG_RETURN_POINT_P(result);
}
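/*
 * Illustrative arithmetic: point_div is complex division, p1 * conj(p2)
 * scaled by |p2|^2; e.g. (-5,10) / (3,4) recovers (1,2), since
 * ((-5*3 + 10*4) / 25, (3*10 - 4*(-5)) / 25) = (25/25, 50/25).
 */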
/***********************************************************************
**
** Routines for 2D boxes.
**
***********************************************************************/
Datum
points_box(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOX_P(box_construct(p1->x, p2->x, p1->y, p2->y));
}
Datum
box_add(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *p = PG_GETARG_POINT_P(1);
PG_RETURN_BOX_P(box_construct((box->high.x + p->x),
(box->low.x + p->x),
(box->high.y + p->y),
(box->low.y + p->y)));
}
Datum
box_sub(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *p = PG_GETARG_POINT_P(1);
PG_RETURN_BOX_P(box_construct((box->high.x - p->x),
(box->low.x - p->x),
(box->high.y - p->y),
(box->low.y - p->y)));
}
Datum
box_mul(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *p = PG_GETARG_POINT_P(1);
BOX *result;
Point *high,
*low;
high = DatumGetPointP(DirectFunctionCall2(point_mul,
PointPGetDatum(&box->high),
PointPGetDatum(p)));
low = DatumGetPointP(DirectFunctionCall2(point_mul,
PointPGetDatum(&box->low),
PointPGetDatum(p)));
result = box_construct(high->x, low->x, high->y, low->y);
PG_RETURN_BOX_P(result);
}
Datum
box_div(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *p = PG_GETARG_POINT_P(1);
BOX *result;
Point *high,
*low;
high = DatumGetPointP(DirectFunctionCall2(point_div,
PointPGetDatum(&box->high),
PointPGetDatum(p)));
low = DatumGetPointP(DirectFunctionCall2(point_div,
PointPGetDatum(&box->low),
PointPGetDatum(p)));
result = box_construct(high->x, low->x, high->y, low->y);
PG_RETURN_BOX_P(result);
}
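/*
 * Note (an assumption about helpers defined elsewhere in this file):
 * rotation by point_mul/point_div can swap which product corner is "high";
 * box_construct() is relied on to re-normalize the corner ordering, so
 * box_mul and box_div above still return well-formed boxes.
 */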
/***********************************************************************
**
** Routines for 2D paths.
**
***********************************************************************/
/* path_add()
* Concatenate two paths (only if they are both open).
*/
Datum
path_add(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PATH *result;
int size,
base_size;
int i;
if (p1->closed || p2->closed)
PG_RETURN_NULL();
base_size = sizeof(p1->p[0]) * (p1->npts + p2->npts);
	size = offsetof(PATH, p[0]) + base_size;
	/*
	 * Check for integer overflow: the first test catches wraparound in the
	 * multiplication inside base_size, the second catches overflow when
	 * adding the header size.
	 */
if (base_size / sizeof(p1->p[0]) != (p1->npts + p2->npts) ||
size <= base_size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
result = (PATH *) palloc(size);
SET_VARSIZE(result, size);
result->npts = (p1->npts + p2->npts);
result->closed = p1->closed;
/* prevent instability in unused pad bytes */
result->dummy = 0;
for (i = 0; i < p1->npts; i++)
{
result->p[i].x = p1->p[i].x;
result->p[i].y = p1->p[i].y;
}
for (i = 0; i < p2->npts; i++)
{
result->p[i + p1->npts].x = p2->p[i].x;
result->p[i + p1->npts].y = p2->p[i].y;
}
PG_RETURN_PATH_P(result);
}
/* path_add_pt()
* Translation operators.
*/
Datum
path_add_pt(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
Point *point = PG_GETARG_POINT_P(1);
int i;
for (i = 0; i < path->npts; i++)
{
path->p[i].x += point->x;
path->p[i].y += point->y;
}
PG_RETURN_PATH_P(path);
}
Datum
path_sub_pt(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
Point *point = PG_GETARG_POINT_P(1);
int i;
for (i = 0; i < path->npts; i++)
{
path->p[i].x -= point->x;
path->p[i].y -= point->y;
}
PG_RETURN_PATH_P(path);
}
/* path_mul_pt()
* Rotation and scaling operators.
*/
Datum
path_mul_pt(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
Point *point = PG_GETARG_POINT_P(1);
Point *p;
int i;
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_mul,
PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
}
PG_RETURN_PATH_P(path);
}
Datum
path_div_pt(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
Point *point = PG_GETARG_POINT_P(1);
Point *p;
int i;
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_div,
PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
}
PG_RETURN_PATH_P(path);
}
Datum
path_center(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
PATH *path = PG_GETARG_PATH_P(0);
#endif
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"path_center\" not implemented")));
PG_RETURN_NULL();
}
Datum
path_poly(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
POLYGON *poly;
int size;
int i;
/* This is not very consistent --- other similar cases return NULL ... */
if (!path->closed)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("open path cannot be converted to polygon")));
	size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * path->npts;
poly = (POLYGON *) palloc(size);
SET_VARSIZE(poly, size);
poly->npts = path->npts;
for (i = 0; i < path->npts; i++)
{
poly->p[i].x = path->p[i].x;
poly->p[i].y = path->p[i].y;
}
make_bound_box(poly);
PG_RETURN_POLYGON_P(poly);
}
/***********************************************************************
**
** Routines for 2D polygons.
**
***********************************************************************/
Datum
poly_npoints(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
PG_RETURN_INT32(poly->npts);
}
Datum
poly_center(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
Datum result;
CIRCLE *circle;
circle = DatumGetCircleP(DirectFunctionCall1(poly_circle,
PolygonPGetDatum(poly)));
result = DirectFunctionCall1(circle_center,
CirclePGetDatum(circle));
PG_RETURN_DATUM(result);
}
Datum
poly_box(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
BOX *box;
if (poly->npts < 1)
PG_RETURN_NULL();
box = box_copy(&poly->boundbox);
PG_RETURN_BOX_P(box);
}
/* box_poly()
* Convert a box to a polygon.
*/
Datum
box_poly(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
POLYGON *poly;
int size;
/* map four corners of the box to a polygon */
	size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * 4;
poly = (POLYGON *) palloc(size);
SET_VARSIZE(poly, size);
poly->npts = 4;
poly->p[0].x = box->low.x;
poly->p[0].y = box->low.y;
poly->p[1].x = box->low.x;
poly->p[1].y = box->high.y;
poly->p[2].x = box->high.x;
poly->p[2].y = box->high.y;
poly->p[3].x = box->high.x;
poly->p[3].y = box->low.y;
box_fill(&poly->boundbox, box->high.x, box->low.x,
box->high.y, box->low.y);
PG_RETURN_POLYGON_P(poly);
}
Datum
poly_path(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
PATH *path;
int size;
int i;
	size = offsetof(PATH, p[0]) + sizeof(path->p[0]) * poly->npts;
path = (PATH *) palloc(size);
SET_VARSIZE(path, size);
path->npts = poly->npts;
path->closed = TRUE;
/* prevent instability in unused pad bytes */
path->dummy = 0;
for (i = 0; i < poly->npts; i++)
{
path->p[i].x = poly->p[i].x;
path->p[i].y = poly->p[i].y;
}
PG_RETURN_PATH_P(path);
}
/***********************************************************************
**
** Routines for circles.
**
***********************************************************************/
/*----------------------------------------------------------
* Formatting and conversion routines.
*---------------------------------------------------------*/
/* circle_in - convert a string to internal form.
*
* External format: (center and radius of circle)
 *		"<(f8,f8),f8>"
* also supports quick entry style "(f8,f8,f8)"
*/
Datum
circle_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
CIRCLE *circle;
char *s,
*cp;
int depth = 0;
circle = (CIRCLE *) palloc(sizeof(CIRCLE));
s = str;
while (isspace((unsigned char) *s))
s++;
if ((*s == LDELIM_C) || (*s == LDELIM))
{
depth++;
cp = (s + 1);
while (isspace((unsigned char) *cp))
cp++;
if (*cp == LDELIM)
s = cp;
}
if (!pair_decode(s, &circle->center.x, &circle->center.y, &s))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type circle: \"%s\"", str)));
if (*s == DELIM)
s++;
while (isspace((unsigned char) *s))
s++;
if ((!single_decode(s, &circle->radius, &s)) || (circle->radius < 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type circle: \"%s\"", str)));
while (depth > 0)
{
if ((*s == RDELIM)
|| ((*s == RDELIM_C) && (depth == 1)))
{
depth--;
s++;
while (isspace((unsigned char) *s))
s++;
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type circle: \"%s\"", str)));
}
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type circle: \"%s\"", str)));
PG_RETURN_CIRCLE_P(circle);
}
/* circle_out - convert a circle to external form.
*/
Datum
circle_out(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
char *result;
char *cp;
result = palloc(2 * P_MAXLEN + 6);
cp = result;
*cp++ = LDELIM_C;
*cp++ = LDELIM;
if (!pair_encode(circle->center.x, circle->center.y, cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not format \"circle\" value")));
cp += strlen(cp);
*cp++ = RDELIM;
*cp++ = DELIM;
if (!single_encode(circle->radius, cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not format \"circle\" value")));
cp += strlen(cp);
*cp++ = RDELIM_C;
*cp = '\0';
PG_RETURN_CSTRING(result);
}
/*
* circle_recv - converts external binary format to circle
*/
Datum
circle_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
CIRCLE *circle;
circle = (CIRCLE *) palloc(sizeof(CIRCLE));
circle->center.x = pq_getmsgfloat8(buf);
circle->center.y = pq_getmsgfloat8(buf);
circle->radius = pq_getmsgfloat8(buf);
if (circle->radius < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("invalid radius in external \"circle\" value")));
PG_RETURN_CIRCLE_P(circle);
}
/*
* circle_send - converts circle to binary format
*/
Datum
circle_send(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, circle->center.x);
pq_sendfloat8(&buf, circle->center.y);
pq_sendfloat8(&buf, circle->radius);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*----------------------------------------------------------
* Relational operators for CIRCLEs.
* <, >, <=, >=, and == are based on circle area.
*---------------------------------------------------------*/
/* circles identical?
*/
Datum
circle_same(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPeq(circle1->radius, circle2->radius) &&
FPeq(circle1->center.x, circle2->center.x) &&
FPeq(circle1->center.y, circle2->center.y));
}
/* circle_overlap - does circle1 overlap circle2?
*/
Datum
circle_overlap(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle(point_dt(&circle1->center, &circle2->center),
circle1->radius + circle2->radius));
}
/* circle_overleft - is the right edge of circle1 at or left of
* the right edge of circle2?
*/
Datum
circle_overleft(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle((circle1->center.x + circle1->radius),
(circle2->center.x + circle2->radius)));
}
/* circle_left - is circle1 strictly left of circle2?
*/
Datum
circle_left(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPlt((circle1->center.x + circle1->radius),
(circle2->center.x - circle2->radius)));
}
/* circle_right - is circle1 strictly right of circle2?
*/
Datum
circle_right(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPgt((circle1->center.x - circle1->radius),
(circle2->center.x + circle2->radius)));
}
/* circle_overright - is the left edge of circle1 at or right of
* the left edge of circle2?
*/
Datum
circle_overright(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPge((circle1->center.x - circle1->radius),
(circle2->center.x - circle2->radius)));
}
/* circle_contained - is circle1 contained by circle2?
*/
Datum
circle_contained(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle((point_dt(&circle1->center, &circle2->center) + circle1->radius), circle2->radius));
}
/* circle_contain - does circle1 contain circle2?
*/
Datum
circle_contain(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle((point_dt(&circle1->center, &circle2->center) + circle2->radius), circle1->radius));
}
/* circle_below - is circle1 strictly below circle2?
*/
Datum
circle_below(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPlt((circle1->center.y + circle1->radius),
(circle2->center.y - circle2->radius)));
}
/* circle_above - is circle1 strictly above circle2?
*/
Datum
circle_above(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPgt((circle1->center.y - circle1->radius),
(circle2->center.y + circle2->radius)));
}
/* circle_overbelow - is the upper edge of circle1 at or below
* the upper edge of circle2?
*/
Datum
circle_overbelow(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle((circle1->center.y + circle1->radius),
(circle2->center.y + circle2->radius)));
}
/* circle_overabove - is the lower edge of circle1 at or above
* the lower edge of circle2?
*/
Datum
circle_overabove(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPge((circle1->center.y - circle1->radius),
(circle2->center.y - circle2->radius)));
}
/* circle_relop - is area(circle1) relop area(circle2), within
* our accuracy constraint?
*/
Datum
circle_eq(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPeq(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_ne(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPne(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_lt(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPlt(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_gt(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPgt(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_le(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_ge(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPge(circle_ar(circle1), circle_ar(circle2)));
}
/*----------------------------------------------------------
* "Arithmetic" operators on circles.
*---------------------------------------------------------*/
static CIRCLE *
circle_copy(CIRCLE *circle)
{
CIRCLE *result;
if (!PointerIsValid(circle))
return NULL;
result = (CIRCLE *) palloc(sizeof(CIRCLE));
memcpy((char *) result, (char *) circle, sizeof(CIRCLE));
return result;
}
/* circle_add_pt()
* Translation operator.
*/
Datum
circle_add_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
CIRCLE *result;
result = circle_copy(circle);
result->center.x += point->x;
result->center.y += point->y;
PG_RETURN_CIRCLE_P(result);
}
Datum
circle_sub_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
CIRCLE *result;
result = circle_copy(circle);
result->center.x -= point->x;
result->center.y -= point->y;
PG_RETURN_CIRCLE_P(result);
}
/* circle_mul_pt()
* Rotation and scaling operators.
*/
Datum
circle_mul_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
CIRCLE *result;
Point *p;
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_mul,
PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
result->radius *= HYPOT(point->x, point->y);
PG_RETURN_CIRCLE_P(result);
}
Datum
circle_div_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
CIRCLE *result;
Point *p;
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_div,
PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
result->radius /= HYPOT(point->x, point->y);
PG_RETURN_CIRCLE_P(result);
}
/* circle_area - returns the area of the circle.
*/
Datum
circle_area(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
PG_RETURN_FLOAT8(circle_ar(circle));
}
/* circle_diameter - returns the diameter of the circle.
*/
Datum
circle_diameter(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
PG_RETURN_FLOAT8(2 * circle->radius);
}
/* circle_radius - returns the radius of the circle.
*/
Datum
circle_radius(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
PG_RETURN_FLOAT8(circle->radius);
}
/* circle_distance - returns the distance between
* two circles.
*/
Datum
circle_distance(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
float8 result;
result = point_dt(&circle1->center, &circle2->center)
- (circle1->radius + circle2->radius);
if (result < 0)
result = 0;
PG_RETURN_FLOAT8(result);
}
Datum
circle_contain_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
double d;
d = point_dt(&circle->center, point);
PG_RETURN_BOOL(d <= circle->radius);
}
Datum
pt_contained_circle(PG_FUNCTION_ARGS)
{
Point *point = PG_GETARG_POINT_P(0);
CIRCLE *circle = PG_GETARG_CIRCLE_P(1);
double d;
d = point_dt(&circle->center, point);
PG_RETURN_BOOL(d <= circle->radius);
}
/* dist_pc - returns the distance between
* a point and a circle.
*/
Datum
dist_pc(PG_FUNCTION_ARGS)
{
Point *point = PG_GETARG_POINT_P(0);
CIRCLE *circle = PG_GETARG_CIRCLE_P(1);
float8 result;
result = point_dt(point, &circle->center) - circle->radius;
if (result < 0)
result = 0;
PG_RETURN_FLOAT8(result);
}
/* circle_center - returns the center point of the circle.
*/
Datum
circle_center(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = circle->center.x;
result->y = circle->center.y;
PG_RETURN_POINT_P(result);
}
/* circle_ar - returns the area of the circle.
*/
static double
circle_ar(CIRCLE *circle)
{
return M_PI * (circle->radius * circle->radius);
}
/*----------------------------------------------------------
* Conversion operators.
*---------------------------------------------------------*/
Datum
cr_circle(PG_FUNCTION_ARGS)
{
Point *center = PG_GETARG_POINT_P(0);
float8 radius = PG_GETARG_FLOAT8(1);
CIRCLE *result;
result = (CIRCLE *) palloc(sizeof(CIRCLE));
result->center.x = center->x;
result->center.y = center->y;
result->radius = radius;
PG_RETURN_CIRCLE_P(result);
}
Datum
circle_box(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
BOX *box;
double delta;
box = (BOX *) palloc(sizeof(BOX));
delta = circle->radius / sqrt(2.0);
box->high.x = circle->center.x + delta;
box->low.x = circle->center.x - delta;
box->high.y = circle->center.y + delta;
box->low.y = circle->center.y - delta;
PG_RETURN_BOX_P(box);
}
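/*
 * Note on delta above: r / sqrt(2) puts the box corners exactly on the
 * circle, so this returns the largest inscribed box rather than the
 * bounding box.  Worked example (approximate values): for the unit
 * circle at the origin, delta ~ 0.7071 and the box runs from
 * (-0.7071,-0.7071) to (0.7071,0.7071).
 */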
/* box_circle()
* Convert a box to a circle.
*/
Datum
box_circle(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
CIRCLE *circle;
circle = (CIRCLE *) palloc(sizeof(CIRCLE));
circle->center.x = (box->high.x + box->low.x) / 2;
circle->center.y = (box->high.y + box->low.y) / 2;
circle->radius = point_dt(&circle->center, &box->high);
PG_RETURN_CIRCLE_P(circle);
}
Datum
circle_poly(PG_FUNCTION_ARGS)
{
int32 npts = PG_GETARG_INT32(0);
CIRCLE *circle = PG_GETARG_CIRCLE_P(1);
POLYGON *poly;
int base_size,
size;
int i;
double angle;
double anglestep;
if (FPzero(circle->radius))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot convert circle with radius zero to polygon")));
if (npts < 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("must request at least 2 points")));
base_size = sizeof(poly->p[0]) * npts;
size = offsetof(POLYGON, p[0]) + base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(poly->p[0]) || size <= base_size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
poly = (POLYGON *) palloc0(size); /* zero any holes */
SET_VARSIZE(poly, size);
poly->npts = npts;
anglestep = (2.0 * M_PI) / npts;
for (i = 0; i < npts; i++)
{
angle = i * anglestep;
poly->p[i].x = circle->center.x - (circle->radius * cos(angle));
poly->p[i].y = circle->center.y + (circle->radius * sin(angle));
}
make_bound_box(poly);
PG_RETURN_POLYGON_P(poly);
}
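/*
 * Worked example of the vertex formula above (a sketch; values exact):
 * for npts = 4 and the unit circle at the origin, anglestep = pi/2 and
 * the loop emits (-1,0), (0,1), (1,0), (0,-1) -- a diamond whose
 * vertices lie on the circle, starting at the leftmost point because
 * of the "center.x - radius * cos(angle)" sign convention.
 */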
/* poly_circle - convert polygon to circle
*
* XXX This algorithm should use weighted means of line segments
* rather than straight average values of points - tgl 97/01/21.
*/
Datum
poly_circle(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
CIRCLE *circle;
int i;
if (poly->npts < 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot convert empty polygon to circle")));
circle = (CIRCLE *) palloc(sizeof(CIRCLE));
circle->center.x = 0;
circle->center.y = 0;
circle->radius = 0;
for (i = 0; i < poly->npts; i++)
{
circle->center.x += poly->p[i].x;
circle->center.y += poly->p[i].y;
}
circle->center.x /= poly->npts;
circle->center.y /= poly->npts;
for (i = 0; i < poly->npts; i++)
circle->radius += point_dt(&poly->p[i], &circle->center);
circle->radius /= poly->npts;
if (FPzero(circle->radius))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot convert empty polygon to circle")));
PG_RETURN_CIRCLE_P(circle);
}
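/*
 * Worked example of the averaging above: for the square (0,0), (2,0),
 * (2,2), (0,2) the mean of the vertices is (1,1) and every vertex is
 * sqrt(2) away from it, so the result is center (1,1), radius sqrt(2).
 * For irregular polygons the radius is only a mean vertex distance,
 * which is why the XXX comment suggests weighting by segment length.
 */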
/***********************************************************************
**
** Private routines for multiple types.
**
***********************************************************************/
/*
* Test to see if the point is inside the polygon, returns 1/0, or 2 if
* the point is on the polygon.
* Code adapted but not copied from integer-based routines in
* WN: A Server for the HTTP, version 1.15.1, file wn/image.c
* http://hopf.math.northwestern.edu/index.html
* Description of algorithm:
* http://www.linuxjournal.com/article/2197
* http://www.linuxjournal.com/article/2029
*/
#define POINT_ON_POLYGON INT_MAX
static int
point_inside(Point *p, int npts, Point *plist)
{
double x0,
y0;
double prev_x,
prev_y;
int i = 0;
double x,
y;
int cross,
total_cross = 0;
if (npts <= 0)
return 0;
/* compute first polygon point relative to single point */
x0 = plist[0].x - p->x;
y0 = plist[0].y - p->y;
prev_x = x0;
prev_y = y0;
/* loop over polygon points and aggregate total_cross */
for (i = 1; i < npts; i++)
{
/* compute next polygon point relative to single point */
x = plist[i].x - p->x;
y = plist[i].y - p->y;
/* compute previous to current point crossing */
if ((cross = lseg_crossing(x, y, prev_x, prev_y)) == POINT_ON_POLYGON)
return 2;
total_cross += cross;
prev_x = x;
prev_y = y;
}
/* now do the first point */
if ((cross = lseg_crossing(x0, y0, prev_x, prev_y)) == POINT_ON_POLYGON)
return 2;
total_cross += cross;
if (total_cross != 0)
return 1;
return 0;
}
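/*
 * Worked example of the crossing sum (a sketch of one case only): for
 * the square (0,0), (2,0), (2,2), (0,2) and p = (1,1), the translated
 * vertices are (-1,-1), (1,-1), (1,1), (-1,1).  Only the right edge
 * (1,-1) -> (1,1) crosses the positive X axis, contributing +2; the
 * closing left edge crosses the negative X axis and contributes 0, so
 * total_cross = 2 and the point is reported inside.
 */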
/* lseg_crossing()
* Returns +/-2 if line segment crosses the positive X-axis in a +/- direction.
* Returns +/-1 if one point is on the positive X-axis.
* Returns 0 if both points are on the positive X-axis, or there is no crossing.
* Returns POINT_ON_POLYGON if the segment contains (0,0).
* Wow, that is one confusing API, but it is used above, and when summed,
* can tell us whether a point is in a polygon.
*/
static int
lseg_crossing(double x, double y, double prev_x, double prev_y)
{
double z;
int y_sign;
if (FPzero(y))
{ /* y == 0, on X axis */
if (FPzero(x)) /* (x,y) is (0,0)? */
return POINT_ON_POLYGON;
else if (FPgt(x, 0))
{ /* x > 0 */
if (FPzero(prev_y)) /* y and prev_y are zero */
/* prev_x > 0? */
return FPgt(prev_x, 0) ? 0 : POINT_ON_POLYGON;
return FPlt(prev_y, 0) ? 1 : -1;
}
else
{ /* x < 0, x not on positive X axis */
if (FPzero(prev_y))
/* prev_x < 0? */
return FPlt(prev_x, 0) ? 0 : POINT_ON_POLYGON;
return 0;
}
}
else
{ /* y != 0 */
/* compute y crossing direction from previous point */
y_sign = FPgt(y, 0) ? 1 : -1;
if (FPzero(prev_y))
/* previous point was on X axis, so new point is either off or on */
return FPlt(prev_x, 0) ? 0 : y_sign;
else if (FPgt(y_sign * prev_y, 0))
/* both above or below X axis */
return 0; /* same sign */
else
{ /* y and prev_y cross X-axis */
if (FPge(x, 0) && FPgt(prev_x, 0))
/* both non-negative so cross positive X-axis */
return 2 * y_sign;
if (FPlt(x, 0) && FPle(prev_x, 0))
/* both non-positive so do not cross positive X-axis */
return 0;
/* x and y cross axes, see URL above point_inside() */
z = (x - prev_x) * y - (y - prev_y) * x;
if (FPzero(z))
return POINT_ON_POLYGON;
return FPgt((y_sign * z), 0) ? 0 : 2 * y_sign;
}
}
}
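/*
 * A few concrete return values for the API described above, in the
 * point-relative coordinates used by point_inside(): the edge
 * (1,-1) -> (1,1) fully crosses the positive X axis going up and
 * returns +2; the same edge traversed downward returns -2; and an edge
 * ending exactly on the positive X axis returns +/-1, so the two edges
 * sharing that vertex sum to one full crossing.
 */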
static bool
plist_same(int npts, Point *p1, Point *p2)
{
int i,
ii,
j;
/* find match for first point */
for (i = 0; i < npts; i++)
{
if ((FPeq(p2[i].x, p1[0].x))
&& (FPeq(p2[i].y, p1[0].y)))
{
/* match found? then look forward through remaining points */
for (ii = 1, j = i + 1; ii < npts; ii++, j++)
{
if (j >= npts)
j = 0;
if ((!FPeq(p2[j].x, p1[ii].x))
|| (!FPeq(p2[j].y, p1[ii].y)))
{
#ifdef GEODEBUG
printf("plist_same- %d failed forward match with %d\n", j, ii);
#endif
break;
}
}
#ifdef GEODEBUG
printf("plist_same- ii = %d/%d after forward match\n", ii, npts);
#endif
if (ii == npts)
return TRUE;
/* match not found forwards? then look backwards */
for (ii = 1, j = i - 1; ii < npts; ii++, j--)
{
if (j < 0)
j = (npts - 1);
if ((!FPeq(p2[j].x, p1[ii].x))
|| (!FPeq(p2[j].y, p1[ii].y)))
{
#ifdef GEODEBUG
printf("plist_same- %d failed reverse match with %d\n", j, ii);
#endif
break;
}
}
#ifdef GEODEBUG
printf("plist_same- ii = %d/%d after reverse match\n", ii, npts);
#endif
if (ii == npts)
return TRUE;
}
}
return FALSE;
}
/*-------------------------------------------------------------------------
* Determine the hypotenuse.
*
* If required, x and y are swapped to make x the larger number. The
* traditional formula of x^2+y^2 is rearranged to factor x outside the
* sqrt. This allows computation of the hypotenuse for significantly
* larger values, and with a higher precision than when using the naive
* formula. In particular, this cannot overflow unless the final result
* would be out-of-range.
*
* sqrt( x^2 + y^2 ) = sqrt( x^2( 1 + y^2/x^2) )
* = x * sqrt( 1 + y^2/x^2 )
* = x * sqrt( 1 + y/x * y/x )
*
* It is expected that this routine will eventually be replaced with the
* C99 hypot() function.
*
* This implementation conforms to IEEE Std 1003.1 and GLIBC, in that the
* case of hypot(inf,nan) results in INF, and not NAN.
*-----------------------------------------------------------------------
*/
double
pg_hypot(double x, double y)
{
double yx;
/* Handle INF and NaN properly */
if (isinf(x) || isinf(y))
return get_float8_infinity();
if (isnan(x) || isnan(y))
return get_float8_nan();
/* Else, drop any minus signs */
x = fabs(x);
y = fabs(y);
/* Swap x and y if needed to make x the larger one */
if (x < y)
{
double temp = x;
x = y;
y = temp;
}
/*
* If y is zero, the hypotenuse is x. This test saves a few cycles in
* such cases, but more importantly it also protects against
* divide-by-zero errors, since now x >= y.
*/
if (y == 0.0)
return x;
/* Determine the hypotenuse */
yx = y / x;
return x * sqrt(1.0 + (yx * yx));
}
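/*
 * A minimal standalone sketch (not part of this file's PostgreSQL API)
 * showing why the factored form above matters: near DBL_MAX the naive
 * formula overflows to infinity while pg_hypot() stays finite.  The
 * PG_HYPOT_DEMO guard and main() are hypothetical scaffolding.
 */
#ifdef PG_HYPOT_DEMO
#include <stdio.h>
#include <float.h>
#include <math.h>

int
main(void)
{
    double big = DBL_MAX / 2.0;

    /* naive sqrt(x*x + y*y): big*big overflows to +inf before sqrt() */
    printf("naive:    %g\n", sqrt(big * big + big * big));
    /* factored form: big * sqrt(1 + 1) ~ 1.27e308, still finite */
    printf("factored: %g\n", pg_hypot(big, big));
    return 0;
}
#endif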
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2020_8 |
crossvul-cpp_data_good_5507_0 | /* inflate.c -- zlib decompression
* Copyright (C) 1995-2012 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/*
* Change history:
*
* 1.2.beta0 24 Nov 2002
* - First version -- complete rewrite of inflate to simplify code, avoid
* creation of window when not needed, minimize use of window when it is
* needed, make inffast.c even faster, implement gzip decoding, and to
* improve code readability and style over the previous zlib inflate code
*
* 1.2.beta1 25 Nov 2002
* - Use pointers for available input and output checking in inffast.c
* - Remove input and output counters in inffast.c
* - Change inffast.c entry and loop from avail_in >= 7 to >= 6
* - Remove unnecessary second byte pull from length extra in inffast.c
* - Unroll direct copy to three copies per loop in inffast.c
*
* 1.2.beta2 4 Dec 2002
* - Change external routine names to reduce potential conflicts
* - Correct filename to inffixed.h for fixed tables in inflate.c
* - Make hbuf[] unsigned char to match parameter type in inflate.c
* - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset)
* to avoid negation problem on Alphas (64 bit) in inflate.c
*
* 1.2.beta3 22 Dec 2002
* - Add comments on state->bits assertion in inffast.c
* - Add comments on op field in inftrees.h
* - Fix bug in reuse of allocated window after inflateReset()
* - Remove bit fields--back to byte structure for speed
* - Remove distance extra == 0 check in inflate_fast()--only helps for lengths
* - Change post-increments to pre-increments in inflate_fast(), PPC biased?
* - Add compile time option, POSTINC, to use post-increments instead (Intel?)
* - Make MATCH copy in inflate() much faster for when inflate_fast() not used
* - Use local copies of stream next and avail values, as well as local bit
* buffer and bit count in inflate()--for speed when inflate_fast() not used
*
* 1.2.beta4 1 Jan 2003
* - Split ptr - 257 statements in inflate_table() to avoid compiler warnings
* - Move a comment on output buffer sizes from inffast.c to inflate.c
* - Add comments in inffast.c to introduce the inflate_fast() routine
* - Rearrange window copies in inflate_fast() for speed and simplification
* - Unroll last copy for window match in inflate_fast()
* - Use local copies of window variables in inflate_fast() for speed
* - Pull out common wnext == 0 case for speed in inflate_fast()
* - Make op and len in inflate_fast() unsigned for consistency
* - Add FAR to lcode and dcode declarations in inflate_fast()
* - Simplified bad distance check in inflate_fast()
* - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new
* source file infback.c to provide a call-back interface to inflate for
* programs like gzip and unzip -- uses window as output buffer to avoid
* window copying
*
* 1.2.beta5 1 Jan 2003
* - Improved inflateBack() interface to allow the caller to provide initial
* input in strm.
* - Fixed stored blocks bug in inflateBack()
*
* 1.2.beta6 4 Jan 2003
* - Added comments in inffast.c on effectiveness of POSTINC
* - Typecasting all around to reduce compiler warnings
* - Changed loops from while (1) or do {} while (1) to for (;;), again to
* make compilers happy
* - Changed type of window in inflateBackInit() to unsigned char *
*
* 1.2.beta7 27 Jan 2003
* - Changed many types to unsigned or unsigned short to avoid warnings
* - Added inflateCopy() function
*
* 1.2.0 9 Mar 2003
* - Changed inflateBack() interface to provide separate opaque descriptors
* for the in() and out() functions
* - Changed inflateBack() argument and in_func typedef to swap the length
* and buffer address return values for the input function
* - Check next_in and next_out for Z_NULL on entry to inflate()
*
* The history for versions after 1.2.0 is in the ChangeLog in the zlib distribution.
*/
#include "zutil.h"
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
#ifdef MAKEFIXED
# ifndef BUILDFIXED
# define BUILDFIXED
# endif
#endif
/* function prototypes */
local void fixedtables OF((struct inflate_state FAR *state));
local int updatewindow OF((z_streamp strm, const unsigned char FAR *end,
unsigned copy));
#ifdef BUILDFIXED
void makefixed OF((void));
#endif
local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf,
unsigned len));
int ZEXPORT inflateResetKeep(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
strm->total_in = strm->total_out = state->total = 0;
strm->msg = Z_NULL;
if (state->wrap) /* to support ill-conceived Java test suite */
strm->adler = state->wrap & 1;
state->mode = HEAD;
state->last = 0;
state->havedict = 0;
state->dmax = 32768U;
state->head = Z_NULL;
state->hold = 0;
state->bits = 0;
state->lencode = state->distcode = state->next = state->codes;
state->sane = 1;
state->back = -1;
Tracev((stderr, "inflate: reset\n"));
return Z_OK;
}
int ZEXPORT inflateReset(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
state->wsize = 0;
state->whave = 0;
state->wnext = 0;
return inflateResetKeep(strm);
}
int ZEXPORT inflateReset2(strm, windowBits)
z_streamp strm;
int windowBits;
{
int wrap;
struct inflate_state FAR *state;
/* get the state */
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
/* extract wrap request from windowBits parameter */
if (windowBits < 0) {
wrap = 0;
windowBits = -windowBits;
}
else {
wrap = (windowBits >> 4) + 1;
#ifdef GUNZIP
if (windowBits < 48)
windowBits &= 15;
#endif
}
/* set number of window bits, free window if different */
if (windowBits && (windowBits < 8 || windowBits > 15))
return Z_STREAM_ERROR;
if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) {
ZFREE(strm, state->window);
state->window = Z_NULL;
}
/* update state and reset the rest of it */
state->wrap = wrap;
state->wbits = (unsigned)windowBits;
return inflateReset(strm);
}
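/*
 * Worked examples of the windowBits encoding decoded above (a sketch):
 * windowBits = 15 gives wrap = (15 >> 4) + 1 = 1 (zlib header),
 * 31 = 16 + 15 gives wrap = 2 (gzip header), 47 = 32 + 15 gives
 * wrap = 3 (automatic zlib/gzip detection), and a negative value such
 * as -15 gives wrap = 0 (raw deflate, no header or trailer).  In every
 * case the low four bits, in 8..15, select the window size
 * 1 << windowBits.
 */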
int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size)
z_streamp strm;
int windowBits;
const char *version;
int stream_size;
{
int ret;
struct inflate_state FAR *state;
if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
stream_size != (int)(sizeof(z_stream)))
return Z_VERSION_ERROR;
if (strm == Z_NULL) return Z_STREAM_ERROR;
strm->msg = Z_NULL; /* in case we return an error */
if (strm->zalloc == (alloc_func)0) {
#ifdef Z_SOLO
return Z_STREAM_ERROR;
#else
strm->zalloc = zcalloc;
strm->opaque = (voidpf)0;
#endif
}
if (strm->zfree == (free_func)0)
#ifdef Z_SOLO
return Z_STREAM_ERROR;
#else
strm->zfree = zcfree;
#endif
state = (struct inflate_state FAR *)
ZALLOC(strm, 1, sizeof(struct inflate_state));
if (state == Z_NULL) return Z_MEM_ERROR;
Tracev((stderr, "inflate: allocated\n"));
strm->state = (struct internal_state FAR *)state;
state->window = Z_NULL;
ret = inflateReset2(strm, windowBits);
if (ret != Z_OK) {
ZFREE(strm, state);
strm->state = Z_NULL;
}
return ret;
}
int ZEXPORT inflateInit_(strm, version, stream_size)
z_streamp strm;
const char *version;
int stream_size;
{
return inflateInit2_(strm, DEF_WBITS, version, stream_size);
}
int ZEXPORT inflatePrime(strm, bits, value)
z_streamp strm;
int bits;
int value;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if (bits < 0) {
state->hold = 0;
state->bits = 0;
return Z_OK;
}
if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR;
value &= (1L << bits) - 1;
state->hold += value << state->bits;
state->bits += bits;
return Z_OK;
}
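/*
 * Worked example for inflatePrime() (a sketch): after
 * inflatePrime(strm, 3, 5) the bit accumulator holds the three bits
 * 101 ahead of any input bytes, since value is masked to
 * (1 << 3) - 1 and shifted in above the current state->bits.
 */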
/*
Return state with length and distance decoding tables and index sizes set to
fixed code decoding. Normally this returns fixed tables from inffixed.h.
If BUILDFIXED is defined, then instead this routine builds the tables the
first time it's called, and returns those tables the first time and
thereafter. This reduces the size of the code by about 2K bytes, in
exchange for a little execution time. However, BUILDFIXED should not be
used for threaded applications, since the rewriting of the tables and of
the virgin flag may not be thread-safe.
*/
local void fixedtables(state)
struct inflate_state FAR *state;
{
#ifdef BUILDFIXED
static int virgin = 1;
static code *lenfix, *distfix;
static code fixed[544];
/* build fixed huffman tables if first call (may not be thread safe) */
if (virgin) {
unsigned sym, bits;
static code *next;
/* literal/length table */
sym = 0;
while (sym < 144) state->lens[sym++] = 8;
while (sym < 256) state->lens[sym++] = 9;
while (sym < 280) state->lens[sym++] = 7;
while (sym < 288) state->lens[sym++] = 8;
next = fixed;
lenfix = next;
bits = 9;
inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work);
/* distance table */
sym = 0;
while (sym < 32) state->lens[sym++] = 5;
distfix = next;
bits = 5;
inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work);
/* do this just once */
virgin = 0;
}
#else /* !BUILDFIXED */
# include "inffixed.h"
#endif /* BUILDFIXED */
state->lencode = lenfix;
state->lenbits = 9;
state->distcode = distfix;
state->distbits = 5;
}
#ifdef MAKEFIXED
#include <stdio.h>
/*
Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also
defines BUILDFIXED, so the tables are built on the fly. makefixed() writes
those tables to stdout, which would be piped to inffixed.h. A small program
can simply call makefixed to do this:
void makefixed(void);
int main(void)
{
makefixed();
return 0;
}
Then that can be linked with zlib built with MAKEFIXED defined and run:
a.out > inffixed.h
*/
void makefixed()
{
unsigned low, size;
struct inflate_state state;
fixedtables(&state);
puts(" /* inffixed.h -- table for decoding fixed codes");
puts(" * Generated automatically by makefixed().");
puts(" */");
puts("");
puts(" /* WARNING: this file should *not* be used by applications.");
puts(" It is part of the implementation of this library and is");
puts(" subject to change. Applications should only use zlib.h.");
puts(" */");
puts("");
size = 1U << 9;
printf(" static const code lenfix[%u] = {", size);
low = 0;
for (;;) {
if ((low % 7) == 0) printf("\n ");
printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op,
state.lencode[low].bits, state.lencode[low].val);
if (++low == size) break;
putchar(',');
}
puts("\n };");
size = 1U << 5;
printf("\n static const code distfix[%u] = {", size);
low = 0;
for (;;) {
if ((low % 6) == 0) printf("\n ");
printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits,
state.distcode[low].val);
if (++low == size) break;
putchar(',');
}
puts("\n };");
}
#endif /* MAKEFIXED */
/*
Update the window with the last wsize (normally 32K) bytes written before
returning. If window does not exist yet, create it. This is only called
when a window is already in use, or when output has been written during this
inflate call, but the end of the deflate stream has not been reached yet.
It is also called to create a window for dictionary data when a dictionary
is loaded.
Providing output buffers larger than 32K to inflate() should provide a speed
advantage, since only the last 32K of output is copied to the sliding window
upon return from inflate(), and since all distances after the first 32K of
output will fall in the output data, making match copies simpler and faster.
The advantage may be dependent on the size of the processor's data caches.
*/
local int updatewindow(strm, end, copy)
z_streamp strm;
const Bytef *end;
unsigned copy;
{
struct inflate_state FAR *state;
unsigned dist;
state = (struct inflate_state FAR *)strm->state;
/* if it hasn't been done already, allocate space for the window */
if (state->window == Z_NULL) {
state->window = (unsigned char FAR *)
ZALLOC(strm, 1U << state->wbits,
sizeof(unsigned char));
if (state->window == Z_NULL) return 1;
}
/* if window not in use yet, initialize */
if (state->wsize == 0) {
state->wsize = 1U << state->wbits;
state->wnext = 0;
state->whave = 0;
}
/* copy state->wsize or less output bytes into the circular window */
if (copy >= state->wsize) {
zmemcpy(state->window, end - state->wsize, state->wsize);
state->wnext = 0;
state->whave = state->wsize;
}
else {
dist = state->wsize - state->wnext;
if (dist > copy) dist = copy;
zmemcpy(state->window + state->wnext, end - copy, dist);
copy -= dist;
if (copy) {
zmemcpy(state->window, end - copy, copy);
state->wnext = copy;
state->whave = state->wsize;
}
else {
state->wnext += dist;
if (state->wnext == state->wsize) state->wnext = 0;
if (state->whave < state->wsize) state->whave += dist;
}
}
return 0;
}
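/*
 * Worked example of the circular-window arithmetic above (a sketch):
 * with wsize = 8, wnext = 6 and copy = 4, dist starts as 8 - 6 = 2, so
 * two bytes land at offsets 6..7, the remaining two wrap around to
 * offsets 0..1, wnext becomes 2, and whave is bumped to the full 8.
 */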
/* Macros for inflate(): */
/* check function to use adler32() for zlib or crc32() for gzip */
#ifdef GUNZIP
# define UPDATE(check, buf, len) \
(state->flags ? crc32(check, buf, len) : adler32(check, buf, len))
#else
# define UPDATE(check, buf, len) adler32(check, buf, len)
#endif
/* check macros for header crc */
#ifdef GUNZIP
# define CRC2(check, word) \
do { \
hbuf[0] = (unsigned char)(word); \
hbuf[1] = (unsigned char)((word) >> 8); \
check = crc32(check, hbuf, 2); \
} while (0)
# define CRC4(check, word) \
do { \
hbuf[0] = (unsigned char)(word); \
hbuf[1] = (unsigned char)((word) >> 8); \
hbuf[2] = (unsigned char)((word) >> 16); \
hbuf[3] = (unsigned char)((word) >> 24); \
check = crc32(check, hbuf, 4); \
} while (0)
#endif
/* Load registers with state in inflate() for speed */
#define LOAD() \
do { \
put = strm->next_out; \
left = strm->avail_out; \
next = strm->next_in; \
have = strm->avail_in; \
hold = state->hold; \
bits = state->bits; \
} while (0)
/* Restore state from registers in inflate() */
#define RESTORE() \
do { \
strm->next_out = put; \
strm->avail_out = left; \
strm->next_in = next; \
strm->avail_in = have; \
state->hold = hold; \
state->bits = bits; \
} while (0)
/* Clear the input bit accumulator */
#define INITBITS() \
do { \
hold = 0; \
bits = 0; \
} while (0)
/* Get a byte of input into the bit accumulator, or return from inflate()
if there is no input available. */
#define PULLBYTE() \
do { \
if (have == 0) goto inf_leave; \
have--; \
hold += (unsigned long)(*next++) << bits; \
bits += 8; \
} while (0)
/* Assure that there are at least n bits in the bit accumulator. If there is
not enough available input to do that, then return from inflate(). */
#define NEEDBITS(n) \
do { \
while (bits < (unsigned)(n)) \
PULLBYTE(); \
} while (0)
/* Return the low n bits of the bit accumulator (n < 16) */
#define BITS(n) \
((unsigned)hold & ((1U << (n)) - 1))
/* Remove n bits from the bit accumulator */
#define DROPBITS(n) \
do { \
hold >>= (n); \
bits -= (unsigned)(n); \
} while (0)
/* Remove zero to seven bits as needed to go to a byte boundary */
#define BYTEBITS() \
do { \
hold >>= bits & 7; \
bits -= bits & 7; \
} while (0)
/*
inflate() uses a state machine to process as much input data and generate as
much output data as possible before returning. The state machine is
structured roughly as follows:
for (;;) switch (state) {
...
case STATEn:
if (not enough input data or output space to make progress)
return;
... make progress ...
state = STATEm;
break;
...
}
so when inflate() is called again, the same case is attempted again, and
if the appropriate resources are provided, the machine proceeds to the
next state. The NEEDBITS() macro is usually the way the state evaluates
whether it can proceed or should return. NEEDBITS() does the return if
the requested bits are not available. The typical use of the BITS macros
is:
NEEDBITS(n);
... do something with BITS(n) ...
DROPBITS(n);
where NEEDBITS(n) either returns from inflate() if there isn't enough
input left to load n bits into the accumulator, or it continues. BITS(n)
gives the low n bits in the accumulator. When done, DROPBITS(n) drops
the low n bits off the accumulator. INITBITS() clears the accumulator
and sets the number of available bits to zero. BYTEBITS() discards just
enough bits to put the accumulator on a byte boundary. After BYTEBITS()
and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
if there is no input available. The decoding of variable length codes uses
PULLBYTE() directly in order to pull just enough bytes to decode the next
code, and no more.
Some states loop until they get enough input, making sure that enough
state information is maintained to continue the loop where it left off
if NEEDBITS() returns in the loop. For example, want, need, and keep
would all have to actually be part of the saved state in case NEEDBITS()
returns:
case STATEw:
while (want < need) {
NEEDBITS(n);
keep[want++] = BITS(n);
DROPBITS(n);
}
state = STATEx;
case STATEx:
As shown above, if the next state is also the next case, then the break
is omitted.
A state may also return if there is not enough output space available to
complete that state. Those states are copying stored data, writing a
literal byte, and copying a matching string.
When returning, a "goto inf_leave" is used to update the total counters,
update the check value, and determine whether any progress has been made
during that inflate() call in order to return the proper return code.
Progress is defined as a change in either strm->avail_in or strm->avail_out.
When there is a window, goto inf_leave will update the window with the last
output written. If a goto inf_leave occurs in the middle of decompression
and there is no window currently, goto inf_leave will create one and copy
output to the window for the next call of inflate().
In this implementation, the flush parameter of inflate() only affects the
return code (per zlib.h). inflate() always writes as much as possible to
strm->next_out, given the space available and the provided input--the effect
documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers
the allocation of and copying into a sliding window until necessary, which
provides the effect documented in zlib.h for Z_FINISH when the entire input
stream is available. So the only thing the flush parameter actually does is:
when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
will return Z_BUF_ERROR if it has not reached the end of the stream.
*/
int ZEXPORT inflate(strm, flush)
z_streamp strm;
int flush;
{
struct inflate_state FAR *state;
z_const unsigned char FAR *next; /* next input */
unsigned char FAR *put; /* next output */
unsigned have, left; /* available input and output */
unsigned long hold; /* bit buffer */
unsigned bits; /* bits in bit buffer */
unsigned in, out; /* save starting available input and output */
unsigned copy; /* number of stored or match bytes to copy */
unsigned char FAR *from; /* where to copy match bytes from */
code here; /* current decoding table entry */
code last; /* parent table entry */
unsigned len; /* length to copy for repeats, bits to drop */
int ret; /* return code */
#ifdef GUNZIP
unsigned char hbuf[4]; /* buffer for gzip header crc calculation */
#endif
static const unsigned short order[19] = /* permutation of code lengths */
{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
if (strm == Z_NULL || strm->state == Z_NULL || strm->next_out == Z_NULL ||
(strm->next_in == Z_NULL && strm->avail_in != 0))
return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
LOAD();
in = have;
out = left;
ret = Z_OK;
for (;;)
switch (state->mode) {
case HEAD:
if (state->wrap == 0) {
state->mode = TYPEDO;
break;
}
NEEDBITS(16);
#ifdef GUNZIP
if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */
if (state->wbits == 0)
state->wbits = 15;
state->check = crc32(0L, Z_NULL, 0);
CRC2(state->check, hold);
INITBITS();
state->mode = FLAGS;
break;
}
state->flags = 0; /* expect zlib header */
if (state->head != Z_NULL)
state->head->done = -1;
if (!(state->wrap & 1) || /* check if zlib header allowed */
#else
if (
#endif
((BITS(8) << 8) + (hold >> 8)) % 31) {
strm->msg = (char *)"incorrect header check";
state->mode = BAD;
break;
}
if (BITS(4) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
DROPBITS(4);
len = BITS(4) + 8;
if (state->wbits == 0)
state->wbits = len;
else if (len > state->wbits) {
strm->msg = (char *)"invalid window size";
state->mode = BAD;
break;
}
state->dmax = 1U << len;
Tracev((stderr, "inflate: zlib header ok\n"));
strm->adler = state->check = adler32(0L, Z_NULL, 0);
state->mode = hold & 0x200 ? DICTID : TYPE;
INITBITS();
break;
#ifdef GUNZIP
case FLAGS:
NEEDBITS(16);
state->flags = (int)(hold);
if ((state->flags & 0xff) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
if (state->flags & 0xe000) {
strm->msg = (char *)"unknown header flags set";
state->mode = BAD;
break;
}
if (state->head != Z_NULL)
state->head->text = (int)((hold >> 8) & 1);
if (state->flags & 0x0200) CRC2(state->check, hold);
INITBITS();
state->mode = TIME;
case TIME:
NEEDBITS(32);
if (state->head != Z_NULL)
state->head->time = hold;
if (state->flags & 0x0200) CRC4(state->check, hold);
INITBITS();
state->mode = OS;
case OS:
NEEDBITS(16);
if (state->head != Z_NULL) {
state->head->xflags = (int)(hold & 0xff);
state->head->os = (int)(hold >> 8);
}
if (state->flags & 0x0200) CRC2(state->check, hold);
INITBITS();
state->mode = EXLEN;
case EXLEN:
if (state->flags & 0x0400) {
NEEDBITS(16);
state->length = (unsigned)(hold);
if (state->head != Z_NULL)
state->head->extra_len = (unsigned)hold;
if (state->flags & 0x0200) CRC2(state->check, hold);
INITBITS();
}
else if (state->head != Z_NULL)
state->head->extra = Z_NULL;
state->mode = EXTRA;
case EXTRA:
if (state->flags & 0x0400) {
copy = state->length;
if (copy > have) copy = have;
if (copy) {
if (state->head != Z_NULL &&
state->head->extra != Z_NULL) {
len = state->head->extra_len - state->length;
zmemcpy(state->head->extra + len, next,
len + copy > state->head->extra_max ?
state->head->extra_max - len : copy);
}
if (state->flags & 0x0200)
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
state->length -= copy;
}
if (state->length) goto inf_leave;
}
state->length = 0;
state->mode = NAME;
case NAME:
if (state->flags & 0x0800) {
if (have == 0) goto inf_leave;
copy = 0;
do {
len = (unsigned)(next[copy++]);
if (state->head != Z_NULL &&
state->head->name != Z_NULL &&
state->length < state->head->name_max)
state->head->name[state->length++] = len;
} while (len && copy < have);
if (state->flags & 0x0200)
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
if (len) goto inf_leave;
}
else if (state->head != Z_NULL)
state->head->name = Z_NULL;
state->length = 0;
state->mode = COMMENT;
case COMMENT:
if (state->flags & 0x1000) {
if (have == 0) goto inf_leave;
copy = 0;
do {
len = (unsigned)(next[copy++]);
if (state->head != Z_NULL &&
state->head->comment != Z_NULL &&
state->length < state->head->comm_max)
state->head->comment[state->length++] = len;
} while (len && copy < have);
if (state->flags & 0x0200)
state->check = crc32(state->check, next, copy);
have -= copy;
next += copy;
if (len) goto inf_leave;
}
else if (state->head != Z_NULL)
state->head->comment = Z_NULL;
state->mode = HCRC;
case HCRC:
if (state->flags & 0x0200) {
NEEDBITS(16);
if (hold != (state->check & 0xffff)) {
strm->msg = (char *)"header crc mismatch";
state->mode = BAD;
break;
}
INITBITS();
}
if (state->head != Z_NULL) {
state->head->hcrc = (int)((state->flags >> 9) & 1);
state->head->done = 1;
}
strm->adler = state->check = crc32(0L, Z_NULL, 0);
state->mode = TYPE;
break;
#endif
case DICTID:
NEEDBITS(32);
strm->adler = state->check = ZSWAP32(hold);
INITBITS();
state->mode = DICT;
case DICT:
if (state->havedict == 0) {
RESTORE();
return Z_NEED_DICT;
}
strm->adler = state->check = adler32(0L, Z_NULL, 0);
state->mode = TYPE;
case TYPE:
if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave;
case TYPEDO:
if (state->last) {
BYTEBITS();
state->mode = CHECK;
break;
}
NEEDBITS(3);
state->last = BITS(1);
DROPBITS(1);
switch (BITS(2)) {
case 0: /* stored block */
Tracev((stderr, "inflate: stored block%s\n",
state->last ? " (last)" : ""));
state->mode = STORED;
break;
case 1: /* fixed block */
fixedtables(state);
Tracev((stderr, "inflate: fixed codes block%s\n",
state->last ? " (last)" : ""));
state->mode = LEN_; /* decode codes */
if (flush == Z_TREES) {
DROPBITS(2);
goto inf_leave;
}
break;
case 2: /* dynamic block */
Tracev((stderr, "inflate: dynamic codes block%s\n",
state->last ? " (last)" : ""));
state->mode = TABLE;
break;
case 3:
strm->msg = (char *)"invalid block type";
state->mode = BAD;
}
DROPBITS(2);
break;
case STORED:
BYTEBITS(); /* go to byte boundary */
NEEDBITS(32);
if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
strm->msg = (char *)"invalid stored block lengths";
state->mode = BAD;
break;
}
state->length = (unsigned)hold & 0xffff;
Tracev((stderr, "inflate: stored length %u\n",
state->length));
INITBITS();
state->mode = COPY_;
if (flush == Z_TREES) goto inf_leave;
case COPY_:
state->mode = COPY;
case COPY:
copy = state->length;
if (copy) {
if (copy > have) copy = have;
if (copy > left) copy = left;
if (copy == 0) goto inf_leave;
zmemcpy(put, next, copy);
have -= copy;
next += copy;
left -= copy;
put += copy;
state->length -= copy;
break;
}
Tracev((stderr, "inflate: stored end\n"));
state->mode = TYPE;
break;
case TABLE:
NEEDBITS(14);
state->nlen = BITS(5) + 257;
DROPBITS(5);
state->ndist = BITS(5) + 1;
DROPBITS(5);
state->ncode = BITS(4) + 4;
DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
if (state->nlen > 286 || state->ndist > 30) {
strm->msg = (char *)"too many length or distance symbols";
state->mode = BAD;
break;
}
#endif
Tracev((stderr, "inflate: table sizes ok\n"));
state->have = 0;
state->mode = LENLENS;
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
state->lens[order[state->have++]] = (unsigned short)BITS(3);
DROPBITS(3);
}
while (state->have < 19)
state->lens[order[state->have++]] = 0;
state->next = state->codes;
state->lencode = (const code FAR *)(state->next);
state->lenbits = 7;
ret = inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid code lengths set";
state->mode = BAD;
break;
}
Tracev((stderr, "inflate: code lengths ok\n"));
state->have = 0;
state->mode = CODELENS;
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
here = state->lencode[BITS(state->lenbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if (here.val < 16) {
DROPBITS(here.bits);
state->lens[state->have++] = here.val;
}
else {
if (here.val == 16) {
NEEDBITS(here.bits + 2);
DROPBITS(here.bits);
if (state->have == 0) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
len = state->lens[state->have - 1];
copy = 3 + BITS(2);
DROPBITS(2);
}
else if (here.val == 17) {
NEEDBITS(here.bits + 3);
DROPBITS(here.bits);
len = 0;
copy = 3 + BITS(3);
DROPBITS(3);
}
else {
NEEDBITS(here.bits + 7);
DROPBITS(here.bits);
len = 0;
copy = 11 + BITS(7);
DROPBITS(7);
}
if (state->have + copy > state->nlen + state->ndist) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
while (copy--)
state->lens[state->have++] = (unsigned short)len;
}
}
/* handle error breaks in while */
if (state->mode == BAD) break;
/* check for end-of-block code (better have one) */
if (state->lens[256] == 0) {
strm->msg = (char *)"invalid code -- missing end-of-block";
state->mode = BAD;
break;
}
/* build code tables -- note: do not change the lenbits or distbits
values here (9 and 6) without reading the comments in inftrees.h
concerning the ENOUGH constants, which depend on those values */
state->next = state->codes;
state->lencode = (const code FAR *)(state->next);
state->lenbits = 9;
ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid literal/lengths set";
state->mode = BAD;
break;
}
state->distcode = (const code FAR *)(state->next);
state->distbits = 6;
ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
if (ret) {
strm->msg = (char *)"invalid distances set";
state->mode = BAD;
break;
}
Tracev((stderr, "inflate: codes ok\n"));
state->mode = LEN_;
if (flush == Z_TREES) goto inf_leave;
case LEN_:
state->mode = LEN;
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
inflate_fast(strm, out);
LOAD();
if (state->mode == TYPE)
state->back = -1;
break;
}
state->back = 0;
for (;;) {
here = state->lencode[BITS(state->lenbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if (here.op && (here.op & 0xf0) == 0) {
last = here;
for (;;) {
here = state->lencode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + here.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
state->back += last.bits;
}
DROPBITS(here.bits);
state->back += here.bits;
state->length = (unsigned)here.val;
if ((int)(here.op) == 0) {
Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
"inflate: literal '%c'\n" :
"inflate: literal 0x%02x\n", here.val));
state->mode = LIT;
break;
}
if (here.op & 32) {
Tracevv((stderr, "inflate: end of block\n"));
state->back = -1;
state->mode = TYPE;
break;
}
if (here.op & 64) {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
state->extra = (unsigned)(here.op) & 15;
state->mode = LENEXT;
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->length += BITS(state->extra);
DROPBITS(state->extra);
state->back += state->extra;
}
Tracevv((stderr, "inflate: length %u\n", state->length));
state->was = state->length;
state->mode = DIST;
case DIST:
for (;;) {
here = state->distcode[BITS(state->distbits)];
if ((unsigned)(here.bits) <= bits) break;
PULLBYTE();
}
if ((here.op & 0xf0) == 0) {
last = here;
for (;;) {
here = state->distcode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + here.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
state->back += last.bits;
}
DROPBITS(here.bits);
state->back += here.bits;
if (here.op & 64) {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
state->offset = (unsigned)here.val;
state->extra = (unsigned)(here.op) & 15;
state->mode = DISTEXT;
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->offset += BITS(state->extra);
DROPBITS(state->extra);
state->back += state->extra;
}
#ifdef INFLATE_STRICT
if (state->offset > state->dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
Tracevv((stderr, "inflate: distance %u\n", state->offset));
state->mode = MATCH;
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
if (state->offset > copy) { /* copy from window */
copy = state->offset - copy;
if (copy > state->whave) {
if (state->sane) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
Trace((stderr, "inflate.c too far\n"));
copy -= state->whave;
if (copy > state->length) copy = state->length;
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = 0;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
#endif
}
if (copy > state->wnext) {
copy -= state->wnext;
from = state->window + (state->wsize - copy);
}
else
from = state->window + (state->wnext - copy);
if (copy > state->length) copy = state->length;
}
else { /* copy from output */
from = put - state->offset;
copy = state->length;
}
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = *from++;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
case LIT:
if (left == 0) goto inf_leave;
*put++ = (unsigned char)(state->length);
left--;
state->mode = LEN;
break;
case CHECK:
if (state->wrap) {
NEEDBITS(32);
out -= left;
strm->total_out += out;
state->total += out;
if (out)
strm->adler = state->check =
UPDATE(state->check, put - out, out);
out = left;
if ((
#ifdef GUNZIP
state->flags ? hold :
#endif
ZSWAP32(hold)) != state->check) {
strm->msg = (char *)"incorrect data check";
state->mode = BAD;
break;
}
INITBITS();
Tracev((stderr, "inflate: check matches trailer\n"));
}
#ifdef GUNZIP
state->mode = LENGTH;
case LENGTH:
if (state->wrap && state->flags) {
NEEDBITS(32);
if (hold != (state->total & 0xffffffffUL)) {
strm->msg = (char *)"incorrect length check";
state->mode = BAD;
break;
}
INITBITS();
Tracev((stderr, "inflate: length matches trailer\n"));
}
#endif
state->mode = DONE;
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
case BAD:
ret = Z_DATA_ERROR;
goto inf_leave;
case MEM:
return Z_MEM_ERROR;
case SYNC:
default:
return Z_STREAM_ERROR;
}
/*
Return from inflate(), updating the total counts and the check value.
If there was no progress during the inflate() call, return a buffer
error. Call updatewindow() to create and/or update the window state.
Note: a memory error from inflate() is non-recoverable.
*/
inf_leave:
RESTORE();
if (state->wsize || (out != strm->avail_out && state->mode < BAD &&
(state->mode < CHECK || flush != Z_FINISH)))
if (updatewindow(strm, strm->next_out, out - strm->avail_out)) {
state->mode = MEM;
return Z_MEM_ERROR;
}
in -= strm->avail_in;
out -= strm->avail_out;
strm->total_in += in;
strm->total_out += out;
state->total += out;
if (state->wrap && out)
strm->adler = state->check =
UPDATE(state->check, strm->next_out - out, out);
strm->data_type = state->bits + (state->last ? 64 : 0) +
(state->mode == TYPE ? 128 : 0) +
(state->mode == LEN_ || state->mode == COPY_ ? 256 : 0);
if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
ret = Z_BUF_ERROR;
return ret;
}
int ZEXPORT inflateEnd(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0)
return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if (state->window != Z_NULL) ZFREE(strm, state->window);
ZFREE(strm, strm->state);
strm->state = Z_NULL;
Tracev((stderr, "inflate: end\n"));
return Z_OK;
}
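/*
 * A minimal usage sketch (not part of zlib itself) of the inflate()
 * state machine documented above: decompress a complete zlib stream
 * held in memory in one call.  The INFLATE_DEMO guard, the helper name
 * and the caller-supplied buffer sizes are illustrative assumptions.
 */
#ifdef INFLATE_DEMO
#include <string.h>

static int
inflate_buffer(const unsigned char *src, unsigned srclen,
               unsigned char *dst, unsigned *dstlen)
{
    z_stream strm;
    int ret;

    memset(&strm, 0, sizeof(strm));
    if (inflateInit(&strm) != Z_OK)
        return Z_MEM_ERROR;
    strm.next_in = (z_const Bytef *)src;
    strm.avail_in = srclen;
    strm.next_out = dst;
    strm.avail_out = *dstlen;
    /* all input is present, so ask for Z_FINISH in a single call;
       per the comments above, inflate() cannot return Z_OK here */
    ret = inflate(&strm, Z_FINISH);
    *dstlen -= strm.avail_out;
    inflateEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : ret;
}
#endif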
int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength)
z_streamp strm;
Bytef *dictionary;
uInt *dictLength;
{
struct inflate_state FAR *state;
/* check state */
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
/* copy dictionary */
if (state->whave && dictionary != Z_NULL) {
zmemcpy(dictionary, state->window + state->wnext,
state->whave - state->wnext);
zmemcpy(dictionary + state->whave - state->wnext,
state->window, state->wnext);
}
if (dictLength != Z_NULL)
*dictLength = state->whave;
return Z_OK;
}
int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength)
z_streamp strm;
const Bytef *dictionary;
uInt dictLength;
{
struct inflate_state FAR *state;
unsigned long dictid;
int ret;
/* check state */
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if (state->wrap != 0 && state->mode != DICT)
return Z_STREAM_ERROR;
/* check for correct dictionary identifier */
if (state->mode == DICT) {
dictid = adler32(0L, Z_NULL, 0);
dictid = adler32(dictid, dictionary, dictLength);
if (dictid != state->check)
return Z_DATA_ERROR;
}
/* copy dictionary to window using updatewindow(), which will amend the
existing dictionary if appropriate */
ret = updatewindow(strm, dictionary + dictLength, dictLength);
if (ret) {
state->mode = MEM;
return Z_MEM_ERROR;
}
state->havedict = 1;
Tracev((stderr, "inflate: dictionary set\n"));
return Z_OK;
}
int ZEXPORT inflateGetHeader(strm, head)
z_streamp strm;
gz_headerp head;
{
struct inflate_state FAR *state;
/* check state */
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if ((state->wrap & 2) == 0) return Z_STREAM_ERROR;
/* save header structure */
state->head = head;
head->done = 0;
return Z_OK;
}
/*
Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found
or when out of input. When called, *have is the number of pattern bytes
found in order so far, in 0..3. On return *have is updated to the new
state. If on return *have equals four, then the pattern was found and the
return value is how many bytes were read including the last byte of the
pattern. If *have is less than four, then the pattern has not been found
yet and the return value is len. In the latter case, syncsearch() can be
called again with more data and the *have state. *have is initialized to
zero for the first call.
*/
local unsigned syncsearch(have, buf, len)
unsigned FAR *have;
const unsigned char FAR *buf;
unsigned len;
{
unsigned got;
unsigned next;
got = *have;
next = 0;
while (next < len && got < 4) {
if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
got++;
else if (buf[next])
got = 0;
else
got = 4 - got;
next++;
}
*have = got;
return next;
}
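/*
 * Worked example for syncsearch() (a sketch): with *have = 0 and
 * buf = { 0x05, 0x00, 0x00, 0xff, 0xff, 0x12 }, the leading 0x05
 * resets the match, the next four bytes complete the 00 00 ff ff
 * pattern, and the function returns 5 with *have = 4.
 */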
int ZEXPORT inflateSync(strm)
z_streamp strm;
{
unsigned len; /* number of bytes to look at or looked at */
unsigned long in, out; /* temporary to save total_in and total_out */
unsigned char buf[4]; /* to restore bit buffer to byte string */
struct inflate_state FAR *state;
/* check parameters */
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;
/* if first time, start search in bit buffer */
if (state->mode != SYNC) {
state->mode = SYNC;
state->hold <<= state->bits & 7;
state->bits -= state->bits & 7;
len = 0;
while (state->bits >= 8) {
buf[len++] = (unsigned char)(state->hold);
state->hold >>= 8;
state->bits -= 8;
}
state->have = 0;
syncsearch(&(state->have), buf, len);
}
/* search available input */
len = syncsearch(&(state->have), strm->next_in, strm->avail_in);
strm->avail_in -= len;
strm->next_in += len;
strm->total_in += len;
/* return no joy or set up to restart inflate() on a new block */
if (state->have != 4) return Z_DATA_ERROR;
in = strm->total_in; out = strm->total_out;
inflateReset(strm);
strm->total_in = in; strm->total_out = out;
state->mode = TYPE;
return Z_OK;
}
/*
Returns true if inflate is currently at the end of a block generated by
Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
implementation to provide an additional safety check. PPP uses
Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
block. When decompressing, PPP checks that at the end of input packet,
inflate is waiting for these length bytes.
*/
int ZEXPORT inflateSyncPoint(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
return state->mode == STORED && state->bits == 0;
}
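/*
Sketch (hypothetical application helper, not part of zlib): the PPP
check described above boils down to asking whether inflate() stopped
exactly at an empty stored block boundary.

    int at_flush_point(z_streamp strm)
    {
        return inflateSyncPoint(strm) == 1;
    }
*/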
int ZEXPORT inflateCopy(dest, source)
z_streamp dest;
z_streamp source;
{
struct inflate_state FAR *state;
struct inflate_state FAR *copy;
unsigned char FAR *window;
unsigned wsize;
/* check input */
if (dest == Z_NULL || source == Z_NULL || source->state == Z_NULL ||
source->zalloc == (alloc_func)0 || source->zfree == (free_func)0)
return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)source->state;
/* allocate space */
copy = (struct inflate_state FAR *)
ZALLOC(source, 1, sizeof(struct inflate_state));
if (copy == Z_NULL) return Z_MEM_ERROR;
window = Z_NULL;
if (state->window != Z_NULL) {
window = (unsigned char FAR *)
ZALLOC(source, 1U << state->wbits, sizeof(unsigned char));
if (window == Z_NULL) {
ZFREE(source, copy);
return Z_MEM_ERROR;
}
}
/* copy state */
zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state));
if (state->lencode >= state->codes &&
state->lencode <= state->codes + ENOUGH - 1) {
copy->lencode = copy->codes + (state->lencode - state->codes);
copy->distcode = copy->codes + (state->distcode - state->codes);
}
copy->next = copy->codes + (state->next - state->codes);
if (window != Z_NULL) {
wsize = 1U << state->wbits;
zmemcpy(window, state->window, wsize);
}
copy->window = window;
dest->state = (struct internal_state FAR *)copy;
return Z_OK;
}
int ZEXPORT inflateUndermine(strm, subvert)
z_streamp strm;
int subvert;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
state = (struct inflate_state FAR *)strm->state;
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
state->sane = !subvert;
return Z_OK;
#else
state->sane = 1;
return Z_DATA_ERROR;
#endif
}
long ZEXPORT inflateMark(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL)
return (long)(((unsigned long)0 - 1) << 16);
state = (struct inflate_state FAR *)strm->state;
return (long)(((unsigned long)((long)state->back)) << 16) +
(state->mode == COPY ? state->length :
(state->mode == MATCH ? state->was - state->length : 0));
}
unsigned long ZEXPORT inflateCodesUsed(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL) return (unsigned long)-1;
state = (struct inflate_state FAR *)strm->state;
return (unsigned long)(state->next - state->codes);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5507_0 |
crossvul-cpp_data_good_2087_1 | /*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2013 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Rasmus Lerdorf <rasmus@php.net> |
| Stig Bakken <ssb@php.net> |
| Jim Winstead <jimw@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
/* gd 1.2 is copyright 1994, 1995, Quest Protein Database Center,
Cold Spring Harbor Labs. */
/* Note that there is no code from the gd package in this file */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_ini.h"
#include "ext/standard/head.h"
#include <math.h>
#include "SAPI.h"
#include "php_gd.h"
#include "ext/standard/info.h"
#include "php_open_temporary_file.h"
#if HAVE_SYS_WAIT_H
# include <sys/wait.h>
#endif
#if HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef PHP_WIN32
# include <io.h>
# include <fcntl.h>
# include <windows.h>
# include <Winuser.h>
# include <Wingdi.h>
#endif
#ifdef HAVE_GD_XPM
# include <X11/xpm.h>
#endif
# include "gd_compat.h"
static int le_gd, le_gd_font;
#if HAVE_LIBT1
#include <t1lib.h>
static int le_ps_font, le_ps_enc;
static void php_free_ps_font(zend_rsrc_list_entry *rsrc TSRMLS_DC);
static void php_free_ps_enc(zend_rsrc_list_entry *rsrc TSRMLS_DC);
#endif
#include <gd.h>
#include <gdfontt.h> /* 1 Tiny font */
#include <gdfonts.h> /* 2 Small font */
#include <gdfontmb.h> /* 3 Medium bold font */
#include <gdfontl.h> /* 4 Large font */
#include <gdfontg.h> /* 5 Giant font */
#ifdef ENABLE_GD_TTF
# ifdef HAVE_LIBFREETYPE
# include <ft2build.h>
# include FT_FREETYPE_H
# endif
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#ifdef ENABLE_GD_TTF
static void php_imagettftext_common(INTERNAL_FUNCTION_PARAMETERS, int, int);
#endif
#include "gd_ctx.c"
/* Section Filters Declarations */
/* IMPORTANT NOTE FOR NEW FILTER
* Do not forget to update:
* IMAGE_FILTER_MAX: define the last filter index
* IMAGE_FILTER_MAX_ARGS: define the largest number of arguments
* image_filter array in PHP_FUNCTION(imagefilter)
* (a sketch of these steps follows the filter declarations below)
* */
#define IMAGE_FILTER_NEGATE 0
#define IMAGE_FILTER_GRAYSCALE 1
#define IMAGE_FILTER_BRIGHTNESS 2
#define IMAGE_FILTER_CONTRAST 3
#define IMAGE_FILTER_COLORIZE 4
#define IMAGE_FILTER_EDGEDETECT 5
#define IMAGE_FILTER_EMBOSS 6
#define IMAGE_FILTER_GAUSSIAN_BLUR 7
#define IMAGE_FILTER_SELECTIVE_BLUR 8
#define IMAGE_FILTER_MEAN_REMOVAL 9
#define IMAGE_FILTER_SMOOTH 10
#define IMAGE_FILTER_PIXELATE 11
#define IMAGE_FILTER_MAX 11
#define IMAGE_FILTER_MAX_ARGS 6
static void php_image_filter_negate(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_grayscale(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_brightness(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_contrast(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_colorize(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_edgedetect(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_emboss(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_gaussian_blur(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_selective_blur(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_mean_removal(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_smooth(INTERNAL_FUNCTION_PARAMETERS);
static void php_image_filter_pixelate(INTERNAL_FUNCTION_PARAMETERS);
/* End Section filters declarations */
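/*
Sketch of the checklist above for a hypothetical IMAGE_FILTER_SEPIA
(illustrative only, no such filter exists in this module):

    #define IMAGE_FILTER_SEPIA 12
    #define IMAGE_FILTER_MAX 12            (bump the last filter index)
    static void php_image_filter_sepia(INTERNAL_FUNCTION_PARAMETERS);

then append php_image_filter_sepia to the image_filter array inside
PHP_FUNCTION(imagefilter), and raise IMAGE_FILTER_MAX_ARGS if the new
filter takes more arguments than any existing one.
*/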
static gdImagePtr _php_image_create_from_string (zval **Data, char *tn, gdImagePtr (*ioctx_func_p)() TSRMLS_DC);
static void _php_image_create_from(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, gdImagePtr (*func_p)(), gdImagePtr (*ioctx_func_p)());
static void _php_image_output(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, void (*func_p)());
static int _php_image_type(char data[8]);
static void _php_image_convert(INTERNAL_FUNCTION_PARAMETERS, int image_type);
static void _php_image_bw_convert(gdImagePtr im_org, gdIOCtx *out, int threshold);
/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO(arginfo_gd_info, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imageloadfont, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesetstyle, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, styles) /* ARRAY_INFO(0, styles, 0) */
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatetruecolor, 0)
ZEND_ARG_INFO(0, x_size)
ZEND_ARG_INFO(0, y_size)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imageistruecolor, 0)
ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagetruecolortopalette, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, ditherFlag)
ZEND_ARG_INFO(0, colorsWanted)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagepalettetotruecolor, 0)
ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolormatch, 0)
ZEND_ARG_INFO(0, im1)
ZEND_ARG_INFO(0, im2)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesetthickness, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, thickness)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagefilledellipse, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, cx)
ZEND_ARG_INFO(0, cy)
ZEND_ARG_INFO(0, w)
ZEND_ARG_INFO(0, h)
ZEND_ARG_INFO(0, color)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagefilledarc, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, cx)
ZEND_ARG_INFO(0, cy)
ZEND_ARG_INFO(0, w)
ZEND_ARG_INFO(0, h)
ZEND_ARG_INFO(0, s)
ZEND_ARG_INFO(0, e)
ZEND_ARG_INFO(0, col)
ZEND_ARG_INFO(0, style)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagealphablending, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, blend)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesavealpha, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, save)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagelayereffect, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, effect)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorallocatealpha, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_ARG_INFO(0, alpha)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorresolvealpha, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_ARG_INFO(0, alpha)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosestalpha, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_ARG_INFO(0, alpha)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorexactalpha, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_ARG_INFO(0, alpha)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecopyresampled, 0)
ZEND_ARG_INFO(0, dst_im)
ZEND_ARG_INFO(0, src_im)
ZEND_ARG_INFO(0, dst_x)
ZEND_ARG_INFO(0, dst_y)
ZEND_ARG_INFO(0, src_x)
ZEND_ARG_INFO(0, src_y)
ZEND_ARG_INFO(0, dst_w)
ZEND_ARG_INFO(0, dst_h)
ZEND_ARG_INFO(0, src_w)
ZEND_ARG_INFO(0, src_h)
ZEND_END_ARG_INFO()
#ifdef PHP_WIN32
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegrabwindow, 0, 0, 1)
ZEND_ARG_INFO(0, handle)
ZEND_ARG_INFO(0, client_area)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagegrabscreen, 0)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagerotate, 0, 0, 3)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, angle)
ZEND_ARG_INFO(0, bgdcolor)
ZEND_ARG_INFO(0, ignoretransparent)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesettile, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, tile)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesetbrush, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, brush)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecreate, 0)
ZEND_ARG_INFO(0, x_size)
ZEND_ARG_INFO(0, y_size)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagetypes, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromstring, 0)
ZEND_ARG_INFO(0, image)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgif, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#ifdef HAVE_GD_JPG
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromjpeg, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#endif
#ifdef HAVE_GD_PNG
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefrompng, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#endif
#ifdef HAVE_GD_WEBP
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromwebp, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromxbm, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#if defined(HAVE_GD_XPM)
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromxpm, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromwbmp, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd2, 0)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecreatefromgd2part, 0)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, srcX)
ZEND_ARG_INFO(0, srcY)
ZEND_ARG_INFO(0, width)
ZEND_ARG_INFO(0, height)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagexbm, 0, 0, 2)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, foreground)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegif, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#ifdef HAVE_GD_PNG
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagepng, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#endif
#ifdef HAVE_GD_WEBP
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagewebp, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
#endif
#ifdef HAVE_GD_JPG
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagejpeg, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, quality)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagewbmp, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, foreground)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegd, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagegd2, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, chunk_size)
ZEND_ARG_INFO(0, type)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagedestroy, 0)
ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorallocate, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagepalettecopy, 0)
ZEND_ARG_INFO(0, dst)
ZEND_ARG_INFO(0, src)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorat, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosest, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorclosesthwb, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolordeallocate, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorresolve, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorexact, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecolorset, 0, 0, 5)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, color)
ZEND_ARG_INFO(0, red)
ZEND_ARG_INFO(0, green)
ZEND_ARG_INFO(0, blue)
ZEND_ARG_INFO(0, alpha)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorsforindex, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagegammacorrect, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, inputgamma)
ZEND_ARG_INFO(0, outputgamma)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesetpixel, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imageline, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, x1)
ZEND_ARG_INFO(0, y1)
ZEND_ARG_INFO(0, x2)
ZEND_ARG_INFO(0, y2)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagedashedline, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, x1)
ZEND_ARG_INFO(0, y1)
ZEND_ARG_INFO(0, x2)
ZEND_ARG_INFO(0, y2)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagerectangle, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, x1)
ZEND_ARG_INFO(0, y1)
ZEND_ARG_INFO(0, x2)
ZEND_ARG_INFO(0, y2)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagefilledrectangle, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, x1)
ZEND_ARG_INFO(0, y1)
ZEND_ARG_INFO(0, x2)
ZEND_ARG_INFO(0, y2)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagearc, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, cx)
ZEND_ARG_INFO(0, cy)
ZEND_ARG_INFO(0, w)
ZEND_ARG_INFO(0, h)
ZEND_ARG_INFO(0, s)
ZEND_ARG_INFO(0, e)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imageellipse, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, cx)
ZEND_ARG_INFO(0, cy)
ZEND_ARG_INFO(0, w)
ZEND_ARG_INFO(0, h)
ZEND_ARG_INFO(0, color)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagefilltoborder, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, border)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagefill, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecolorstotal, 0)
ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecolortransparent, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imageinterlace, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, interlace)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagepolygon, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, points) /* ARRAY_INFO(0, points, 0) */
ZEND_ARG_INFO(0, num_pos)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagefilledpolygon, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, points) /* ARRAY_INFO(0, points, 0) */
ZEND_ARG_INFO(0, num_pos)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagefontwidth, 0)
ZEND_ARG_INFO(0, font)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagefontheight, 0)
ZEND_ARG_INFO(0, font)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagechar, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, font)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, c)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecharup, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, font)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, c)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagestring, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, font)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, str)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagestringup, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, font)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, str)
ZEND_ARG_INFO(0, col)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecopy, 0)
ZEND_ARG_INFO(0, dst_im)
ZEND_ARG_INFO(0, src_im)
ZEND_ARG_INFO(0, dst_x)
ZEND_ARG_INFO(0, dst_y)
ZEND_ARG_INFO(0, src_x)
ZEND_ARG_INFO(0, src_y)
ZEND_ARG_INFO(0, src_w)
ZEND_ARG_INFO(0, src_h)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecopymerge, 0)
ZEND_ARG_INFO(0, src_im)
ZEND_ARG_INFO(0, dst_im)
ZEND_ARG_INFO(0, dst_x)
ZEND_ARG_INFO(0, dst_y)
ZEND_ARG_INFO(0, src_x)
ZEND_ARG_INFO(0, src_y)
ZEND_ARG_INFO(0, src_w)
ZEND_ARG_INFO(0, src_h)
ZEND_ARG_INFO(0, pct)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecopymergegray, 0)
ZEND_ARG_INFO(0, src_im)
ZEND_ARG_INFO(0, dst_im)
ZEND_ARG_INFO(0, dst_x)
ZEND_ARG_INFO(0, dst_y)
ZEND_ARG_INFO(0, src_x)
ZEND_ARG_INFO(0, src_y)
ZEND_ARG_INFO(0, src_w)
ZEND_ARG_INFO(0, src_h)
ZEND_ARG_INFO(0, pct)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagecopyresized, 0)
ZEND_ARG_INFO(0, dst_im)
ZEND_ARG_INFO(0, src_im)
ZEND_ARG_INFO(0, dst_x)
ZEND_ARG_INFO(0, dst_y)
ZEND_ARG_INFO(0, src_x)
ZEND_ARG_INFO(0, src_y)
ZEND_ARG_INFO(0, dst_w)
ZEND_ARG_INFO(0, dst_h)
ZEND_ARG_INFO(0, src_w)
ZEND_ARG_INFO(0, src_h)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesx, 0)
ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesy, 0)
ZEND_ARG_INFO(0, im)
ZEND_END_ARG_INFO()
#ifdef ENABLE_GD_TTF
#if HAVE_LIBFREETYPE
ZEND_BEGIN_ARG_INFO_EX(arginfo_imageftbbox, 0, 0, 4)
ZEND_ARG_INFO(0, size)
ZEND_ARG_INFO(0, angle)
ZEND_ARG_INFO(0, font_file)
ZEND_ARG_INFO(0, text)
ZEND_ARG_INFO(0, extrainfo) /* ARRAY_INFO(0, extrainfo, 0) */
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagefttext, 0, 0, 8)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, size)
ZEND_ARG_INFO(0, angle)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, col)
ZEND_ARG_INFO(0, font_file)
ZEND_ARG_INFO(0, text)
ZEND_ARG_INFO(0, extrainfo) /* ARRAY_INFO(0, extrainfo, 0) */
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO(arginfo_imagettfbbox, 0)
ZEND_ARG_INFO(0, size)
ZEND_ARG_INFO(0, angle)
ZEND_ARG_INFO(0, font_file)
ZEND_ARG_INFO(0, text)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagettftext, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, size)
ZEND_ARG_INFO(0, angle)
ZEND_ARG_INFO(0, x)
ZEND_ARG_INFO(0, y)
ZEND_ARG_INFO(0, col)
ZEND_ARG_INFO(0, font_file)
ZEND_ARG_INFO(0, text)
ZEND_END_ARG_INFO()
#endif
#ifdef HAVE_LIBT1
ZEND_BEGIN_ARG_INFO(arginfo_imagepsloadfont, 0)
ZEND_ARG_INFO(0, pathname)
ZEND_END_ARG_INFO()
/*
ZEND_BEGIN_ARG_INFO(arginfo_imagepscopyfont, 0)
ZEND_ARG_INFO(0, font_index)
ZEND_END_ARG_INFO()
*/
ZEND_BEGIN_ARG_INFO(arginfo_imagepsfreefont, 0)
ZEND_ARG_INFO(0, font_index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagepsencodefont, 0)
ZEND_ARG_INFO(0, font_index)
ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagepsextendfont, 0)
ZEND_ARG_INFO(0, font_index)
ZEND_ARG_INFO(0, extend)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagepsslantfont, 0)
ZEND_ARG_INFO(0, font_index)
ZEND_ARG_INFO(0, slant)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagepstext, 0, 0, 8)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, text)
ZEND_ARG_INFO(0, font)
ZEND_ARG_INFO(0, size)
ZEND_ARG_INFO(0, foreground)
ZEND_ARG_INFO(0, background)
ZEND_ARG_INFO(0, xcoord)
ZEND_ARG_INFO(0, ycoord)
ZEND_ARG_INFO(0, space)
ZEND_ARG_INFO(0, tightness)
ZEND_ARG_INFO(0, angle)
ZEND_ARG_INFO(0, antialias)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagepsbbox, 0, 0, 3)
ZEND_ARG_INFO(0, text)
ZEND_ARG_INFO(0, font)
ZEND_ARG_INFO(0, size)
ZEND_ARG_INFO(0, space)
ZEND_ARG_INFO(0, tightness)
ZEND_ARG_INFO(0, angle)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO_EX(arginfo_image2wbmp, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, threshold)
ZEND_END_ARG_INFO()
#if defined(HAVE_GD_JPG)
ZEND_BEGIN_ARG_INFO(arginfo_jpeg2wbmp, 0)
ZEND_ARG_INFO(0, f_org)
ZEND_ARG_INFO(0, f_dest)
ZEND_ARG_INFO(0, d_height)
ZEND_ARG_INFO(0, d_width)
ZEND_ARG_INFO(0, d_threshold)
ZEND_END_ARG_INFO()
#endif
#if defined(HAVE_GD_PNG)
ZEND_BEGIN_ARG_INFO(arginfo_png2wbmp, 0)
ZEND_ARG_INFO(0, f_org)
ZEND_ARG_INFO(0, f_dest)
ZEND_ARG_INFO(0, d_height)
ZEND_ARG_INFO(0, d_width)
ZEND_ARG_INFO(0, d_threshold)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagefilter, 0, 0, 2)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, filtertype)
ZEND_ARG_INFO(0, arg1)
ZEND_ARG_INFO(0, arg2)
ZEND_ARG_INFO(0, arg3)
ZEND_ARG_INFO(0, arg4)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imageconvolution, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, matrix3x3) /* ARRAY_INFO(0, matrix3x3, 0) */
ZEND_ARG_INFO(0, div)
ZEND_ARG_INFO(0, offset)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imageflip, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, mode)
ZEND_END_ARG_INFO()
#ifdef HAVE_GD_BUNDLED
ZEND_BEGIN_ARG_INFO(arginfo_imageantialias, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, on)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO(arginfo_imagecrop, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, rect)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagecropauto, 0, 0, 1)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, mode)
ZEND_ARG_INFO(0, threshold)
ZEND_ARG_INFO(0, color)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imagescale, 0, 0, 2)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, new_width)
ZEND_ARG_INFO(0, new_height)
ZEND_ARG_INFO(0, mode)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imageaffine, 0, 0, 2)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, affine)
ZEND_ARG_INFO(0, clip)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_imageaffinematrixget, 0, 0, 1)
ZEND_ARG_INFO(0, type)
ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imageaffinematrixconcat, 0)
ZEND_ARG_INFO(0, m1)
ZEND_ARG_INFO(0, m2)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_imagesetinterpolation, 0)
ZEND_ARG_INFO(0, im)
ZEND_ARG_INFO(0, method)
ZEND_END_ARG_INFO()
/* }}} */
/* {{{ gd_functions[]
*/
const zend_function_entry gd_functions[] = {
PHP_FE(gd_info, arginfo_gd_info)
PHP_FE(imagearc, arginfo_imagearc)
PHP_FE(imageellipse, arginfo_imageellipse)
PHP_FE(imagechar, arginfo_imagechar)
PHP_FE(imagecharup, arginfo_imagecharup)
PHP_FE(imagecolorat, arginfo_imagecolorat)
PHP_FE(imagecolorallocate, arginfo_imagecolorallocate)
PHP_FE(imagepalettecopy, arginfo_imagepalettecopy)
PHP_FE(imagecreatefromstring, arginfo_imagecreatefromstring)
PHP_FE(imagecolorclosest, arginfo_imagecolorclosest)
PHP_FE(imagecolorclosesthwb, arginfo_imagecolorclosesthwb)
PHP_FE(imagecolordeallocate, arginfo_imagecolordeallocate)
PHP_FE(imagecolorresolve, arginfo_imagecolorresolve)
PHP_FE(imagecolorexact, arginfo_imagecolorexact)
PHP_FE(imagecolorset, arginfo_imagecolorset)
PHP_FE(imagecolortransparent, arginfo_imagecolortransparent)
PHP_FE(imagecolorstotal, arginfo_imagecolorstotal)
PHP_FE(imagecolorsforindex, arginfo_imagecolorsforindex)
PHP_FE(imagecopy, arginfo_imagecopy)
PHP_FE(imagecopymerge, arginfo_imagecopymerge)
PHP_FE(imagecopymergegray, arginfo_imagecopymergegray)
PHP_FE(imagecopyresized, arginfo_imagecopyresized)
PHP_FE(imagecreate, arginfo_imagecreate)
PHP_FE(imagecreatetruecolor, arginfo_imagecreatetruecolor)
PHP_FE(imageistruecolor, arginfo_imageistruecolor)
PHP_FE(imagetruecolortopalette, arginfo_imagetruecolortopalette)
PHP_FE(imagepalettetotruecolor, arginfo_imagepalettetotruecolor)
PHP_FE(imagesetthickness, arginfo_imagesetthickness)
PHP_FE(imagefilledarc, arginfo_imagefilledarc)
PHP_FE(imagefilledellipse, arginfo_imagefilledellipse)
PHP_FE(imagealphablending, arginfo_imagealphablending)
PHP_FE(imagesavealpha, arginfo_imagesavealpha)
PHP_FE(imagecolorallocatealpha, arginfo_imagecolorallocatealpha)
PHP_FE(imagecolorresolvealpha, arginfo_imagecolorresolvealpha)
PHP_FE(imagecolorclosestalpha, arginfo_imagecolorclosestalpha)
PHP_FE(imagecolorexactalpha, arginfo_imagecolorexactalpha)
PHP_FE(imagecopyresampled, arginfo_imagecopyresampled)
#ifdef PHP_WIN32
PHP_FE(imagegrabwindow, arginfo_imagegrabwindow)
PHP_FE(imagegrabscreen, arginfo_imagegrabscreen)
#endif
PHP_FE(imagerotate, arginfo_imagerotate)
PHP_FE(imageflip, arginfo_imageflip)
#ifdef HAVE_GD_BUNDLED
PHP_FE(imageantialias, arginfo_imageantialias)
#endif
PHP_FE(imagecrop, arginfo_imagecrop)
PHP_FE(imagecropauto, arginfo_imagecropauto)
PHP_FE(imagescale, arginfo_imagescale)
PHP_FE(imageaffine, arginfo_imageaffine)
PHP_FE(imageaffinematrixconcat, arginfo_imageaffinematrixconcat)
PHP_FE(imageaffinematrixget, arginfo_imageaffinematrixget)
PHP_FE(imagesetinterpolation, arginfo_imagesetinterpolation)
PHP_FE(imagesettile, arginfo_imagesettile)
PHP_FE(imagesetbrush, arginfo_imagesetbrush)
PHP_FE(imagesetstyle, arginfo_imagesetstyle)
#ifdef HAVE_GD_PNG
PHP_FE(imagecreatefrompng, arginfo_imagecreatefrompng)
#endif
#ifdef HAVE_GD_WEBP
PHP_FE(imagecreatefromwebp, arginfo_imagecreatefromwebp)
#endif
PHP_FE(imagecreatefromgif, arginfo_imagecreatefromgif)
#ifdef HAVE_GD_JPG
PHP_FE(imagecreatefromjpeg, arginfo_imagecreatefromjpeg)
#endif
PHP_FE(imagecreatefromwbmp, arginfo_imagecreatefromwbmp)
PHP_FE(imagecreatefromxbm, arginfo_imagecreatefromxbm)
#if defined(HAVE_GD_XPM)
PHP_FE(imagecreatefromxpm, arginfo_imagecreatefromxpm)
#endif
PHP_FE(imagecreatefromgd, arginfo_imagecreatefromgd)
PHP_FE(imagecreatefromgd2, arginfo_imagecreatefromgd2)
PHP_FE(imagecreatefromgd2part, arginfo_imagecreatefromgd2part)
#ifdef HAVE_GD_PNG
PHP_FE(imagepng, arginfo_imagepng)
#endif
#ifdef HAVE_GD_WEBP
PHP_FE(imagewebp, arginfo_imagewebp)
#endif
PHP_FE(imagegif, arginfo_imagegif)
#ifdef HAVE_GD_JPG
PHP_FE(imagejpeg, arginfo_imagejpeg)
#endif
PHP_FE(imagewbmp, arginfo_imagewbmp)
PHP_FE(imagegd, arginfo_imagegd)
PHP_FE(imagegd2, arginfo_imagegd2)
PHP_FE(imagedestroy, arginfo_imagedestroy)
PHP_FE(imagegammacorrect, arginfo_imagegammacorrect)
PHP_FE(imagefill, arginfo_imagefill)
PHP_FE(imagefilledpolygon, arginfo_imagefilledpolygon)
PHP_FE(imagefilledrectangle, arginfo_imagefilledrectangle)
PHP_FE(imagefilltoborder, arginfo_imagefilltoborder)
PHP_FE(imagefontwidth, arginfo_imagefontwidth)
PHP_FE(imagefontheight, arginfo_imagefontheight)
PHP_FE(imageinterlace, arginfo_imageinterlace)
PHP_FE(imageline, arginfo_imageline)
PHP_FE(imageloadfont, arginfo_imageloadfont)
PHP_FE(imagepolygon, arginfo_imagepolygon)
PHP_FE(imagerectangle, arginfo_imagerectangle)
PHP_FE(imagesetpixel, arginfo_imagesetpixel)
PHP_FE(imagestring, arginfo_imagestring)
PHP_FE(imagestringup, arginfo_imagestringup)
PHP_FE(imagesx, arginfo_imagesx)
PHP_FE(imagesy, arginfo_imagesy)
PHP_FE(imagedashedline, arginfo_imagedashedline)
#ifdef ENABLE_GD_TTF
PHP_FE(imagettfbbox, arginfo_imagettfbbox)
PHP_FE(imagettftext, arginfo_imagettftext)
#if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE
PHP_FE(imageftbbox, arginfo_imageftbbox)
PHP_FE(imagefttext, arginfo_imagefttext)
#endif
#endif
#ifdef HAVE_LIBT1
PHP_FE(imagepsloadfont, arginfo_imagepsloadfont)
/*
PHP_FE(imagepscopyfont, arginfo_imagepscopyfont)
*/
PHP_FE(imagepsfreefont, arginfo_imagepsfreefont)
PHP_FE(imagepsencodefont, arginfo_imagepsencodefont)
PHP_FE(imagepsextendfont, arginfo_imagepsextendfont)
PHP_FE(imagepsslantfont, arginfo_imagepsslantfont)
PHP_FE(imagepstext, arginfo_imagepstext)
PHP_FE(imagepsbbox, arginfo_imagepsbbox)
#endif
PHP_FE(imagetypes, arginfo_imagetypes)
#if defined(HAVE_GD_JPG)
PHP_FE(jpeg2wbmp, arginfo_jpeg2wbmp)
#endif
#if defined(HAVE_GD_PNG)
PHP_FE(png2wbmp, arginfo_png2wbmp)
#endif
PHP_FE(image2wbmp, arginfo_image2wbmp)
PHP_FE(imagelayereffect, arginfo_imagelayereffect)
PHP_FE(imagexbm, arginfo_imagexbm)
PHP_FE(imagecolormatch, arginfo_imagecolormatch)
/* gd filters */
PHP_FE(imagefilter, arginfo_imagefilter)
PHP_FE(imageconvolution, arginfo_imageconvolution)
PHP_FE_END
};
/* }}} */
zend_module_entry gd_module_entry = {
STANDARD_MODULE_HEADER,
"gd",
gd_functions,
PHP_MINIT(gd),
#if HAVE_LIBT1
PHP_MSHUTDOWN(gd),
#else
NULL,
#endif
NULL,
#if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE
PHP_RSHUTDOWN(gd),
#else
NULL,
#endif
PHP_MINFO(gd),
NO_VERSION_YET,
STANDARD_MODULE_PROPERTIES
};
#ifdef COMPILE_DL_GD
ZEND_GET_MODULE(gd)
#endif
/* {{{ PHP_INI_BEGIN */
PHP_INI_BEGIN()
PHP_INI_ENTRY("gd.jpeg_ignore_warning", "0", PHP_INI_ALL, NULL)
PHP_INI_END()
/* }}} */
/* {{{ php_free_gd_image
*/
static void php_free_gd_image(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
gdImageDestroy((gdImagePtr) rsrc->ptr);
}
/* }}} */
/* {{{ php_free_gd_font
*/
static void php_free_gd_font(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
gdFontPtr fp = (gdFontPtr) rsrc->ptr;
if (fp->data) {
efree(fp->data);
}
efree(fp);
}
/* }}} */
#ifndef HAVE_GD_BUNDLED
/* {{{ php_gd_error_method
*/
void php_gd_error_method(int type, const char *format, va_list args)
{
TSRMLS_FETCH();
php_verror(NULL, "", type, format, args TSRMLS_CC);
}
/* }}} */
#endif
/* {{{ PHP_MSHUTDOWN_FUNCTION
*/
#if HAVE_LIBT1
PHP_MSHUTDOWN_FUNCTION(gd)
{
T1_CloseLib();
#if HAVE_GD_BUNDLED && HAVE_LIBFREETYPE
gdFontCacheMutexShutdown();
#endif
UNREGISTER_INI_ENTRIES();
return SUCCESS;
}
#endif
/* }}} */
/* {{{ PHP_MINIT_FUNCTION
*/
PHP_MINIT_FUNCTION(gd)
{
le_gd = zend_register_list_destructors_ex(php_free_gd_image, NULL, "gd", module_number);
le_gd_font = zend_register_list_destructors_ex(php_free_gd_font, NULL, "gd font", module_number);
#if HAVE_GD_BUNDLED && HAVE_LIBFREETYPE
gdFontCacheMutexSetup();
#endif
#if HAVE_LIBT1
T1_SetBitmapPad(8);
T1_InitLib(NO_LOGFILE | IGNORE_CONFIGFILE | IGNORE_FONTDATABASE);
T1_SetLogLevel(T1LOG_DEBUG);
le_ps_font = zend_register_list_destructors_ex(php_free_ps_font, NULL, "gd PS font", module_number);
le_ps_enc = zend_register_list_destructors_ex(php_free_ps_enc, NULL, "gd PS encoding", module_number);
#endif
#ifndef HAVE_GD_BUNDLED
gdSetErrorMethod(php_gd_error_method);
#endif
REGISTER_INI_ENTRIES();
REGISTER_LONG_CONSTANT("IMG_GIF", 1, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_JPG", 2, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_JPEG", 2, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_PNG", 4, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_WBMP", 8, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_XPM", 16, CONST_CS | CONST_PERSISTENT);
/* special colours for gd */
REGISTER_LONG_CONSTANT("IMG_COLOR_TILED", gdTiled, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_COLOR_STYLED", gdStyled, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_COLOR_BRUSHED", gdBrushed, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_COLOR_STYLEDBRUSHED", gdStyledBrushed, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_COLOR_TRANSPARENT", gdTransparent, CONST_CS | CONST_PERSISTENT);
/* for imagefilledarc */
REGISTER_LONG_CONSTANT("IMG_ARC_ROUNDED", gdArc, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_ARC_PIE", gdPie, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_ARC_CHORD", gdChord, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_ARC_NOFILL", gdNoFill, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_ARC_EDGED", gdEdged, CONST_CS | CONST_PERSISTENT);
/* GD2 image format types */
REGISTER_LONG_CONSTANT("IMG_GD2_RAW", GD2_FMT_RAW, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_GD2_COMPRESSED", GD2_FMT_COMPRESSED, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FLIP_HORIZONTAL", GD_FLIP_HORINZONTAL, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FLIP_VERTICAL", GD_FLIP_VERTICAL, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FLIP_BOTH", GD_FLIP_BOTH, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_EFFECT_REPLACE", gdEffectReplace, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_EFFECT_ALPHABLEND", gdEffectAlphaBlend, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_EFFECT_NORMAL", gdEffectNormal, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_EFFECT_OVERLAY", gdEffectOverlay, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_CROP_DEFAULT", GD_CROP_DEFAULT, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_CROP_TRANSPARENT", GD_CROP_TRANSPARENT, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_CROP_BLACK", GD_CROP_BLACK, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_CROP_WHITE", GD_CROP_WHITE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_CROP_SIDES", GD_CROP_SIDES, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_CROP_THRESHOLD", GD_CROP_THRESHOLD, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_BELL", GD_BELL, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_BESSEL", GD_BESSEL, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_BILINEAR_FIXED", GD_BILINEAR_FIXED, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_BICUBIC", GD_BICUBIC, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_BICUBIC_FIXED", GD_BICUBIC_FIXED, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_BLACKMAN", GD_BLACKMAN, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_BOX", GD_BOX, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_BSPLINE", GD_BSPLINE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_CATMULLROM", GD_CATMULLROM, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_GAUSSIAN", GD_GAUSSIAN, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_GENERALIZED_CUBIC", GD_GENERALIZED_CUBIC, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_HERMITE", GD_HERMITE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_HAMMING", GD_HAMMING, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_HANNING", GD_HANNING, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_MITCHELL", GD_MITCHELL, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_POWER", GD_POWER, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_QUADRATIC", GD_QUADRATIC, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_SINC", GD_SINC, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_NEAREST_NEIGHBOUR", GD_NEAREST_NEIGHBOUR, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_WEIGHTED4", GD_WEIGHTED4, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_TRIANGLE", GD_TRIANGLE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_AFFINE_TRANSLATE", GD_AFFINE_TRANSLATE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_AFFINE_SCALE", GD_AFFINE_SCALE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_AFFINE_ROTATE", GD_AFFINE_ROTATE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_AFFINE_SHEAR_HORIZONTAL", GD_AFFINE_SHEAR_HORIZONTAL, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_AFFINE_SHEAR_VERTICAL", GD_AFFINE_SHEAR_VERTICAL, CONST_CS | CONST_PERSISTENT);
#if defined(HAVE_GD_BUNDLED)
REGISTER_LONG_CONSTANT("GD_BUNDLED", 1, CONST_CS | CONST_PERSISTENT);
#else
REGISTER_LONG_CONSTANT("GD_BUNDLED", 0, CONST_CS | CONST_PERSISTENT);
#endif
/* Section Filters */
REGISTER_LONG_CONSTANT("IMG_FILTER_NEGATE", IMAGE_FILTER_NEGATE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_GRAYSCALE", IMAGE_FILTER_GRAYSCALE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_BRIGHTNESS", IMAGE_FILTER_BRIGHTNESS, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_CONTRAST", IMAGE_FILTER_CONTRAST, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_COLORIZE", IMAGE_FILTER_COLORIZE, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_EDGEDETECT", IMAGE_FILTER_EDGEDETECT, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_GAUSSIAN_BLUR", IMAGE_FILTER_GAUSSIAN_BLUR, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_SELECTIVE_BLUR", IMAGE_FILTER_SELECTIVE_BLUR, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_EMBOSS", IMAGE_FILTER_EMBOSS, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_MEAN_REMOVAL", IMAGE_FILTER_MEAN_REMOVAL, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_SMOOTH", IMAGE_FILTER_SMOOTH, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("IMG_FILTER_PIXELATE", IMAGE_FILTER_PIXELATE, CONST_CS | CONST_PERSISTENT);
/* End Section Filters */
#ifdef GD_VERSION_STRING
REGISTER_STRING_CONSTANT("GD_VERSION", GD_VERSION_STRING, CONST_CS | CONST_PERSISTENT);
#endif
#if defined(GD_MAJOR_VERSION) && defined(GD_MINOR_VERSION) && defined(GD_RELEASE_VERSION) && defined(GD_EXTRA_VERSION)
REGISTER_LONG_CONSTANT("GD_MAJOR_VERSION", GD_MAJOR_VERSION, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("GD_MINOR_VERSION", GD_MINOR_VERSION, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("GD_RELEASE_VERSION", GD_RELEASE_VERSION, CONST_CS | CONST_PERSISTENT);
REGISTER_STRING_CONSTANT("GD_EXTRA_VERSION", GD_EXTRA_VERSION, CONST_CS | CONST_PERSISTENT);
#endif
#ifdef HAVE_GD_PNG
/*
* Cannot #include "png.h" here: /usr/include/pngconf.h:310:2 raises
* "#error png.h already includes setjmp.h with some additional fixup",
* so use the literal filter values for now.
*/
REGISTER_LONG_CONSTANT("PNG_NO_FILTER", 0x00, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("PNG_FILTER_NONE", 0x08, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("PNG_FILTER_SUB", 0x10, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("PNG_FILTER_UP", 0x20, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("PNG_FILTER_AVG", 0x40, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("PNG_FILTER_PAETH", 0x80, CONST_CS | CONST_PERSISTENT);
REGISTER_LONG_CONSTANT("PNG_ALL_FILTERS", 0x08 | 0x10 | 0x20 | 0x40 | 0x80, CONST_CS | CONST_PERSISTENT);
#endif
return SUCCESS;
}
/* }}} */
/* {{{ PHP_RSHUTDOWN_FUNCTION
*/
#if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE
PHP_RSHUTDOWN_FUNCTION(gd)
{
gdFontCacheShutdown();
return SUCCESS;
}
#endif
/* }}} */
#if defined(HAVE_GD_BUNDLED)
#define PHP_GD_VERSION_STRING "bundled (2.1.0 compatible)"
#else
# define PHP_GD_VERSION_STRING GD_VERSION_STRING
#endif
/* {{{ PHP_MINFO_FUNCTION
*/
PHP_MINFO_FUNCTION(gd)
{
php_info_print_table_start();
php_info_print_table_row(2, "GD Support", "enabled");
/* need to use a PHPAPI function here because this is an external module on Windows */
php_info_print_table_row(2, "GD Version", PHP_GD_VERSION_STRING);
#ifdef ENABLE_GD_TTF
php_info_print_table_row(2, "FreeType Support", "enabled");
#if HAVE_LIBFREETYPE
php_info_print_table_row(2, "FreeType Linkage", "with freetype");
{
char tmp[256];
#ifdef FREETYPE_PATCH
snprintf(tmp, sizeof(tmp), "%d.%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR, FREETYPE_PATCH);
#elif defined(FREETYPE_MAJOR)
snprintf(tmp, sizeof(tmp), "%d.%d", FREETYPE_MAJOR, FREETYPE_MINOR);
#else
snprintf(tmp, sizeof(tmp), "1.x");
#endif
php_info_print_table_row(2, "FreeType Version", tmp);
}
#else
php_info_print_table_row(2, "FreeType Linkage", "with unknown library");
#endif
#endif
#ifdef HAVE_LIBT1
php_info_print_table_row(2, "T1Lib Support", "enabled");
#endif
php_info_print_table_row(2, "GIF Read Support", "enabled");
php_info_print_table_row(2, "GIF Create Support", "enabled");
#ifdef HAVE_GD_JPG
{
php_info_print_table_row(2, "JPEG Support", "enabled");
php_info_print_table_row(2, "libJPEG Version", gdJpegGetVersionString());
}
#endif
#ifdef HAVE_GD_PNG
php_info_print_table_row(2, "PNG Support", "enabled");
php_info_print_table_row(2, "libPNG Version", gdPngGetVersionString());
#endif
php_info_print_table_row(2, "WBMP Support", "enabled");
#if defined(HAVE_GD_XPM)
php_info_print_table_row(2, "XPM Support", "enabled");
{
char tmp[12];
snprintf(tmp, sizeof(tmp), "%d", XpmLibraryVersion());
php_info_print_table_row(2, "libXpm Version", tmp);
}
#endif
php_info_print_table_row(2, "XBM Support", "enabled");
#if defined(USE_GD_JISX0208)
php_info_print_table_row(2, "JIS-mapped Japanese Font Support", "enabled");
#endif
#ifdef HAVE_GD_WEBP
php_info_print_table_row(2, "WebP Support", "enabled");
#endif
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
/* }}} */
/* {{{ proto array gd_info()
*/
PHP_FUNCTION(gd_info)
{
if (zend_parse_parameters_none() == FAILURE) {
RETURN_FALSE;
}
array_init(return_value);
add_assoc_string(return_value, "GD Version", PHP_GD_VERSION_STRING, 1);
#ifdef ENABLE_GD_TTF
add_assoc_bool(return_value, "FreeType Support", 1);
#if HAVE_LIBFREETYPE
add_assoc_string(return_value, "FreeType Linkage", "with freetype", 1);
#else
add_assoc_string(return_value, "FreeType Linkage", "with unknown library", 1);
#endif
#else
add_assoc_bool(return_value, "FreeType Support", 0);
#endif
#ifdef HAVE_LIBT1
add_assoc_bool(return_value, "T1Lib Support", 1);
#else
add_assoc_bool(return_value, "T1Lib Support", 0);
#endif
add_assoc_bool(return_value, "GIF Read Support", 1);
add_assoc_bool(return_value, "GIF Create Support", 1);
#ifdef HAVE_GD_JPG
add_assoc_bool(return_value, "JPEG Support", 1);
#else
add_assoc_bool(return_value, "JPEG Support", 0);
#endif
#ifdef HAVE_GD_PNG
add_assoc_bool(return_value, "PNG Support", 1);
#else
add_assoc_bool(return_value, "PNG Support", 0);
#endif
add_assoc_bool(return_value, "WBMP Support", 1);
#if defined(HAVE_GD_XPM)
add_assoc_bool(return_value, "XPM Support", 1);
#else
add_assoc_bool(return_value, "XPM Support", 0);
#endif
add_assoc_bool(return_value, "XBM Support", 1);
#if defined(USE_GD_JISX0208)
add_assoc_bool(return_value, "JIS-mapped Japanese Font Support", 1);
#else
add_assoc_bool(return_value, "JIS-mapped Japanese Font Support", 0);
#endif
}
/* }}} */
/* Need this for cpdf. See also comment in file.c php3i_get_le_fp() */
PHP_GD_API int phpi_get_le_gd(void)
{
return le_gd;
}
/* }}} */
#define FLIPWORD(a) (((a & 0xff000000) >> 24) | ((a & 0x00ff0000) >> 8) | ((a & 0x0000ff00) << 8) | ((a & 0x000000ff) << 24))
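/* FLIPWORD reverses the byte order of a 32-bit word, e.g.
FLIPWORD(0x11223344) == 0x44332211; imageloadfont() uses it below to
recover font headers written on a machine of the opposite endianness */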
/* {{{ proto int imageloadfont(string filename)
Load a new font */
PHP_FUNCTION(imageloadfont)
{
char *file;
int file_name, hdr_size = sizeof(gdFont) - sizeof(char *);
int ind, body_size, n = 0, b, i, body_size_check;
gdFontPtr font;
php_stream *stream;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &file, &file_name) == FAILURE) {
return;
}
stream = php_stream_open_wrapper(file, "rb", IGNORE_PATH | IGNORE_URL_WIN | REPORT_ERRORS, NULL);
if (stream == NULL) {
RETURN_FALSE;
}
/* Only supports an architecture-dependent binary dump format
* at the moment.
* The file format is like this on machines with 32-bit integers:
*
* byte 0-3: (int) number of characters in the font
* byte 4-7: (int) value of first character in the font (often 32, space)
* byte 8-11: (int) pixel width of each character
* byte 12-15: (int) pixel height of each character
* bytes 16-: (char) array with character data, one byte per pixel
* in each character, for a total of
* (nchars*width*height) bytes.
*/
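/* Worked example (hypothetical file): a font of 96 characters starting
* at ' ' (32), each 8 pixels wide and 16 high, begins with the four
* ints {96, 32, 8, 16} followed by a 96 * 8 * 16 = 12288 byte body. */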
font = (gdFontPtr) emalloc(sizeof(gdFont));
b = 0;
/* read the header as raw bytes; the offset must be in bytes, not gdFont units */
while (b < hdr_size && (n = php_stream_read(stream, (char *)font + b, hdr_size - b))) {
b += n;
}
if (!n) {
efree(font);
if (php_stream_eof(stream)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "End of file while reading header");
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error while reading header");
}
php_stream_close(stream);
RETURN_FALSE;
}
i = php_stream_tell(stream);
php_stream_seek(stream, 0, SEEK_END);
body_size_check = php_stream_tell(stream) - hdr_size;
php_stream_seek(stream, i, SEEK_SET);
body_size = font->w * font->h * font->nchars;
if (body_size != body_size_check) {
font->w = FLIPWORD(font->w);
font->h = FLIPWORD(font->h);
font->nchars = FLIPWORD(font->nchars);
body_size = font->w * font->h * font->nchars;
}
if (overflow2(font->nchars, font->h) || overflow2(font->nchars * font->h, font->w )) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error reading font, invalid font header");
efree(font);
php_stream_close(stream);
RETURN_FALSE;
}
if (body_size != body_size_check) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error reading font");
efree(font);
php_stream_close(stream);
RETURN_FALSE;
}
font->data = emalloc(body_size);
b = 0;
while (b < body_size && (n = php_stream_read(stream, &font->data[b], body_size - b))) {
b += n;
}
if (!n) {
efree(font->data);
efree(font);
if (php_stream_eof(stream)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "End of file while reading body");
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Error while reading body");
}
php_stream_close(stream);
RETURN_FALSE;
}
php_stream_close(stream);
/* Adding 5 to the font index so we will never have font indices
* that overlap with the old fonts (with indices 1-5). The first
* list index given out is always 1.
*/
ind = 5 + zend_list_insert(font, le_gd_font TSRMLS_CC);
RETURN_LONG(ind);
}
/* }}} */
/* {{{ proto bool imagesetstyle(resource im, array styles)
Set the line drawing styles for use with imageline and IMG_COLOR_STYLED. */
PHP_FUNCTION(imagesetstyle)
{
zval *IM, *styles;
gdImagePtr im;
int * stylearr;
int index;
HashPosition pos;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ra", &IM, &styles) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
/* copy the style values in the stylearr */
stylearr = safe_emalloc(sizeof(int), zend_hash_num_elements(HASH_OF(styles)), 0);
zend_hash_internal_pointer_reset_ex(HASH_OF(styles), &pos);
for (index = 0;; zend_hash_move_forward_ex(HASH_OF(styles), &pos)) {
zval ** item;
if (zend_hash_get_current_data_ex(HASH_OF(styles), (void **) &item, &pos) == FAILURE) {
break;
}
if (Z_TYPE_PP(item) != IS_LONG) {
zval lval;
lval = **item;
zval_copy_ctor(&lval);
convert_to_long(&lval);
stylearr[index++] = Z_LVAL(lval);
} else {
stylearr[index++] = Z_LVAL_PP(item);
}
}
gdImageSetStyle(im, stylearr, index);
efree(stylearr);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto resource imagecreatetruecolor(int x_size, int y_size)
Create a new true color image */
PHP_FUNCTION(imagecreatetruecolor)
{
long x_size, y_size;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ll", &x_size, &y_size) == FAILURE) {
return;
}
if (x_size <= 0 || y_size <= 0 || x_size >= INT_MAX || y_size >= INT_MAX) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid image dimensions");
RETURN_FALSE;
}
im = gdImageCreateTrueColor(x_size, y_size);
if (!im) {
RETURN_FALSE;
}
ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
}
/* }}} */
/* {{{ proto bool imageistruecolor(resource im)
Return true if the image uses truecolor */
PHP_FUNCTION(imageistruecolor)
{
zval *IM;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_BOOL(im->trueColor);
}
/* }}} */
/* {{{ proto void imagetruecolortopalette(resource im, bool ditherFlag, int colorsWanted)
Convert a true colour image to a palette based image with a number of colours, optionally using dithering. */
PHP_FUNCTION(imagetruecolortopalette)
{
zval *IM;
zend_bool dither;
long ncolors;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rbl", &IM, &dither, &ncolors) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (ncolors <= 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Number of colors has to be greater than zero");
RETURN_FALSE;
}
gdImageTrueColorToPalette(im, dither, ncolors);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagepalettetotruecolor(resource im)
Convert a palette based image to a true colour image */
PHP_FUNCTION(imagepalettetotruecolor)
{
zval *IM;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (gdImagePaletteToTrueColor(im) == 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagecolormatch(resource im1, resource im2)
Makes the colors of the palette version of an image more closely match the true color version */
PHP_FUNCTION(imagecolormatch)
{
zval *IM1, *IM2;
gdImagePtr im1, im2;
int result;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &IM1, &IM2) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im1, gdImagePtr, &IM1, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(im2, gdImagePtr, &IM2, -1, "Image", le_gd);
result = gdImageColorMatch(im1, im2);
switch (result) {
case -1:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Image1 must be TrueColor" );
RETURN_FALSE;
break;
case -2:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Image2 must be Palette" );
RETURN_FALSE;
break;
case -3:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Image1 and Image2 must be the same size" );
RETURN_FALSE;
break;
case -4:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Image2 must have at least one color" );
RETURN_FALSE;
break;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagesetthickness(resource im, int thickness)
Set line thickness for drawing lines, ellipses, rectangles, polygons etc. */
PHP_FUNCTION(imagesetthickness)
{
zval *IM;
long thick;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &thick) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageSetThickness(im, thick);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagefilledellipse(resource im, int cx, int cy, int w, int h, int color)
Draw a filled ellipse */
PHP_FUNCTION(imagefilledellipse)
{
zval *IM;
long cx, cy, w, h, color;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &cx, &cy, &w, &h, &color) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageFilledEllipse(im, cx, cy, w, h, color);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagefilledarc(resource im, int cx, int cy, int w, int h, int s, int e, int col, int style)
Draw a filled partial ellipse */
PHP_FUNCTION(imagefilledarc)
{
zval *IM;
long cx, cy, w, h, ST, E, col, style;
gdImagePtr im;
int e, st;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllllllll", &IM, &cx, &cy, &w, &h, &ST, &E, &col, &style) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
e = E;
if (e < 0) {
e %= 360;
}
st = ST;
if (st < 0) {
st %= 360;
}
gdImageFilledArc(im, cx, cy, w, h, st, e, col, style);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagealphablending(resource im, bool on)
Turn alpha blending mode on or off for the given image */
PHP_FUNCTION(imagealphablending)
{
zval *IM;
zend_bool blend;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rb", &IM, &blend) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageAlphaBlending(im, blend);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagesavealpha(resource im, bool on)
Include the alpha channel when saving the image */
PHP_FUNCTION(imagesavealpha)
{
zval *IM;
zend_bool save;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rb", &IM, &save) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageSaveAlpha(im, save);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagelayereffect(resource im, int effect)
Set the alpha blending flag to use the bundled libgd layering effects */
PHP_FUNCTION(imagelayereffect)
{
zval *IM;
long effect;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &effect) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageAlphaBlending(im, effect);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto int imagecolorallocatealpha(resource im, int red, int green, int blue, int alpha)
Allocate a color with an alpha level. Works for true color and palette based images */
PHP_FUNCTION(imagecolorallocatealpha)
{
zval *IM;
long red, green, blue, alpha;
gdImagePtr im;
int ct = (-1);
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
ct = gdImageColorAllocateAlpha(im, red, green, blue, alpha);
if (ct < 0) {
RETURN_FALSE;
}
RETURN_LONG((long)ct);
}
/* }}} */
/* {{{ proto int imagecolorresolvealpha(resource im, int red, int green, int blue, int alpha)
Resolve/Allocate a colour with an alpha level. Works for true colour and palette based images */
PHP_FUNCTION(imagecolorresolvealpha)
{
zval *IM;
long red, green, blue, alpha;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorResolveAlpha(im, red, green, blue, alpha));
}
/* }}} */
/* {{{ proto int imagecolorclosestalpha(resource im, int red, int green, int blue, int alpha)
Find the closest matching colour with alpha transparency */
PHP_FUNCTION(imagecolorclosestalpha)
{
zval *IM;
long red, green, blue, alpha;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorClosestAlpha(im, red, green, blue, alpha));
}
/* }}} */
/* {{{ proto int imagecolorexactalpha(resource im, int red, int green, int blue, int alpha)
Find exact match for colour with transparency */
PHP_FUNCTION(imagecolorexactalpha)
{
zval *IM;
long red, green, blue, alpha;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorExactAlpha(im, red, green, blue, alpha));
}
/* }}} */
/* {{{ proto bool imagecopyresampled(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int dst_w, int dst_h, int src_w, int src_h)
Copy and resize part of an image using resampling to help ensure clarity */
PHP_FUNCTION(imagecopyresampled)
{
zval *SIM, *DIM;
long SX, SY, SW, SH, DX, DY, DW, DH;
gdImagePtr im_dst, im_src;
int srcH, srcW, dstH, dstW, srcY, srcX, dstY, dstX;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrllllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &DW, &DH, &SW, &SH) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
srcX = SX;
srcY = SY;
srcH = SH;
srcW = SW;
dstX = DX;
dstY = DY;
dstH = DH;
dstW = DW;
gdImageCopyResampled(im_dst, im_src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH);
RETURN_TRUE;
}
/* }}} */
#ifdef PHP_WIN32
/* {{{ proto resource imagegrabwindow(int window_handle [, int client_area])
Grab a window or its client area using a windows handle (HWND property in COM instance) */
PHP_FUNCTION(imagegrabwindow)
{
HWND window;
long client_area = 0;
RECT rc = {0};
RECT rc_win = {0};
int Width, Height;
HDC hdc;
HDC memDC;
HBITMAP memBM;
HBITMAP hOld;
HINSTANCE handle;
long lwindow_handle;
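	/* PrintWindow() is looked up at runtime so this code still runs on
	 * Windows versions that lack it (it was introduced with Windows XP). */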
typedef BOOL (WINAPI *tPrintWindow)(HWND, HDC,UINT);
tPrintWindow pPrintWindow = 0;
	gdImagePtr im = NULL;	/* must start NULL: the clean: path tests im before any assignment */
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &lwindow_handle, &client_area) == FAILURE) {
RETURN_FALSE;
}
window = (HWND) lwindow_handle;
if (!IsWindow(window)) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Invalid window handle");
RETURN_FALSE;
}
hdc = GetDC(0);
if (client_area) {
GetClientRect(window, &rc);
Width = rc.right;
Height = rc.bottom;
} else {
GetWindowRect(window, &rc);
Width = rc.right - rc.left;
Height = rc.bottom - rc.top;
}
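	/* Round the width down to a multiple of 4, presumably to keep the
	 * bitmap scanlines DWORD-aligned. */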
Width = (Width/4)*4;
memDC = CreateCompatibleDC(hdc);
memBM = CreateCompatibleBitmap(hdc, Width, Height);
hOld = (HBITMAP) SelectObject (memDC, memBM);
handle = LoadLibrary("User32.dll");
if ( handle == 0 ) {
goto clean;
}
pPrintWindow = (tPrintWindow) GetProcAddress(handle, "PrintWindow");
if ( pPrintWindow ) {
pPrintWindow(window, memDC, (UINT) client_area);
	} else {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Windows API too old");
		FreeLibrary(handle);
		goto clean;
	}
FreeLibrary(handle);
im = gdImageCreateTrueColor(Width, Height);
if (im) {
int x,y;
		for (y=0; y < Height; y++) {
			for (x=0; x < Width; x++) {
int c = GetPixel(memDC, x,y);
gdImageSetPixel(im, x, y, gdTrueColor(GetRValue(c), GetGValue(c), GetBValue(c)));
}
}
}
clean:
SelectObject(memDC,hOld);
DeleteObject(memBM);
DeleteDC(memDC);
ReleaseDC( 0, hdc );
if (!im) {
RETURN_FALSE;
} else {
ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
}
}
/* }}} */
/* {{{ proto resource imagegrabscreen()
Grab a screenshot */
PHP_FUNCTION(imagegrabscreen)
{
HWND window = GetDesktopWindow();
RECT rc = {0};
int Width, Height;
HDC hdc;
HDC memDC;
HBITMAP memBM;
HBITMAP hOld;
typedef BOOL (WINAPI *tPrintWindow)(HWND, HDC,UINT);
tPrintWindow pPrintWindow = 0;
gdImagePtr im;
hdc = GetDC(0);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (!hdc) {
RETURN_FALSE;
}
GetWindowRect(window, &rc);
Width = rc.right - rc.left;
Height = rc.bottom - rc.top;
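	/* As in imagegrabwindow(): keep the width a multiple of 4. */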
Width = (Width/4)*4;
memDC = CreateCompatibleDC(hdc);
memBM = CreateCompatibleBitmap(hdc, Width, Height);
hOld = (HBITMAP) SelectObject (memDC, memBM);
BitBlt( memDC, 0, 0, Width, Height , hdc, rc.left, rc.top , SRCCOPY );
im = gdImageCreateTrueColor(Width, Height);
if (im) {
int x,y;
	for (y=0; y < Height; y++) {
		for (x=0; x < Width; x++) {
int c = GetPixel(memDC, x,y);
gdImageSetPixel(im, x, y, gdTrueColor(GetRValue(c), GetGValue(c), GetBValue(c)));
}
}
}
SelectObject(memDC,hOld);
DeleteObject(memBM);
DeleteDC(memDC);
ReleaseDC( 0, hdc );
if (!im) {
RETURN_FALSE;
} else {
ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
}
}
/* }}} */
#endif /* PHP_WIN32 */
/* {{{ proto resource imagerotate(resource src_im, float angle, int bgdcolor [, int ignoretransparent])
Rotate an image using a custom angle */
PHP_FUNCTION(imagerotate)
{
zval *SIM;
gdImagePtr im_dst, im_src;
double degrees;
long color;
long ignoretransparent = 0;
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rdl|l", &SIM, &degrees, &color, &ignoretransparent) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
im_dst = gdImageRotateInterpolated(im_src, (float)degrees, color);
if (im_dst != NULL) {
ZEND_REGISTER_RESOURCE(return_value, im_dst, le_gd);
} else {
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto bool imagesettile(resource image, resource tile)
Set the tile image to $tile when filling $image with the "IMG_COLOR_TILED" color */
PHP_FUNCTION(imagesettile)
{
zval *IM, *TILE;
gdImagePtr im, tile;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &IM, &TILE) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(tile, gdImagePtr, &TILE, -1, "Image", le_gd);
gdImageSetTile(im, tile);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagesetbrush(resource image, resource brush)
Set the brush image to $brush when filling $image with the "IMG_COLOR_BRUSHED" color */
PHP_FUNCTION(imagesetbrush)
{
zval *IM, *TILE;
gdImagePtr im, tile;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &IM, &TILE) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(tile, gdImagePtr, &TILE, -1, "Image", le_gd);
gdImageSetBrush(im, tile);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto resource imagecreate(int x_size, int y_size)
Create a new image */
PHP_FUNCTION(imagecreate)
{
long x_size, y_size;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ll", &x_size, &y_size) == FAILURE) {
return;
}
if (x_size <= 0 || y_size <= 0 || x_size >= INT_MAX || y_size >= INT_MAX) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid image dimensions");
RETURN_FALSE;
}
im = gdImageCreate(x_size, y_size);
if (!im) {
RETURN_FALSE;
}
ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
}
/* }}} */
/* {{{ proto int imagetypes(void)
Return the types of images supported in a bitfield - 1=GIF, 2=JPEG, 4=PNG, 8=WBMP, 16=XPM */
PHP_FUNCTION(imagetypes)
{
	int ret = 1; /* GIF support is always compiled in */
#ifdef HAVE_GD_JPG
ret |= 2;
#endif
#ifdef HAVE_GD_PNG
ret |= 4;
#endif
ret |= 8;
#if defined(HAVE_GD_XPM)
ret |= 16;
#endif
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(ret);
}
/* }}} */
/* {{{ _php_ctx_getmbi
*/
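/* Reads a WBMP-style multi-byte integer: each byte contributes its low
 * 7 bits, most significant group first; a set high bit means that more
 * bytes follow. Returns -1 on a read error. */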
static int _php_ctx_getmbi(gdIOCtx *ctx)
{
int i, mbi = 0;
do {
i = (ctx->getC)(ctx);
if (i < 0) {
return -1;
}
mbi = (mbi << 7) | (i & 0x7f);
} while (i & 0x80);
return mbi;
}
/* }}} */
/* {{{ _php_image_type
*/
static const char php_sig_gd2[3] = {'g', 'd', '2'};
static int _php_image_type (char data[8])
{
/* Based on ext/standard/image.c */
if (data == NULL) {
return -1;
}
if (!memcmp(data, php_sig_gd2, 3)) {
return PHP_GDIMG_TYPE_GD2;
} else if (!memcmp(data, php_sig_jpg, 3)) {
return PHP_GDIMG_TYPE_JPG;
} else if (!memcmp(data, php_sig_png, 3)) {
if (!memcmp(data, php_sig_png, 8)) {
return PHP_GDIMG_TYPE_PNG;
}
} else if (!memcmp(data, php_sig_gif, 3)) {
return PHP_GDIMG_TYPE_GIF;
}
else {
gdIOCtx *io_ctx;
io_ctx = gdNewDynamicCtxEx(8, data, 0);
if (io_ctx) {
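			/* A WBMP stream starts with two multi-byte integers: the type
			 * field, which must be 0, followed by the fixed-header field. */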
if (_php_ctx_getmbi(io_ctx) == 0 && _php_ctx_getmbi(io_ctx) >= 0) {
io_ctx->gd_free(io_ctx);
return PHP_GDIMG_TYPE_WBM;
} else {
io_ctx->gd_free(io_ctx);
}
}
}
return -1;
}
/* }}} */
/* {{{ _php_image_create_from_string
*/
gdImagePtr _php_image_create_from_string(zval **data, char *tn, gdImagePtr (*ioctx_func_p)() TSRMLS_DC)
{
gdImagePtr im;
gdIOCtx *io_ctx;
io_ctx = gdNewDynamicCtxEx(Z_STRLEN_PP(data), Z_STRVAL_PP(data), 0);
if (!io_ctx) {
return NULL;
}
im = (*ioctx_func_p)(io_ctx);
if (!im) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Passed data is not in '%s' format", tn);
io_ctx->gd_free(io_ctx);
return NULL;
}
io_ctx->gd_free(io_ctx);
return im;
}
/* }}} */
/* {{{ proto resource imagecreatefromstring(string image)
Create a new image from the image stream in the string */
PHP_FUNCTION(imagecreatefromstring)
{
zval **data;
gdImagePtr im;
int imtype;
char sig[8];
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z", &data) == FAILURE) {
return;
}
convert_to_string_ex(data);
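	/* Eight bytes cover the longest signature tested below (PNG uses all 8). */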
if (Z_STRLEN_PP(data) < 8) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty string or invalid image");
RETURN_FALSE;
}
memcpy(sig, Z_STRVAL_PP(data), 8);
imtype = _php_image_type(sig);
switch (imtype) {
case PHP_GDIMG_TYPE_JPG:
#ifdef HAVE_GD_JPG
im = _php_image_create_from_string(data, "JPEG", gdImageCreateFromJpegCtx TSRMLS_CC);
#else
php_error_docref(NULL TSRMLS_CC, E_WARNING, "No JPEG support in this PHP build");
RETURN_FALSE;
#endif
break;
case PHP_GDIMG_TYPE_PNG:
#ifdef HAVE_GD_PNG
im = _php_image_create_from_string(data, "PNG", gdImageCreateFromPngCtx TSRMLS_CC);
#else
php_error_docref(NULL TSRMLS_CC, E_WARNING, "No PNG support in this PHP build");
RETURN_FALSE;
#endif
break;
case PHP_GDIMG_TYPE_GIF:
im = _php_image_create_from_string(data, "GIF", gdImageCreateFromGifCtx TSRMLS_CC);
break;
case PHP_GDIMG_TYPE_WBM:
im = _php_image_create_from_string(data, "WBMP", gdImageCreateFromWBMPCtx TSRMLS_CC);
break;
case PHP_GDIMG_TYPE_GD2:
im = _php_image_create_from_string(data, "GD2", gdImageCreateFromGd2Ctx TSRMLS_CC);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Data is not in a recognized format");
RETURN_FALSE;
}
if (!im) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Couldn't create GD Image Stream out of Data");
RETURN_FALSE;
}
ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
}
/* }}} */
/* {{{ _php_image_create_from
*/
static void _php_image_create_from(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, gdImagePtr (*func_p)(), gdImagePtr (*ioctx_func_p)())
{
char *file;
int file_len;
long srcx, srcy, width, height;
gdImagePtr im = NULL;
php_stream *stream;
FILE * fp = NULL;
long ignore_warning;
if (image_type == PHP_GDIMG_TYPE_GD2PART) {
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sllll", &file, &file_len, &srcx, &srcy, &width, &height) == FAILURE) {
return;
}
if (width < 1 || height < 1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Zero width or height not allowed");
RETURN_FALSE;
}
} else {
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &file, &file_len) == FAILURE) {
return;
}
}
stream = php_stream_open_wrapper(file, "rb", REPORT_ERRORS|IGNORE_PATH|IGNORE_URL_WIN, NULL);
if (stream == NULL) {
RETURN_FALSE;
}
	/* try to avoid allocating a FILE* if the stream is not naturally a FILE* */
if (php_stream_is(stream, PHP_STREAM_IS_STDIO)) {
if (FAILURE == php_stream_cast(stream, PHP_STREAM_AS_STDIO, (void**)&fp, REPORT_ERRORS)) {
goto out_err;
}
} else if (ioctx_func_p) {
/* we can create an io context */
gdIOCtx* io_ctx;
size_t buff_size;
char *buff;
/* needs to be malloc (persistent) - GD will free() it later */
buff_size = php_stream_copy_to_mem(stream, &buff, PHP_STREAM_COPY_ALL, 1);
if (!buff_size) {
php_error_docref(NULL TSRMLS_CC, E_WARNING,"Cannot read image data");
goto out_err;
}
io_ctx = gdNewDynamicCtxEx(buff_size, buff, 0);
if (!io_ctx) {
pefree(buff, 1);
php_error_docref(NULL TSRMLS_CC, E_WARNING,"Cannot allocate GD IO context");
goto out_err;
}
if (image_type == PHP_GDIMG_TYPE_GD2PART) {
im = (*ioctx_func_p)(io_ctx, srcx, srcy, width, height);
} else {
im = (*ioctx_func_p)(io_ctx);
}
io_ctx->gd_free(io_ctx);
pefree(buff, 1);
}
else if (php_stream_can_cast(stream, PHP_STREAM_AS_STDIO)) {
		/* try to force the stream to be FILE* */
if (FAILURE == php_stream_cast(stream, PHP_STREAM_AS_STDIO | PHP_STREAM_CAST_TRY_HARD, (void **) &fp, REPORT_ERRORS)) {
goto out_err;
}
}
if (!im && fp) {
switch (image_type) {
case PHP_GDIMG_TYPE_GD2PART:
im = (*func_p)(fp, srcx, srcy, width, height);
break;
#if defined(HAVE_GD_XPM)
case PHP_GDIMG_TYPE_XPM:
im = gdImageCreateFromXpm(file);
break;
#endif
#ifdef HAVE_GD_JPG
case PHP_GDIMG_TYPE_JPG:
ignore_warning = INI_INT("gd.jpeg_ignore_warning");
im = gdImageCreateFromJpegEx(fp, ignore_warning);
break;
#endif
default:
im = (*func_p)(fp);
break;
}
fflush(fp);
}
/* register_im: */
if (im) {
ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
php_stream_close(stream);
return;
}
php_error_docref(NULL TSRMLS_CC, E_WARNING, "'%s' is not a valid %s file", file, tn);
out_err:
php_stream_close(stream);
RETURN_FALSE;
}
/* }}} */
/* {{{ proto resource imagecreatefromgif(string filename)
Create a new image from GIF file or URL */
PHP_FUNCTION(imagecreatefromgif)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GIF, "GIF", gdImageCreateFromGif, gdImageCreateFromGifCtx);
}
/* }}} */
#ifdef HAVE_GD_JPG
/* {{{ proto resource imagecreatefromjpeg(string filename)
Create a new image from JPEG file or URL */
PHP_FUNCTION(imagecreatefromjpeg)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG, "JPEG", gdImageCreateFromJpeg, gdImageCreateFromJpegCtx);
}
/* }}} */
#endif /* HAVE_GD_JPG */
#ifdef HAVE_GD_PNG
/* {{{ proto resource imagecreatefrompng(string filename)
Create a new image from PNG file or URL */
PHP_FUNCTION(imagecreatefrompng)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG, "PNG", gdImageCreateFromPng, gdImageCreateFromPngCtx);
}
/* }}} */
#endif /* HAVE_GD_PNG */
#ifdef HAVE_GD_WEBP
/* {{{ proto resource imagecreatefromwebp(string filename)
Create a new image from WEBP file or URL */
PHP_FUNCTION(imagecreatefromwebp)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WEBP, "WEBP", gdImageCreateFromWebp, gdImageCreateFromWebpCtx);
}
/* }}} */
#endif /* HAVE_GD_WEBP */
/* {{{ proto resource imagecreatefromxbm(string filename)
Create a new image from XBM file or URL */
PHP_FUNCTION(imagecreatefromxbm)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XBM, "XBM", gdImageCreateFromXbm, NULL);
}
/* }}} */
#if defined(HAVE_GD_XPM)
/* {{{ proto resource imagecreatefromxpm(string filename)
Create a new image from XPM file or URL */
PHP_FUNCTION(imagecreatefromxpm)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XPM, "XPM", gdImageCreateFromXpm, NULL);
}
/* }}} */
#endif
/* {{{ proto resource imagecreatefromwbmp(string filename)
Create a new image from WBMP file or URL */
PHP_FUNCTION(imagecreatefromwbmp)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WBM, "WBMP", gdImageCreateFromWBMP, gdImageCreateFromWBMPCtx);
}
/* }}} */
/* {{{ proto resource imagecreatefromgd(string filename)
Create a new image from GD file or URL */
PHP_FUNCTION(imagecreatefromgd)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD, "GD", gdImageCreateFromGd, gdImageCreateFromGdCtx);
}
/* }}} */
/* {{{ proto resource imagecreatefromgd2(string filename)
Create a new image from GD2 file or URL */
PHP_FUNCTION(imagecreatefromgd2)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2, "GD2", gdImageCreateFromGd2, gdImageCreateFromGd2Ctx);
}
/* }}} */
/* {{{ proto resource imagecreatefromgd2part(string filename, int srcX, int srcY, int width, int height)
Create a new image from a given part of GD2 file or URL */
PHP_FUNCTION(imagecreatefromgd2part)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2PART, "GD2", gdImageCreateFromGd2Part, gdImageCreateFromGd2PartCtx);
}
/* }}} */
/* {{{ _php_image_output
*/
static void _php_image_output(INTERNAL_FUNCTION_PARAMETERS, int image_type, char *tn, void (*func_p)())
{
zval *imgind;
char *file = NULL;
long quality = 0, type = 0;
gdImagePtr im;
char *fn = NULL;
FILE *fp;
int file_len = 0, argc = ZEND_NUM_ARGS();
int q = -1, i, t = 1;
/* The quality parameter for Wbmp stands for the threshold when called from image2wbmp() */
/* When called from imagewbmp() the quality parameter stands for the foreground color. Default: black. */
/* The quality parameter for gd2 stands for chunk size */
if (zend_parse_parameters(argc TSRMLS_CC, "r|pll", &imgind, &file, &file_len, &quality, &type) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &imgind, -1, "Image", le_gd);
if (argc > 1) {
fn = file;
if (argc == 3) {
q = quality;
}
if (argc == 4) {
t = type;
}
}
if (argc >= 2 && file_len) {
PHP_GD_CHECK_OPEN_BASEDIR(fn, "Invalid filename");
fp = VCWD_FOPEN(fn, "wb");
if (!fp) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' for writing", fn);
RETURN_FALSE;
}
switch (image_type) {
case PHP_GDIMG_CONVERT_WBM:
if (q == -1) {
q = 0;
} else if (q < 0 || q > 255) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid threshold value '%d'. It must be between 0 and 255", q);
q = 0;
}
gdImageWBMP(im, q, fp);
break;
case PHP_GDIMG_TYPE_JPG:
(*func_p)(im, fp, q);
break;
case PHP_GDIMG_TYPE_WBM:
for (i = 0; i < gdImageColorsTotal(im); i++) {
if (gdImageRed(im, i) == 0) break;
}
(*func_p)(im, i, fp);
break;
case PHP_GDIMG_TYPE_GD:
if (im->trueColor){
gdImageTrueColorToPalette(im,1,256);
}
(*func_p)(im, fp);
break;
case PHP_GDIMG_TYPE_GD2:
if (q == -1) {
q = 128;
}
(*func_p)(im, fp, q, t);
break;
default:
if (q == -1) {
q = 128;
}
(*func_p)(im, fp, q, t);
break;
}
fflush(fp);
fclose(fp);
} else {
int b;
FILE *tmp;
char buf[4096];
char *path;
tmp = php_open_temporary_file(NULL, NULL, &path TSRMLS_CC);
if (tmp == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open temporary file");
RETURN_FALSE;
}
switch (image_type) {
case PHP_GDIMG_CONVERT_WBM:
if (q == -1) {
q = 0;
} else if (q < 0 || q > 255) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid threshold value '%d'. It must be between 0 and 255", q);
q = 0;
}
gdImageWBMP(im, q, tmp);
break;
case PHP_GDIMG_TYPE_JPG:
(*func_p)(im, tmp, q);
break;
case PHP_GDIMG_TYPE_WBM:
for (i = 0; i < gdImageColorsTotal(im); i++) {
if (gdImageRed(im, i) == 0) {
break;
}
}
			(*func_p)(im, i, tmp); /* i: first palette entry with a zero red component, as in the file branch above */
break;
case PHP_GDIMG_TYPE_GD:
if (im->trueColor) {
gdImageTrueColorToPalette(im,1,256);
}
(*func_p)(im, tmp);
break;
case PHP_GDIMG_TYPE_GD2:
if (q == -1) {
q = 128;
}
(*func_p)(im, tmp, q, t);
break;
default:
(*func_p)(im, tmp);
break;
}
fseek(tmp, 0, SEEK_SET);
#if APACHE && defined(CHARSET_EBCDIC)
/* XXX this is unlikely to work any more thies@thieso.net */
/* This is a binary file already: avoid EBCDIC->ASCII conversion */
ap_bsetflag(php3_rqst->connection->client, B_EBCDIC2ASCII, 0);
#endif
while ((b = fread(buf, 1, sizeof(buf), tmp)) > 0) {
php_write(buf, b TSRMLS_CC);
}
fclose(tmp);
VCWD_UNLINK((const char *)path); /* make sure that the temporary file is removed */
efree(path);
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto int imagexbm(int im, string filename [, int foreground])
Output XBM image to browser or file */
PHP_FUNCTION(imagexbm)
{
_php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XBM, "XBM", gdImageXbmCtx);
}
/* }}} */
/* {{{ proto bool imagegif(resource im [, string filename])
Output GIF image to browser or file */
PHP_FUNCTION(imagegif)
{
_php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GIF, "GIF", gdImageGifCtx);
}
/* }}} */
#ifdef HAVE_GD_PNG
/* {{{ proto bool imagepng(resource im [, string filename])
Output PNG image to browser or file */
PHP_FUNCTION(imagepng)
{
_php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG, "PNG", gdImagePngCtxEx);
}
/* }}} */
#endif /* HAVE_GD_PNG */
#ifdef HAVE_GD_WEBP
/* {{{ proto bool imagewebp(resource im [, string filename[, quality]] )
Output WEBP image to browser or file */
PHP_FUNCTION(imagewebp)
{
_php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WEBP, "WEBP", gdImageWebpCtx);
}
/* }}} */
#endif /* HAVE_GD_WEBP */
#ifdef HAVE_GD_JPG
/* {{{ proto bool imagejpeg(resource im [, string filename [, int quality]])
Output JPEG image to browser or file */
PHP_FUNCTION(imagejpeg)
{
_php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG, "JPEG", gdImageJpegCtx);
}
/* }}} */
#endif /* HAVE_GD_JPG */
/* {{{ proto bool imagewbmp(resource im [, string filename [, int foreground]])
Output WBMP image to browser or file */
PHP_FUNCTION(imagewbmp)
{
_php_image_output_ctx(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_WBM, "WBMP", gdImageWBMPCtx);
}
/* }}} */
/* {{{ proto bool imagegd(resource im [, string filename])
Output GD image to browser or file */
PHP_FUNCTION(imagegd)
{
_php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD, "GD", gdImageGd);
}
/* }}} */
/* {{{ proto bool imagegd2(resource im [, string filename [, int chunk_size [, int type]]])
Output GD2 image to browser or file */
PHP_FUNCTION(imagegd2)
{
_php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_GD2, "GD2", gdImageGd2);
}
/* }}} */
/* {{{ proto bool imagedestroy(resource im)
Destroy an image */
PHP_FUNCTION(imagedestroy)
{
zval *IM;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
zend_list_delete(Z_LVAL_P(IM));
RETURN_TRUE;
}
/* }}} */
/* {{{ proto int imagecolorallocate(resource im, int red, int green, int blue)
Allocate a color for an image */
PHP_FUNCTION(imagecolorallocate)
{
zval *IM;
long red, green, blue;
gdImagePtr im;
int ct = (-1);
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
ct = gdImageColorAllocate(im, red, green, blue);
if (ct < 0) {
RETURN_FALSE;
}
RETURN_LONG(ct);
}
/* }}} */
/* {{{ proto void imagepalettecopy(resource dst, resource src)
Copy the palette from the src image onto the dst image */
PHP_FUNCTION(imagepalettecopy)
{
zval *dstim, *srcim;
gdImagePtr dst, src;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &dstim, &srcim) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(dst, gdImagePtr, &dstim, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(src, gdImagePtr, &srcim, -1, "Image", le_gd);
gdImagePaletteCopy(dst, src);
}
/* }}} */
/* {{{ proto int imagecolorat(resource im, int x, int y)
Get the index of the color of a pixel */
PHP_FUNCTION(imagecolorat)
{
zval *IM;
long x, y;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rll", &IM, &x, &y) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
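	/* Truecolor images store the packed ARGB value at each pixel; palette
	 * images store an index into the palette arrays. */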
if (gdImageTrueColor(im)) {
if (im->tpixels && gdImageBoundsSafe(im, x, y)) {
RETURN_LONG(gdImageTrueColorPixel(im, x, y));
} else {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%ld,%ld is out of bounds", x, y);
RETURN_FALSE;
}
} else {
if (im->pixels && gdImageBoundsSafe(im, x, y)) {
RETURN_LONG(im->pixels[y][x]);
} else {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%ld,%ld is out of bounds", x, y);
RETURN_FALSE;
}
}
}
/* }}} */
/* {{{ proto int imagecolorclosest(resource im, int red, int green, int blue)
Get the index of the closest color to the specified color */
PHP_FUNCTION(imagecolorclosest)
{
zval *IM;
long red, green, blue;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorClosest(im, red, green, blue));
}
/* }}} */
/* {{{ proto int imagecolorclosesthwb(resource im, int red, int green, int blue)
   Get the index of the color which has the hue, whiteness and blackness nearest to the given color */
PHP_FUNCTION(imagecolorclosesthwb)
{
zval *IM;
long red, green, blue;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorClosestHWB(im, red, green, blue));
}
/* }}} */
/* {{{ proto bool imagecolordeallocate(resource im, int index)
De-allocate a color for an image */
PHP_FUNCTION(imagecolordeallocate)
{
zval *IM;
long index;
int col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &index) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
/* We can return right away for a truecolor image as deallocating colours is meaningless here */
if (gdImageTrueColor(im)) {
RETURN_TRUE;
}
col = index;
if (col >= 0 && col < gdImageColorsTotal(im)) {
gdImageColorDeallocate(im, col);
RETURN_TRUE;
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Color index %d out of range", col);
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto int imagecolorresolve(resource im, int red, int green, int blue)
Get the index of the specified color or its closest possible alternative */
PHP_FUNCTION(imagecolorresolve)
{
zval *IM;
long red, green, blue;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorResolve(im, red, green, blue));
}
/* }}} */
/* {{{ proto int imagecolorexact(resource im, int red, int green, int blue)
Get the index of the specified color */
PHP_FUNCTION(imagecolorexact)
{
zval *IM;
long red, green, blue;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &red, &green, &blue) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorExact(im, red, green, blue));
}
/* }}} */
/* {{{ proto void imagecolorset(resource im, int col, int red, int green, int blue [, int alpha])
Set the color for the specified palette index */
PHP_FUNCTION(imagecolorset)
{
zval *IM;
long color, red, green, blue, alpha = 0;
int col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll|l", &IM, &color, &red, &green, &blue, &alpha) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
col = color;
if (col >= 0 && col < gdImageColorsTotal(im)) {
im->red[col] = red;
im->green[col] = green;
im->blue[col] = blue;
im->alpha[col] = alpha;
} else {
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto array imagecolorsforindex(resource im, int col)
Get the colors for an index */
PHP_FUNCTION(imagecolorsforindex)
{
zval *IM;
long index;
int col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &index) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
col = index;
if ((col >= 0 && gdImageTrueColor(im)) || (!gdImageTrueColor(im) && col >= 0 && col < gdImageColorsTotal(im))) {
array_init(return_value);
add_assoc_long(return_value,"red", gdImageRed(im,col));
add_assoc_long(return_value,"green", gdImageGreen(im,col));
add_assoc_long(return_value,"blue", gdImageBlue(im,col));
add_assoc_long(return_value,"alpha", gdImageAlpha(im,col));
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Color index %d out of range", col);
RETURN_FALSE;
}
}
/* }}} */
/* {{{ proto bool imagegammacorrect(resource im, float inputgamma, float outputgamma)
Apply a gamma correction to a GD image */
PHP_FUNCTION(imagegammacorrect)
{
zval *IM;
gdImagePtr im;
int i;
double input, output;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rdd", &IM, &input, &output) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
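	/* Per channel: normalize to [0,1], apply the input gamma, map back
	 * through the output gamma, then rescale to 0..255 with +.5 rounding. */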
if (gdImageTrueColor(im)) {
int x, y, c;
for (y = 0; y < gdImageSY(im); y++) {
for (x = 0; x < gdImageSX(im); x++) {
c = gdImageGetPixel(im, x, y);
gdImageSetPixel(im, x, y,
gdTrueColor(
(int) ((pow((pow((gdTrueColorGetRed(c) / 255.0), input)), 1.0 / output) * 255) + .5),
(int) ((pow((pow((gdTrueColorGetGreen(c) / 255.0), input)), 1.0 / output) * 255) + .5),
(int) ((pow((pow((gdTrueColorGetBlue(c) / 255.0), input)), 1.0 / output) * 255) + .5)
)
);
}
}
RETURN_TRUE;
}
for (i = 0; i < gdImageColorsTotal(im); i++) {
im->red[i] = (int)((pow((pow((im->red[i] / 255.0), input)), 1.0 / output) * 255) + .5);
im->green[i] = (int)((pow((pow((im->green[i] / 255.0), input)), 1.0 / output) * 255) + .5);
im->blue[i] = (int)((pow((pow((im->blue[i] / 255.0), input)), 1.0 / output) * 255) + .5);
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagesetpixel(resource im, int x, int y, int col)
Set a single pixel */
PHP_FUNCTION(imagesetpixel)
{
zval *IM;
long x, y, col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &x, &y, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageSetPixel(im, x, y, col);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imageline(resource im, int x1, int y1, int x2, int y2, int col)
Draw a line */
PHP_FUNCTION(imageline)
{
zval *IM;
long x1, y1, x2, y2, col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
#ifdef HAVE_GD_BUNDLED
if (im->antialias) {
gdImageAALine(im, x1, y1, x2, y2, col);
} else
#endif
{
gdImageLine(im, x1, y1, x2, y2, col);
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagedashedline(resource im, int x1, int y1, int x2, int y2, int col)
Draw a dashed line */
PHP_FUNCTION(imagedashedline)
{
zval *IM;
long x1, y1, x2, y2, col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageDashedLine(im, x1, y1, x2, y2, col);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagerectangle(resource im, int x1, int y1, int x2, int y2, int col)
Draw a rectangle */
PHP_FUNCTION(imagerectangle)
{
zval *IM;
long x1, y1, x2, y2, col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageRectangle(im, x1, y1, x2, y2, col);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagefilledrectangle(resource im, int x1, int y1, int x2, int y2, int col)
Draw a filled rectangle */
PHP_FUNCTION(imagefilledrectangle)
{
zval *IM;
long x1, y1, x2, y2, col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &x1, &y1, &x2, &y2, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageFilledRectangle(im, x1, y1, x2, y2, col);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagearc(resource im, int cx, int cy, int w, int h, int s, int e, int col)
Draw a partial ellipse */
PHP_FUNCTION(imagearc)
{
zval *IM;
long cx, cy, w, h, ST, E, col;
gdImagePtr im;
int e, st;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllllll", &IM, &cx, &cy, &w, &h, &ST, &E, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
e = E;
if (e < 0) {
e %= 360;
}
st = ST;
if (st < 0) {
st %= 360;
}
gdImageArc(im, cx, cy, w, h, st, e, col);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imageellipse(resource im, int cx, int cy, int w, int h, int color)
Draw an ellipse */
PHP_FUNCTION(imageellipse)
{
zval *IM;
long cx, cy, w, h, color;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllll", &IM, &cx, &cy, &w, &h, &color) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageEllipse(im, cx, cy, w, h, color);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagefilltoborder(resource im, int x, int y, int border, int col)
Flood fill to specific color */
PHP_FUNCTION(imagefilltoborder)
{
zval *IM;
long x, y, border, col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &x, &y, &border, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageFillToBorder(im, x, y, border, col);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagefill(resource im, int x, int y, int col)
Flood fill */
PHP_FUNCTION(imagefill)
{
zval *IM;
long x, y, col;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &x, &y, &col) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageFill(im, x, y, col);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto int imagecolorstotal(resource im)
Find out the number of colors in an image's palette */
PHP_FUNCTION(imagecolorstotal)
{
zval *IM;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorsTotal(im));
}
/* }}} */
/* {{{ proto int imagecolortransparent(resource im [, int col])
Define a color as transparent */
PHP_FUNCTION(imagecolortransparent)
{
zval *IM;
long COL = 0;
gdImagePtr im;
int argc = ZEND_NUM_ARGS();
if (zend_parse_parameters(argc TSRMLS_CC, "r|l", &IM, &COL) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (argc > 1) {
gdImageColorTransparent(im, COL);
}
RETURN_LONG(gdImageGetTransparent(im));
}
/* }}} */
/* {{{ proto int imageinterlace(resource im [, int interlace])
Enable or disable interlace */
PHP_FUNCTION(imageinterlace)
{
zval *IM;
int argc = ZEND_NUM_ARGS();
long INT = 0;
gdImagePtr im;
if (zend_parse_parameters(argc TSRMLS_CC, "r|l", &IM, &INT) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (argc > 1) {
gdImageInterlace(im, INT);
}
RETURN_LONG(gdImageGetInterlaced(im));
}
/* }}} */
/* {{{ php_imagepolygon
arg = 0 normal polygon
arg = 1 filled polygon */
/* im, points, num_points, col */
static void php_imagepolygon(INTERNAL_FUNCTION_PARAMETERS, int filled)
{
zval *IM, *POINTS;
long NPOINTS, COL;
zval **var = NULL;
gdImagePtr im;
gdPointPtr points;
int npoints, col, nelem, i;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rall", &IM, &POINTS, &NPOINTS, &COL) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
npoints = NPOINTS;
col = COL;
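	/* POINTS is a flat array of alternating x and y coordinates, so npoints
	 * points need at least npoints * 2 (and no fewer than 6) elements. */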
nelem = zend_hash_num_elements(Z_ARRVAL_P(POINTS));
if (nelem < 6) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have at least 3 points in your array");
RETURN_FALSE;
}
if (npoints <= 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must give a positive number of points");
RETURN_FALSE;
}
if (nelem < npoints * 2) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Trying to use %d points in array with only %d points", npoints, nelem/2);
RETURN_FALSE;
}
points = (gdPointPtr) safe_emalloc(npoints, sizeof(gdPoint), 0);
for (i = 0; i < npoints; i++) {
if (zend_hash_index_find(Z_ARRVAL_P(POINTS), (i * 2), (void **) &var) == SUCCESS) {
if (Z_TYPE_PP(var) != IS_LONG) {
zval lval;
lval = **var;
zval_copy_ctor(&lval);
convert_to_long(&lval);
points[i].x = Z_LVAL(lval);
} else {
points[i].x = Z_LVAL_PP(var);
}
}
if (zend_hash_index_find(Z_ARRVAL_P(POINTS), (i * 2) + 1, (void **) &var) == SUCCESS) {
if (Z_TYPE_PP(var) != IS_LONG) {
zval lval;
lval = **var;
zval_copy_ctor(&lval);
convert_to_long(&lval);
points[i].y = Z_LVAL(lval);
} else {
points[i].y = Z_LVAL_PP(var);
}
}
}
if (filled) {
gdImageFilledPolygon(im, points, npoints, col);
} else {
gdImagePolygon(im, points, npoints, col);
}
efree(points);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagepolygon(resource im, array point, int num_points, int col)
Draw a polygon */
PHP_FUNCTION(imagepolygon)
{
php_imagepolygon(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */
/* {{{ proto bool imagefilledpolygon(resource im, array point, int num_points, int col)
Draw a filled polygon */
PHP_FUNCTION(imagefilledpolygon)
{
php_imagepolygon(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */
/* {{{ php_find_gd_font
*/
static gdFontPtr php_find_gd_font(int size TSRMLS_DC)
{
gdFontPtr font;
int ind_type;
switch (size) {
case 1:
font = gdFontTiny;
break;
case 2:
font = gdFontSmall;
break;
case 3:
font = gdFontMediumBold;
break;
case 4:
font = gdFontLarge;
break;
case 5:
font = gdFontGiant;
break;
default:
font = zend_list_find(size - 5, &ind_type);
if (!font || ind_type != le_gd_font) {
if (size < 1) {
font = gdFontTiny;
} else {
font = gdFontGiant;
}
}
break;
}
return font;
}
/* }}} */
/* {{{ php_imagefontsize
* arg = 0 ImageFontWidth
* arg = 1 ImageFontHeight
*/
static void php_imagefontsize(INTERNAL_FUNCTION_PARAMETERS, int arg)
{
long SIZE;
gdFontPtr font;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &SIZE) == FAILURE) {
return;
}
font = php_find_gd_font(SIZE TSRMLS_CC);
RETURN_LONG(arg ? font->h : font->w);
}
/* }}} */
/* {{{ proto int imagefontwidth(int font)
Get font width */
PHP_FUNCTION(imagefontwidth)
{
php_imagefontsize(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */
/* {{{ proto int imagefontheight(int font)
Get font height */
PHP_FUNCTION(imagefontheight)
{
php_imagefontsize(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */
/* {{{ php_gdimagecharup
 * Draw a character rotated 90 degrees counter-clockwise by walking the
 * font bitmap transposed; workaround for a bug in gd 1.2 */
static void php_gdimagecharup(gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
int cx, cy, px, py, fline;
cx = 0;
cy = 0;
if ((c < f->offset) || (c >= (f->offset + f->nchars))) {
return;
}
fline = (c - f->offset) * f->h * f->w;
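	/* fline is the offset of this glyph's first byte in the font bitmap;
	 * the loops below walk the glyph transposed to rotate it. */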
for (py = y; (py > (y - f->w)); py--) {
for (px = x; (px < (x + f->h)); px++) {
if (f->data[fline + cy * f->w + cx]) {
gdImageSetPixel(im, px, py, color);
}
cy++;
}
cy = 0;
cx++;
}
}
/* }}} */
/* {{{ php_imagechar
* arg = 0 ImageChar
* arg = 1 ImageCharUp
* arg = 2 ImageString
* arg = 3 ImageStringUp
*/
static void php_imagechar(INTERNAL_FUNCTION_PARAMETERS, int mode)
{
zval *IM;
long SIZE, X, Y, COL;
char *C;
int C_len;
gdImagePtr im;
int ch = 0, col, x, y, size, i, l = 0;
unsigned char *str = NULL;
gdFontPtr font;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllsl", &IM, &SIZE, &X, &Y, &C, &C_len, &COL) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
col = COL;
if (mode < 2) {
ch = (int)((unsigned char)*C);
} else {
str = (unsigned char *) estrndup(C, C_len);
l = strlen((char *)str);
}
y = Y;
x = X;
size = SIZE;
font = php_find_gd_font(size TSRMLS_CC);
switch (mode) {
case 0:
gdImageChar(im, font, x, y, ch, col);
break;
case 1:
php_gdimagecharup(im, font, x, y, ch, col);
break;
case 2:
for (i = 0; (i < l); i++) {
gdImageChar(im, font, x, y, (int) ((unsigned char) str[i]), col);
x += font->w;
}
break;
case 3: {
for (i = 0; (i < l); i++) {
/* php_gdimagecharup(im, font, x, y, (int) str[i], col); */
gdImageCharUp(im, font, x, y, (int) str[i], col);
y -= font->w;
}
break;
}
}
if (str) {
efree(str);
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagechar(resource im, int font, int x, int y, string c, int col)
Draw a character */
PHP_FUNCTION(imagechar)
{
php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */
/* {{{ proto bool imagecharup(resource im, int font, int x, int y, string c, int col)
Draw a character rotated 90 degrees counter-clockwise */
PHP_FUNCTION(imagecharup)
{
php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */
/* {{{ proto bool imagestring(resource im, int font, int x, int y, string str, int col)
Draw a string horizontally */
PHP_FUNCTION(imagestring)
{
php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 2);
}
/* }}} */
/* {{{ proto bool imagestringup(resource im, int font, int x, int y, string str, int col)
Draw a string vertically - rotated 90 degrees counter-clockwise */
PHP_FUNCTION(imagestringup)
{
php_imagechar(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3);
}
/* }}} */
/* {{{ proto bool imagecopy(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h)
Copy part of an image */
PHP_FUNCTION(imagecopy)
{
zval *SIM, *DIM;
long SX, SY, SW, SH, DX, DY;
gdImagePtr im_dst, im_src;
int srcH, srcW, srcY, srcX, dstY, dstX;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);
srcX = SX;
srcY = SY;
srcH = SH;
srcW = SW;
dstX = DX;
dstY = DY;
gdImageCopy(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagecopymerge(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h, int pct)
Merge one part of an image with another */
PHP_FUNCTION(imagecopymerge)
{
zval *SIM, *DIM;
long SX, SY, SW, SH, DX, DY, PCT;
gdImagePtr im_dst, im_src;
int srcH, srcW, srcY, srcX, dstY, dstX, pct;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrlllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH, &PCT) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);
srcX = SX;
srcY = SY;
srcH = SH;
srcW = SW;
dstX = DX;
dstY = DY;
pct = PCT;
gdImageCopyMerge(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH, pct);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagecopymergegray(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int src_w, int src_h, int pct)
   Merge one part of an image with another, preserving the hue of the source by converting the destination region to grayscale first */
PHP_FUNCTION(imagecopymergegray)
{
zval *SIM, *DIM;
long SX, SY, SW, SH, DX, DY, PCT;
gdImagePtr im_dst, im_src;
int srcH, srcW, srcY, srcX, dstY, dstX, pct;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrlllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &SW, &SH, &PCT) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);
srcX = SX;
srcY = SY;
srcH = SH;
srcW = SW;
dstX = DX;
dstY = DY;
pct = PCT;
gdImageCopyMergeGray(im_dst, im_src, dstX, dstY, srcX, srcY, srcW, srcH, pct);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagecopyresized(resource dst_im, resource src_im, int dst_x, int dst_y, int src_x, int src_y, int dst_w, int dst_h, int src_w, int src_h)
Copy and resize part of an image */
PHP_FUNCTION(imagecopyresized)
{
zval *SIM, *DIM;
long SX, SY, SW, SH, DX, DY, DW, DH;
gdImagePtr im_dst, im_src;
int srcH, srcW, dstH, dstW, srcY, srcX, dstY, dstX;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rrllllllll", &DIM, &SIM, &DX, &DY, &SX, &SY, &DW, &DH, &SW, &SH) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im_dst, gdImagePtr, &DIM, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
srcX = SX;
srcY = SY;
srcH = SH;
srcW = SW;
dstX = DX;
dstY = DY;
dstH = DH;
dstW = DW;
if (dstW <= 0 || dstH <= 0 || srcW <= 0 || srcH <= 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid image dimensions");
RETURN_FALSE;
}
gdImageCopyResized(im_dst, im_src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto int imagesx(resource im)
Get image width */
PHP_FUNCTION(imagesx)
{
zval *IM;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageSX(im));
}
/* }}} */
/* {{{ proto int imagesy(resource im)
Get image height */
PHP_FUNCTION(imagesy)
{
zval *IM;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &IM) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageSY(im));
}
/* }}} */
#ifdef ENABLE_GD_TTF
#define TTFTEXT_DRAW 0
#define TTFTEXT_BBOX 1
#endif
#ifdef ENABLE_GD_TTF
#if HAVE_GD_FREETYPE && HAVE_LIBFREETYPE
/* {{{ proto array imageftbbox(float size, float angle, string font_file, string text [, array extrainfo])
Give the bounding box of a text using fonts via freetype2 */
PHP_FUNCTION(imageftbbox)
{
php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_BBOX, 1);
}
/* }}} */
/* {{{ proto array imagefttext(resource im, float size, float angle, int x, int y, int col, string font_file, string text [, array extrainfo])
Write text to the image using fonts via freetype2 */
PHP_FUNCTION(imagefttext)
{
php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_DRAW, 1);
}
/* }}} */
#endif /* HAVE_GD_FREETYPE && HAVE_LIBFREETYPE */
/* {{{ proto array imagettfbbox(float size, float angle, string font_file, string text)
Give the bounding box of a text using TrueType fonts */
PHP_FUNCTION(imagettfbbox)
{
php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_BBOX, 0);
}
/* }}} */
/* {{{ proto array imagettftext(resource im, float size, float angle, int x, int y, int col, string font_file, string text)
Write text to the image using a TrueType font */
PHP_FUNCTION(imagettftext)
{
php_imagettftext_common(INTERNAL_FUNCTION_PARAM_PASSTHRU, TTFTEXT_DRAW, 0);
}
/* }}} */
/* {{{ php_imagettftext_common
*/
static void php_imagettftext_common(INTERNAL_FUNCTION_PARAMETERS, int mode, int extended)
{
zval *IM, *EXT = NULL;
gdImagePtr im=NULL;
long col = -1, x = -1, y = -1;
int str_len, fontname_len, i, brect[8];
double ptsize, angle;
char *str = NULL, *fontname = NULL;
char *error = NULL;
int argc = ZEND_NUM_ARGS();
gdFTStringExtra strex = {0};
if (mode == TTFTEXT_BBOX) {
if (argc < 4 || argc > ((extended) ? 5 : 4)) {
ZEND_WRONG_PARAM_COUNT();
} else if (zend_parse_parameters(argc TSRMLS_CC, "ddss|a", &ptsize, &angle, &fontname, &fontname_len, &str, &str_len, &EXT) == FAILURE) {
RETURN_FALSE;
}
} else {
if (argc < 8 || argc > ((extended) ? 9 : 8)) {
ZEND_WRONG_PARAM_COUNT();
} else if (zend_parse_parameters(argc TSRMLS_CC, "rddlllss|a", &IM, &ptsize, &angle, &x, &y, &col, &fontname, &fontname_len, &str, &str_len, &EXT) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
}
/* convert angle to radians */
angle = angle * (M_PI/180);
if (extended && EXT) { /* parse extended info */
HashPosition pos;
/* walk the assoc array */
zend_hash_internal_pointer_reset_ex(HASH_OF(EXT), &pos);
do {
zval ** item;
char * key;
ulong num_key;
if (zend_hash_get_current_key_ex(HASH_OF(EXT), &key, NULL, &num_key, 0, &pos) != HASH_KEY_IS_STRING) {
continue;
}
if (zend_hash_get_current_data_ex(HASH_OF(EXT), (void **) &item, &pos) == FAILURE) {
continue;
}
if (strcmp("linespacing", key) == 0) {
convert_to_double_ex(item);
strex.flags |= gdFTEX_LINESPACE;
strex.linespacing = Z_DVAL_PP(item);
}
} while (zend_hash_move_forward_ex(HASH_OF(EXT), &pos) == SUCCESS);
}
#ifdef VIRTUAL_DIR
{
char tmp_font_path[MAXPATHLEN];
if (!VCWD_REALPATH(fontname, tmp_font_path)) {
fontname = NULL;
}
}
#endif /* VIRTUAL_DIR */
PHP_GD_CHECK_OPEN_BASEDIR(fontname, "Invalid font filename");
#ifdef HAVE_GD_FREETYPE
	if (extended) {
		error = gdImageStringFTEx(im, brect, col, fontname, ptsize, angle, x, y, str, &strex);
	} else {
		error = gdImageStringFT(im, brect, col, fontname, ptsize, angle, x, y, str);
	}
#endif /* HAVE_GD_FREETYPE */
if (error) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "%s", error);
RETURN_FALSE;
}
array_init(return_value);
/* return array with the text's bounding box */
for (i = 0; i < 8; i++) {
add_next_index_long(return_value, brect[i]);
}
}
/* }}} */
#endif /* ENABLE_GD_TTF */
#if HAVE_LIBT1
/* {{{ php_free_ps_font
*/
static void php_free_ps_font(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
int *font = (int *) rsrc->ptr;
T1_DeleteFont(*font);
efree(font);
}
/* }}} */
/* {{{ php_free_ps_enc
*/
static void php_free_ps_enc(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
char **enc = (char **) rsrc->ptr;
T1_DeleteEncoding(enc);
}
/* }}} */
/* {{{ proto resource imagepsloadfont(string pathname)
   Load a new font from the specified file */
PHP_FUNCTION(imagepsloadfont)
{
char *file;
int file_len, f_ind, *font;
#ifdef PHP_WIN32
struct stat st;
#endif
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &file, &file_len) == FAILURE) {
return;
}
#ifdef PHP_WIN32
if (VCWD_STAT(file, &st) < 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Font file not found (%s)", file);
RETURN_FALSE;
}
#endif
f_ind = T1_AddFont(file);
if (f_ind < 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "T1Lib Error (%i): %s", f_ind, T1_StrError(f_ind));
RETURN_FALSE;
}
if (T1_LoadFont(f_ind)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Couldn't load the font");
RETURN_FALSE;
}
font = (int *) emalloc(sizeof(int));
*font = f_ind;
ZEND_REGISTER_RESOURCE(return_value, font, le_ps_font);
}
/* }}} */
/* {{{ proto int imagepscopyfont(int font_index)
   Make a copy of a font for purposes such as extending or re-encoding */
/* The function in t1lib which this function uses seem to be buggy...
PHP_FUNCTION(imagepscopyfont)
{
int l_ind, type;
gd_ps_font *nf_ind, *of_ind;
long fnt;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &fnt) == FAILURE) {
return;
}
of_ind = zend_list_find(fnt, &type);
if (type != le_ps_font) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "%ld is not a Type 1 font index", fnt);
RETURN_FALSE;
}
nf_ind = emalloc(sizeof(gd_ps_font));
nf_ind->font_id = T1_CopyFont(of_ind->font_id);
if (nf_ind->font_id < 0) {
l_ind = nf_ind->font_id;
efree(nf_ind);
switch (l_ind) {
case -1:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "FontID %d is not loaded in memory", l_ind);
RETURN_FALSE;
break;
case -2:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Tried to copy a logical font");
RETURN_FALSE;
break;
case -3:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Memory allocation fault in t1lib");
RETURN_FALSE;
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "An unknown error occurred in t1lib");
RETURN_FALSE;
break;
}
}
nf_ind->extend = 1;
l_ind = zend_list_insert(nf_ind, le_ps_font TSRMLS_CC);
RETURN_LONG(l_ind);
}
*/
/* }}} */
/* {{{ proto bool imagepsfreefont(resource font_index)
Free memory used by a font */
PHP_FUNCTION(imagepsfreefont)
{
zval *fnt;
int *f_ind;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &fnt) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font);
zend_list_delete(Z_LVAL_P(fnt));
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagepsencodefont(resource font_index, string filename)
   Change a font's character encoding vector */
PHP_FUNCTION(imagepsencodefont)
{
zval *fnt;
char *enc, **enc_vector;
int enc_len, *f_ind;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs", &fnt, &enc, &enc_len) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font);
if ((enc_vector = T1_LoadEncoding(enc)) == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Couldn't load encoding vector from %s", enc);
RETURN_FALSE;
}
T1_DeleteAllSizes(*f_ind);
if (T1_ReencodeFont(*f_ind, enc_vector)) {
T1_DeleteEncoding(enc_vector);
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Couldn't re-encode font");
RETURN_FALSE;
}
zend_list_insert(enc_vector, le_ps_enc TSRMLS_CC);
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagepsextendfont(resource font_index, float extend)
   Extend or condense (if extend < 1) a font */
PHP_FUNCTION(imagepsextendfont)
{
zval *fnt;
double ext;
int *f_ind;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rd", &fnt, &ext) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font);
T1_DeleteAllSizes(*f_ind);
if (ext <= 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Second parameter %F out of range (must be > 0)", ext);
RETURN_FALSE;
}
if (T1_ExtendFont(*f_ind, ext) != 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto bool imagepsslantfont(resource font_index, float slant)
Slant a font */
PHP_FUNCTION(imagepsslantfont)
{
zval *fnt;
double slt;
int *f_ind;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rd", &fnt, &slt) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font);
if (T1_SlantFont(*f_ind, slt) != 0) {
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
/* {{{ proto array imagepstext(resource image, string text, resource font, int size, int foreground, int background, int xcoord, int ycoord [, int space [, int tightness [, float angle [, int antialias]]]])
Rasterize a string over an image */
PHP_FUNCTION(imagepstext)
{
zval *img, *fnt;
int i, j;
long _fg, _bg, x, y, size, space = 0, aa_steps = 4, width = 0;
int *f_ind;
int h_lines, v_lines, c_ind;
int rd, gr, bl, fg_rd, fg_gr, fg_bl, bg_rd, bg_gr, bg_bl;
int fg_al, bg_al, al;
int aa[16];
int amount_kern, add_width;
double angle = 0.0, extend;
unsigned long aa_greys[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
gdImagePtr bg_img;
GLYPH *str_img;
T1_OUTLINE *char_path, *str_path;
T1_TMATRIX *transform = NULL;
char *str;
int str_len;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rsrlllll|lldl", &img, &str, &str_len, &fnt, &size, &_fg, &_bg, &x, &y, &space, &width, &angle, &aa_steps) == FAILURE) {
return;
}
if (aa_steps != 4 && aa_steps != 16) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Antialias steps must be 4 or 16");
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(bg_img, gdImagePtr, &img, -1, "Image", le_gd);
ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font);
/* Ensure that the provided colors are valid */
	if (_fg < 0 || (!gdImageTrueColor(bg_img) && _fg >= gdImageColorsTotal(bg_img))) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Foreground color index %ld out of range", _fg);
RETURN_FALSE;
}
	if (_bg < 0 || (!gdImageTrueColor(bg_img) && _bg >= gdImageColorsTotal(bg_img))) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Background color index %ld out of range", _bg);
RETURN_FALSE;
}
fg_rd = gdImageRed (bg_img, _fg);
fg_gr = gdImageGreen(bg_img, _fg);
fg_bl = gdImageBlue (bg_img, _fg);
fg_al = gdImageAlpha(bg_img, _fg);
bg_rd = gdImageRed (bg_img, _bg);
bg_gr = gdImageGreen(bg_img, _bg);
bg_bl = gdImageBlue (bg_img, _bg);
bg_al = gdImageAlpha(bg_img, _bg);
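	/* Build the antialiasing palette: aa_steps colors linearly interpolated
	 * from the background color towards the foreground color. */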
for (i = 0; i < aa_steps; i++) {
rd = bg_rd + (double) (fg_rd - bg_rd) / aa_steps * (i + 1);
gr = bg_gr + (double) (fg_gr - bg_gr) / aa_steps * (i + 1);
bl = bg_bl + (double) (fg_bl - bg_bl) / aa_steps * (i + 1);
al = bg_al + (double) (fg_al - bg_al) / aa_steps * (i + 1);
aa[i] = gdImageColorResolveAlpha(bg_img, rd, gr, bl, al);
}
T1_AASetBitsPerPixel(8);
switch (aa_steps) {
case 4:
T1_AASetGrayValues(0, 1, 2, 3, 4);
T1_AASetLevel(T1_AA_LOW);
break;
case 16:
T1_AAHSetGrayValues(aa_greys);
T1_AASetLevel(T1_AA_HIGH);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid value %ld as number of steps for antialiasing", aa_steps);
RETURN_FALSE;
}
if (angle) {
transform = T1_RotateMatrix(NULL, angle);
}
if (width) {
extend = T1_GetExtend(*f_ind);
str_path = T1_GetCharOutline(*f_ind, str[0], size, transform);
if (!str_path) {
if (T1_errno) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "T1Lib Error: %s", T1_StrError(T1_errno));
}
RETURN_FALSE;
}
for (i = 1; i < str_len; i++) {
amount_kern = (int) T1_GetKerning(*f_ind, str[i - 1], str[i]);
amount_kern += str[i - 1] == ' ' ? space : 0;
add_width = (int) (amount_kern + width) / extend;
char_path = T1_GetMoveOutline(*f_ind, add_width, 0, 0, size, transform);
str_path = T1_ConcatOutlines(str_path, char_path);
char_path = T1_GetCharOutline(*f_ind, str[i], size, transform);
str_path = T1_ConcatOutlines(str_path, char_path);
}
str_img = T1_AAFillOutline(str_path, 0);
} else {
str_img = T1_AASetString(*f_ind, str, str_len, space, T1_KERNING, size, transform);
}
if (T1_errno) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "T1Lib Error: %s", T1_StrError(T1_errno));
RETURN_FALSE;
}
h_lines = str_img->metrics.ascent - str_img->metrics.descent;
v_lines = str_img->metrics.rightSideBearing - str_img->metrics.leftSideBearing;
for (i = 0; i < v_lines; i++) {
for (j = 0; j < h_lines; j++) {
switch (str_img->bits[j * v_lines + i]) {
case 0:
break;
default:
c_ind = aa[str_img->bits[j * v_lines + i] - 1];
gdImageSetPixel(bg_img, x + str_img->metrics.leftSideBearing + i, y - str_img->metrics.ascent + j, c_ind);
break;
}
}
}
array_init(return_value);
add_next_index_long(return_value, str_img->metrics.leftSideBearing);
add_next_index_long(return_value, str_img->metrics.descent);
add_next_index_long(return_value, str_img->metrics.rightSideBearing);
add_next_index_long(return_value, str_img->metrics.ascent);
}
/* }}} */
/* {{{ proto array imagepsbbox(string text, resource font, int size [, int space, int tightness, float angle])
Return the bounding box needed by a string if rasterized */
PHP_FUNCTION(imagepsbbox)
{
zval *fnt;
long sz = 0, sp = 0, wd = 0;
char *str;
int i, space = 0, add_width = 0, char_width, amount_kern;
int cur_x, cur_y, dx, dy;
int x1, y1, x2, y2, x3, y3, x4, y4;
int *f_ind;
int str_len, per_char = 0;
int argc = ZEND_NUM_ARGS();
double angle = 0, sin_a = 0, cos_a = 0;
BBox char_bbox, str_bbox = {0, 0, 0, 0};
if (argc != 3 && argc != 6) {
ZEND_WRONG_PARAM_COUNT();
}
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "srl|lld", &str, &str_len, &fnt, &sz, &sp, &wd, &angle) == FAILURE) {
return;
}
if (argc == 6) {
space = sp;
add_width = wd;
angle = angle * M_PI / 180;
sin_a = sin(angle);
cos_a = cos(angle);
per_char = add_width || angle ? 1 : 0;
}
ZEND_FETCH_RESOURCE(f_ind, int *, &fnt, -1, "Type 1 font", le_ps_font);
#define max(a, b) (a > b ? a : b)
#define min(a, b) (a < b ? a : b)
#define new_x(a, b) (int) ((a) * cos_a - (b) * sin_a)
#define new_y(a, b) (int) ((a) * sin_a + (b) * cos_a)
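	/* new_x()/new_y() rotate a point by the requested angle; they are used
	 * below to accumulate the rotated per-character bounding boxes. */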
if (per_char) {
space += T1_GetCharWidth(*f_ind, ' ');
cur_x = cur_y = 0;
for (i = 0; i < str_len; i++) {
if (str[i] == ' ') {
char_bbox.llx = char_bbox.lly = char_bbox.ury = 0;
char_bbox.urx = char_width = space;
} else {
char_bbox = T1_GetCharBBox(*f_ind, str[i]);
char_width = T1_GetCharWidth(*f_ind, str[i]);
}
amount_kern = i ? T1_GetKerning(*f_ind, str[i - 1], str[i]) : 0;
/* Transfer character bounding box to right place */
x1 = new_x(char_bbox.llx, char_bbox.lly) + cur_x;
y1 = new_y(char_bbox.llx, char_bbox.lly) + cur_y;
x2 = new_x(char_bbox.llx, char_bbox.ury) + cur_x;
y2 = new_y(char_bbox.llx, char_bbox.ury) + cur_y;
x3 = new_x(char_bbox.urx, char_bbox.ury) + cur_x;
y3 = new_y(char_bbox.urx, char_bbox.ury) + cur_y;
x4 = new_x(char_bbox.urx, char_bbox.lly) + cur_x;
y4 = new_y(char_bbox.urx, char_bbox.lly) + cur_y;
/* Find min & max values and compare them with current bounding box */
str_bbox.llx = min(str_bbox.llx, min(x1, min(x2, min(x3, x4))));
str_bbox.lly = min(str_bbox.lly, min(y1, min(y2, min(y3, y4))));
str_bbox.urx = max(str_bbox.urx, max(x1, max(x2, max(x3, x4))));
str_bbox.ury = max(str_bbox.ury, max(y1, max(y2, max(y3, y4))));
/* Move to the next base point */
dx = new_x(char_width + add_width + amount_kern, 0);
dy = new_y(char_width + add_width + amount_kern, 0);
cur_x += dx;
cur_y += dy;
/*
printf("%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", x1, y1, x2, y2, x3, y3, x4, y4, char_bbox.llx, char_bbox.lly, char_bbox.urx, char_bbox.ury, char_width, amount_kern, cur_x, cur_y, dx, dy);
*/
}
} else {
str_bbox = T1_GetStringBBox(*f_ind, str, str_len, space, T1_KERNING);
}
if (T1_errno) {
RETURN_FALSE;
}
array_init(return_value);
/*
printf("%d %d %d %d\n", str_bbox.llx, str_bbox.lly, str_bbox.urx, str_bbox.ury);
*/
add_next_index_long(return_value, (int) ceil(((double) str_bbox.llx)*sz/1000));
add_next_index_long(return_value, (int) ceil(((double) str_bbox.lly)*sz/1000));
add_next_index_long(return_value, (int) ceil(((double) str_bbox.urx)*sz/1000));
add_next_index_long(return_value, (int) ceil(((double) str_bbox.ury)*sz/1000));
}
/* }}} */
#endif
/* {{{ proto bool image2wbmp(resource im [, string filename [, int threshold]])
Output WBMP image to browser or file */
PHP_FUNCTION(image2wbmp)
{
_php_image_output(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_CONVERT_WBM, "WBMP", _php_image_bw_convert);
}
/* }}} */
#if defined(HAVE_GD_JPG)
/* {{{ proto bool jpeg2wbmp (string f_org, string f_dest, int d_height, int d_width, int threshold)
Convert JPEG image to WBMP image */
PHP_FUNCTION(jpeg2wbmp)
{
_php_image_convert(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_JPG);
}
/* }}} */
#endif
#if defined(HAVE_GD_PNG)
/* {{{ proto bool png2wbmp (string f_org, string f_dest, int d_height, int d_width, int threshold)
Convert PNG image to WBMP image */
PHP_FUNCTION(png2wbmp)
{
_php_image_convert(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_PNG);
}
/* }}} */
#endif
/* {{{ _php_image_bw_convert
* It converts a gd Image to bw using a threshold value */
static void _php_image_bw_convert(gdImagePtr im_org, gdIOCtx *out, int threshold)
{
gdImagePtr im_dest;
int white, black;
int color, color_org, median;
int dest_height = gdImageSY(im_org);
int dest_width = gdImageSX(im_org);
int x, y;
TSRMLS_FETCH();
im_dest = gdImageCreate(dest_width, dest_height);
if (im_dest == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate temporary buffer");
return;
}
white = gdImageColorAllocate(im_dest, 255, 255, 255);
if (white == -1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate the colors for the destination buffer");
return;
}
black = gdImageColorAllocate(im_dest, 0, 0, 0);
if (black == -1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate the colors for the destination buffer");
return;
}
if (im_org->trueColor) {
gdImageTrueColorToPalette(im_org, 1, 256);
}
for (y = 0; y < dest_height; y++) {
for (x = 0; x < dest_width; x++) {
color_org = gdImageGetPixel(im_org, x, y);
median = (im_org->red[color_org] + im_org->green[color_org] + im_org->blue[color_org]) / 3;
if (median < threshold) {
color = black;
} else {
color = white;
}
gdImageSetPixel (im_dest, x, y, color);
}
}
gdImageWBMPCtx (im_dest, black, out);
}
/* }}} */
/* {{{ _php_image_convert
* _php_image_convert converts jpeg/png images to wbmp and resizes them as needed */
static void _php_image_convert(INTERNAL_FUNCTION_PARAMETERS, int image_type )
{
char *f_org, *f_dest;
int f_org_len, f_dest_len;
long height, width, threshold;
gdImagePtr im_org, im_dest, im_tmp;
char *fn_org = NULL;
char *fn_dest = NULL;
FILE *org, *dest;
int dest_height = -1;
int dest_width = -1;
int org_height, org_width;
int white, black;
int color, color_org, median;
int int_threshold;
int x, y;
float x_ratio, y_ratio;
long ignore_warning;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "pplll", &f_org, &f_org_len, &f_dest, &f_dest_len, &height, &width, &threshold) == FAILURE) {
return;
}
fn_org = f_org;
fn_dest = f_dest;
dest_height = height;
dest_width = width;
int_threshold = threshold;
/* Check threshold value */
if (int_threshold < 0 || int_threshold > 8) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid threshold value '%d'", int_threshold);
RETURN_FALSE;
}
/* Check origin file */
PHP_GD_CHECK_OPEN_BASEDIR(fn_org, "Invalid origin filename");
/* Check destination file */
PHP_GD_CHECK_OPEN_BASEDIR(fn_dest, "Invalid destination filename");
/* Open origin file */
org = VCWD_FOPEN(fn_org, "rb");
if (!org) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' for reading", fn_org);
RETURN_FALSE;
}
/* Open destination file */
dest = VCWD_FOPEN(fn_dest, "wb");
if (!dest) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' for writing", fn_dest);
RETURN_FALSE;
}
switch (image_type) {
case PHP_GDIMG_TYPE_GIF:
im_org = gdImageCreateFromGif(org);
if (im_org == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' Not a valid GIF file", fn_dest);
RETURN_FALSE;
}
break;
#ifdef HAVE_GD_JPG
case PHP_GDIMG_TYPE_JPG:
ignore_warning = INI_INT("gd.jpeg_ignore_warning");
im_org = gdImageCreateFromJpegEx(org, ignore_warning);
if (im_org == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' Not a valid JPEG file", fn_dest);
RETURN_FALSE;
}
break;
#endif /* HAVE_GD_JPG */
#ifdef HAVE_GD_PNG
case PHP_GDIMG_TYPE_PNG:
im_org = gdImageCreateFromPng(org);
if (im_org == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to open '%s' Not a valid PNG file", fn_dest);
RETURN_FALSE;
}
break;
#endif /* HAVE_GD_PNG */
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Format not supported");
RETURN_FALSE;
break;
}
org_width = gdImageSX (im_org);
org_height = gdImageSY (im_org);
x_ratio = (float) org_width / (float) dest_width;
y_ratio = (float) org_height / (float) dest_height;
if (x_ratio > 1 && y_ratio > 1) {
if (y_ratio > x_ratio) {
x_ratio = y_ratio;
} else {
y_ratio = x_ratio;
}
dest_width = (int) (org_width / x_ratio);
dest_height = (int) (org_height / y_ratio);
} else {
x_ratio = (float) dest_width / (float) org_width;
y_ratio = (float) dest_height / (float) org_height;
if (y_ratio < x_ratio) {
x_ratio = y_ratio;
} else {
y_ratio = x_ratio;
}
dest_width = (int) (org_width * x_ratio);
dest_height = (int) (org_height * y_ratio);
}
im_tmp = gdImageCreate (dest_width, dest_height);
if (im_tmp == NULL ) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate temporary buffer");
RETURN_FALSE;
}
gdImageCopyResized (im_tmp, im_org, 0, 0, 0, 0, dest_width, dest_height, org_width, org_height);
gdImageDestroy(im_org);
fclose(org);
im_dest = gdImageCreate(dest_width, dest_height);
if (im_dest == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate destination buffer");
RETURN_FALSE;
}
white = gdImageColorAllocate(im_dest, 255, 255, 255);
if (white == -1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate the colors for the destination buffer");
RETURN_FALSE;
}
black = gdImageColorAllocate(im_dest, 0, 0, 0);
if (black == -1) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to allocate the colors for the destination buffer");
RETURN_FALSE;
}
int_threshold = int_threshold * 32;
for (y = 0; y < dest_height; y++) {
for (x = 0; x < dest_width; x++) {
color_org = gdImageGetPixel (im_tmp, x, y);
median = (im_tmp->red[color_org] + im_tmp->green[color_org] + im_tmp->blue[color_org]) / 3;
if (median < int_threshold) {
color = black;
} else {
color = white;
}
gdImageSetPixel (im_dest, x, y, color);
}
}
gdImageDestroy (im_tmp );
gdImageWBMP(im_dest, black , dest);
fflush(dest);
fclose(dest);
gdImageDestroy(im_dest);
RETURN_TRUE;
}
/* }}} */
/* Section Filters */
#define PHP_GD_SINGLE_RES \
zval *SIM; \
gdImagePtr im_src; \
if (zend_parse_parameters(1 TSRMLS_CC, "r", &SIM) == FAILURE) { \
RETURN_FALSE; \
} \
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd); \
if (im_src == NULL) { \
RETURN_FALSE; \
}
static void php_image_filter_negate(INTERNAL_FUNCTION_PARAMETERS)
{
PHP_GD_SINGLE_RES
if (gdImageNegate(im_src) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_grayscale(INTERNAL_FUNCTION_PARAMETERS)
{
PHP_GD_SINGLE_RES
if (gdImageGrayScale(im_src) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_brightness(INTERNAL_FUNCTION_PARAMETERS)
{
zval *SIM;
gdImagePtr im_src;
long brightness, tmp;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zll", &SIM, &tmp, &brightness) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
if (im_src == NULL) {
RETURN_FALSE;
}
if (gdImageBrightness(im_src, (int)brightness) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_contrast(INTERNAL_FUNCTION_PARAMETERS)
{
zval *SIM;
gdImagePtr im_src;
long contrast, tmp;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rll", &SIM, &tmp, &contrast) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
if (im_src == NULL) {
RETURN_FALSE;
}
if (gdImageContrast(im_src, (int)contrast) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_colorize(INTERNAL_FUNCTION_PARAMETERS)
{
zval *SIM;
gdImagePtr im_src;
long r,g,b,tmp;
long a = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll|l", &SIM, &tmp, &r, &g, &b, &a) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
if (im_src == NULL) {
RETURN_FALSE;
}
if (gdImageColor(im_src, (int) r, (int) g, (int) b, (int) a) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_edgedetect(INTERNAL_FUNCTION_PARAMETERS)
{
PHP_GD_SINGLE_RES
if (gdImageEdgeDetectQuick(im_src) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_emboss(INTERNAL_FUNCTION_PARAMETERS)
{
PHP_GD_SINGLE_RES
if (gdImageEmboss(im_src) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_gaussian_blur(INTERNAL_FUNCTION_PARAMETERS)
{
PHP_GD_SINGLE_RES
if (gdImageGaussianBlur(im_src) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_selective_blur(INTERNAL_FUNCTION_PARAMETERS)
{
PHP_GD_SINGLE_RES
if (gdImageSelectiveBlur(im_src) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_mean_removal(INTERNAL_FUNCTION_PARAMETERS)
{
PHP_GD_SINGLE_RES
if (gdImageMeanRemoval(im_src) == 1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_smooth(INTERNAL_FUNCTION_PARAMETERS)
{
zval *SIM;
long tmp;
gdImagePtr im_src;
double weight;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rld", &SIM, &tmp, &weight) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
if (im_src == NULL) {
RETURN_FALSE;
}
if (gdImageSmooth(im_src, (float)weight)==1) {
RETURN_TRUE;
}
RETURN_FALSE;
}
static void php_image_filter_pixelate(INTERNAL_FUNCTION_PARAMETERS)
{
zval *IM;
gdImagePtr im;
long tmp, blocksize;
zend_bool mode = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rll|b", &IM, &tmp, &blocksize, &mode) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (im == NULL) {
RETURN_FALSE;
}
if (gdImagePixelate(im, (int) blocksize, (const unsigned int) mode)) {
RETURN_TRUE;
}
RETURN_FALSE;
}
/* {{{ proto bool imagefilter(resource src_im, int filtertype [, int arg1 [, int arg2 [, int arg3 [, int arg4]]]])
Applies a filter to an image */
PHP_FUNCTION(imagefilter)
{
zval *tmp;
typedef void (*image_filter)(INTERNAL_FUNCTION_PARAMETERS);
long filtertype;
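/* filtertype is used as a direct index into this table, so the order must
match the IMAGE_FILTER_* constants; IMAGE_FILTER_MAX is assumed to be the
index of the last entry (pixelate). */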
image_filter filters[] =
{
php_image_filter_negate ,
php_image_filter_grayscale,
php_image_filter_brightness,
php_image_filter_contrast,
php_image_filter_colorize,
php_image_filter_edgedetect,
php_image_filter_emboss,
php_image_filter_gaussian_blur,
php_image_filter_selective_blur,
php_image_filter_mean_removal,
php_image_filter_smooth,
php_image_filter_pixelate
};
if (ZEND_NUM_ARGS() < 2 || ZEND_NUM_ARGS() > IMAGE_FILTER_MAX_ARGS) {
WRONG_PARAM_COUNT;
} else if (zend_parse_parameters(2 TSRMLS_CC, "rl", &tmp, &filtertype) == FAILURE) {
return;
}
if (filtertype >= 0 && filtertype <= IMAGE_FILTER_MAX) {
filters[filtertype](INTERNAL_FUNCTION_PARAM_PASSTHRU);
}
}
/* }}} */
/* {{{ proto bool imageconvolution(resource src_im, array matrix3x3, double div, double offset)
Apply a 3x3 convolution matrix, using coefficient div and offset */
PHP_FUNCTION(imageconvolution)
{
zval *SIM, *hash_matrix;
zval **var = NULL, **var2 = NULL;
gdImagePtr im_src = NULL;
double div, offset;
int nelem, i, j, res;
float matrix[3][3] = {{0,0,0}, {0,0,0}, {0,0,0}};
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "radd", &SIM, &hash_matrix, &div, &offset) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE(im_src, gdImagePtr, &SIM, -1, "Image", le_gd);
nelem = zend_hash_num_elements(Z_ARRVAL_P(hash_matrix));
if (nelem != 3) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have 3x3 array");
RETURN_FALSE;
}
for (i=0; i<3; i++) {
if (zend_hash_index_find(Z_ARRVAL_P(hash_matrix), (i), (void **) &var) == SUCCESS && Z_TYPE_PP(var) == IS_ARRAY) {
if (Z_TYPE_PP(var) != IS_ARRAY || zend_hash_num_elements(Z_ARRVAL_PP(var)) != 3 ) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have 3x3 array");
RETURN_FALSE;
}
for (j=0; j<3; j++) {
if (zend_hash_index_find(Z_ARRVAL_PP(var), (j), (void **) &var2) == SUCCESS) {
if (Z_TYPE_PP(var2) != IS_DOUBLE) {
zval dval;
dval = **var;
zval_copy_ctor(&dval);
convert_to_double(&dval);
matrix[i][j] = (float)Z_DVAL(dval);
} else {
matrix[i][j] = (float)Z_DVAL_PP(var2);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "You must have a 3x3 matrix");
RETURN_FALSE;
}
}
}
}
res = gdImageConvolution(im_src, matrix, (float)div, (float)offset);
if (res) {
RETURN_TRUE;
} else {
RETURN_FALSE;
}
}
/* }}} */
/* End section: Filters */
/* {{{ proto bool imageflip(resource im, int mode)
Flip an image (in place) horizontally, vertically or in both directions. */
PHP_FUNCTION(imageflip)
{
zval *IM;
long mode;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &IM, &mode) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
switch (mode) {
case GD_FLIP_VERTICAL:
gdImageFlipVertical(im);
break;
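/* Note: GD_FLIP_HORINZONTAL (sic) is the constant's actual spelling in libgd. */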
case GD_FLIP_HORINZONTAL:
gdImageFlipHorizontal(im);
break;
case GD_FLIP_BOTH:
gdImageFlipBoth(im);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown flip mode");
RETURN_FALSE;
}
RETURN_TRUE;
}
/* }}} */
#ifdef HAVE_GD_BUNDLED
/* {{{ proto bool imageantialias(resource im, bool on)
Whether antialiased functions should be used or not */
PHP_FUNCTION(imageantialias)
{
zval *IM;
zend_bool alias;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rb", &IM, &alias) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
gdImageAntialias(im, alias);
RETURN_TRUE;
}
/* }}} */
#endif
/* {{{ proto resource imagecrop(resource im, array rect)
Crop an image using the given coordinates and size, x, y, width and height. */
PHP_FUNCTION(imagecrop)
{
zval *IM;
gdImagePtr im;
gdImagePtr im_crop;
gdRect rect;
zval *z_rect;
zval **tmp;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ra", &IM, &z_rect) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (zend_hash_find(HASH_OF(z_rect), "x", sizeof("x"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_LONG) {
zval lval;
lval = **tmp;
zval_copy_ctor(&lval);
convert_to_long(&lval);
rect.x = Z_LVAL(lval);
} else {
rect.x = Z_LVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing x position");
RETURN_FALSE;
}
if (zend_hash_find(HASH_OF(z_rect), "y", sizeof("x"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_LONG) {
zval lval;
lval = **tmp;
zval_copy_ctor(&lval);
convert_to_long(&lval);
rect.y = Z_LVAL(lval);
} else {
rect.y = Z_LVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing y position");
RETURN_FALSE;
}
if (zend_hash_find(HASH_OF(z_rect), "width", sizeof("width"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_LONG) {
zval lval;
lval = **tmp;
zval_copy_ctor(&lval);
convert_to_long(&lval);
rect.width = Z_LVAL(lval);
} else {
rect.width = Z_LVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing width");
RETURN_FALSE;
}
if (zend_hash_find(HASH_OF(z_rect), "height", sizeof("height"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_LONG) {
zval lval;
lval = **tmp;
zval_copy_ctor(&lval);
convert_to_long(&lval);
rect.height = Z_LVAL(lval);
} else {
rect.height = Z_LVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing height");
RETURN_FALSE;
}
im_crop = gdImageCrop(im, &rect);
if (im_crop == NULL) {
RETURN_FALSE;
} else {
ZEND_REGISTER_RESOURCE(return_value, im_crop, le_gd);
}
}
/* }}} */
/* {{{ proto resource imagecropauto(resource im [, int mode [, float threshold [, int color]]])
Crop an image automatically using one of the available modes. */
PHP_FUNCTION(imagecropauto)
{
zval *IM;
long mode = -1;
long color = -1;
double threshold = 0.5f;
gdImagePtr im;
gdImagePtr im_crop;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r|ldl", &IM, &mode, &threshold, &color) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
switch (mode) {
case -1:
mode = GD_CROP_DEFAULT;
case GD_CROP_DEFAULT:
case GD_CROP_TRANSPARENT:
case GD_CROP_BLACK:
case GD_CROP_WHITE:
case GD_CROP_SIDES:
im_crop = gdImageCropAuto(im, mode);
break;
case GD_CROP_THRESHOLD:
if (color < 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Color argument missing with threshold mode");
RETURN_FALSE;
}
im_crop = gdImageCropThreshold(im, color, (float) threshold);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown crop mode");
RETURN_FALSE;
}
if (im_crop == NULL) {
RETURN_FALSE;
} else {
ZEND_REGISTER_RESOURCE(return_value, im_crop, le_gd);
}
}
/* }}} */
/* {{{ proto resource imagescale(resource im, int new_width [, int new_height [, int method]])
Scale an image using the given new width and height. */
PHP_FUNCTION(imagescale)
{
zval *IM;
gdImagePtr im;
gdImagePtr im_scaled;
long new_width, new_height = -1;
long method = GD_BILINEAR_FIXED;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl|ll", &IM, &new_width, &new_height, &method) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
/* Pick the scaler that matches the requested interpolation method. */
switch (method) {
case GD_NEAREST_NEIGHBOUR:
im_scaled = gdImageScaleNearestNeighbour(im, new_width, new_height);
break;
case GD_BILINEAR_FIXED:
im_scaled = gdImageScaleBilinear(im, new_width, new_height);
break;
case GD_BICUBIC:
case GD_BICUBIC_FIXED:
im_scaled = gdImageScaleBicubicFixed(im, new_width, new_height);
break;
default:
im_scaled = gdImageScaleTwoPass(im, im->sx, im->sy, new_width, new_height);
break;
}
if (im_scaled == NULL) {
RETURN_FALSE;
} else {
ZEND_REGISTER_RESOURCE(return_value, im_scaled, le_gd);
}
}
/* }}} */
/* {{{ proto resource imageaffine(resource src, array affine[, array clip])
Return an image containing the affine transformed src image, using an optional clipping area */
PHP_FUNCTION(imageaffine)
{
zval *IM;
gdImagePtr src;
gdImagePtr dst;
gdRect rect;
gdRectPtr pRect = NULL;
zval *z_rect = NULL;
zval *z_affine;
zval **tmp;
double affine[6];
int i, nelems;
zval **zval_affine_elem = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ra|a", &IM, &z_affine, &z_rect) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(src, gdImagePtr, &IM, -1, "Image", le_gd);
if ((nelems = zend_hash_num_elements(Z_ARRVAL_P(z_affine))) != 6) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Affine array must have six elements");
RETURN_FALSE;
}
for (i = 0; i < nelems; i++) {
if (zend_hash_index_find(Z_ARRVAL_P(z_affine), i, (void **) &zval_affine_elem) == SUCCESS) {
switch (Z_TYPE_PP(zval_affine_elem)) {
case IS_LONG:
affine[i] = Z_LVAL_PP(zval_affine_elem);
break;
case IS_DOUBLE:
affine[i] = Z_DVAL_PP(zval_affine_elem);
break;
case IS_STRING:
{
zval dval;
dval = **zval_affine_elem;
zval_copy_ctor(&dval);
convert_to_double(&dval);
affine[i] = Z_DVAL(dval);
}
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid type for element %i", i);
RETURN_FALSE;
}
}
}
if (z_rect != NULL) {
if (zend_hash_find(HASH_OF(z_rect), "x", sizeof("x"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_LONG) {
zval lval;
lval = **tmp;
zval_copy_ctor(&lval);
convert_to_long(&lval);
rect.x = Z_LVAL(lval);
} else {
rect.x = Z_LVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing x position");
RETURN_FALSE;
}
if (zend_hash_find(HASH_OF(z_rect), "y", sizeof("x"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_LONG) {
zval lval;
lval = **tmp;
zval_copy_ctor(&lval);
convert_to_long(&lval);
rect.y = Z_LVAL(lval);
} else {
rect.y = Z_LVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing y position");
RETURN_FALSE;
}
if (zend_hash_find(HASH_OF(z_rect), "width", sizeof("width"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_LONG) {
zval lval;
lval = **tmp;
zval_copy_ctor(&lval);
convert_to_long(&lval);
rect.width = Z_LVAL(lval);
} else {
rect.width = Z_LVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing width");
RETURN_FALSE;
}
if (zend_hash_find(HASH_OF(z_rect), "height", sizeof("height"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_LONG) {
zval lval;
lval = **tmp;
zval_copy_ctor(&lval);
convert_to_long(&lval);
rect.height = Z_LVAL(lval);
} else {
rect.height = Z_LVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing height");
RETURN_FALSE;
}
pRect = ▭
} else {
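/* No clipping array was supplied: pRect stays NULL so the whole source
image is transformed; the rect values below are defaults only. */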
rect.x = -1;
rect.y = -1;
rect.width = gdImageSX(src);
rect.height = gdImageSY(src);
pRect = NULL;
}
/* int gdTransformAffineGetImage(gdImagePtr *dst, const gdImagePtr src, gdRectPtr src_area, const double affine[6]); */
if (gdTransformAffineGetImage(&dst, src, pRect, affine) != GD_TRUE) {
RETURN_FALSE;
}
if (dst == NULL) {
RETURN_FALSE;
} else {
ZEND_REGISTER_RESOURCE(return_value, dst, le_gd);
}
}
/* }}} */
/* {{{ proto array imageaffinematrixget(int type [, mixed options])
Return an affine transformation matrix of the given type, built from the options */
PHP_FUNCTION(imageaffinematrixget)
{
double affine[6];
long type;
zval *options = NULL;
zval **tmp;
int res = GD_FALSE, i;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|z", &type, &options) == FAILURE) {
return;
}
switch((gdAffineStandardMatrix)type) {
case GD_AFFINE_TRANSLATE:
case GD_AFFINE_SCALE: {
double x, y;
if (options == NULL || Z_TYPE_P(options) != IS_ARRAY) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Array expected as options");
RETURN_FALSE;
}
if (zend_hash_find(HASH_OF(options), "x", sizeof("x"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_DOUBLE) {
zval dval;
dval = **tmp;
zval_copy_ctor(&dval);
convert_to_double(&dval);
x = Z_DVAL(dval);
} else {
x = Z_DVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing x position");
RETURN_FALSE;
}
if (zend_hash_find(HASH_OF(options), "y", sizeof("y"), (void **)&tmp) != FAILURE) {
if (Z_TYPE_PP(tmp) != IS_DOUBLE) {
zval dval;
dval = **tmp;
zval_copy_ctor(&dval);
convert_to_double(&dval);
y = Z_DVAL(dval);
} else {
y = Z_DVAL_PP(tmp);
}
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Missing y position");
RETURN_FALSE;
}
if (type == GD_AFFINE_TRANSLATE) {
res = gdAffineTranslate(affine, x, y);
} else {
res = gdAffineScale(affine, x, y);
}
break;
}
case GD_AFFINE_ROTATE:
case GD_AFFINE_SHEAR_HORIZONTAL:
case GD_AFFINE_SHEAR_VERTICAL: {
double angle;
if (options == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Number expected as option");
RETURN_FALSE;
}
convert_to_double_ex(&options);
angle = Z_DVAL_P(options);
if (type == GD_AFFINE_SHEAR_HORIZONTAL) {
res = gdAffineShearHorizontal(affine, angle);
} else if (type == GD_AFFINE_SHEAR_VERTICAL) {
res = gdAffineShearVertical(affine, angle);
} else {
res = gdAffineRotate(affine, angle);
}
break;
}
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid type for element %li", type);
RETURN_FALSE;
}
if (res == GD_FALSE) {
RETURN_FALSE;
} else {
array_init(return_value);
for (i = 0; i < 6; i++) {
add_index_double(return_value, i, affine[i]);
}
}
}
/* }}} */
/* {{{ proto array imageaffinematrixconcat(array m1, array m2)
Concat two affine transformation matrices (as in doing many ops in one go) */
PHP_FUNCTION(imageaffinematrixconcat)
{
double m1[6];
double m2[6];
double mr[6];
zval **tmp;
zval *z_m1;
zval *z_m2;
int i, nelems;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "aa", &z_m1, &z_m2) == FAILURE) {
return;
}
if (((nelems = zend_hash_num_elements(Z_ARRVAL_P(z_m1))) != 6) || (nelems = zend_hash_num_elements(Z_ARRVAL_P(z_m2))) != 6) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Affine arrays must have six elements");
RETURN_FALSE;
}
for (i = 0; i < 6; i++) {
if (zend_hash_index_find(Z_ARRVAL_P(z_m1), i, (void **) &tmp) == SUCCESS) {
switch (Z_TYPE_PP(tmp)) {
case IS_LONG:
m1[i] = Z_LVAL_PP(tmp);
break;
case IS_DOUBLE:
m1[i] = Z_DVAL_PP(tmp);
break;
case IS_STRING:
{
zval dval;
dval = **tmp;
zval_copy_ctor(&dval);
convert_to_double(&dval);
m1[i] = Z_DVAL(dval);
}
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid type for element %i", i);
RETURN_FALSE;
}
}
if (zend_hash_index_find(Z_ARRVAL_P(z_m2), i, (void **) &tmp) == SUCCESS) {
switch (Z_TYPE_PP(tmp)) {
case IS_LONG:
m2[i] = Z_LVAL_PP(tmp);
break;
case IS_DOUBLE:
m2[i] = Z_DVAL_PP(tmp);
break;
case IS_STRING:
{
zval dval;
dval = **tmp;
zval_copy_ctor(&dval);
convert_to_double(&dval);
m2[i] = Z_DVAL(dval);
}
break;
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid type for element %i", i);
RETURN_FALSE;
}
}
}
if (gdAffineConcat (mr, m1, m2) != GD_TRUE) {
RETURN_FALSE;
}
array_init(return_value);
for (i = 0; i < 6; i++) {
add_index_double(return_value, i, mr[i]);
}
}
/* }}} */
/* {{{ proto bool imagesetinterpolation(resource im [, int method])
Set the default interpolation method; passing -1 or 0 sets it to the libgd default (bilinear). */
PHP_FUNCTION(imagesetinterpolation)
{
zval *IM;
gdImagePtr im;
long method = GD_BILINEAR_FIXED;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r|l", &IM, &method) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
if (method == -1) {
method = GD_BILINEAR_FIXED;
}
RETURN_BOOL(gdImageSetInterpolationMethod(im, (gdInterpolationMethod) method));
}
/* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: sw=4 ts=4 fdm=marker
* vim<600: sw=4 ts=4
*/
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2087_1 |
crossvul-cpp_data_good_4800_1 | /* $Id$
*
* tiff2pdf - converts a TIFF image to a PDF document
*
* Copyright (c) 2003 Ross Finlayson
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee, provided
* that (i) the above copyright notices and this permission notice appear in
* all copies of the software and related documentation, and (ii) the name of
* Ross Finlayson may not be used in any advertising or
* publicity relating to the software without the specific, prior written
* permission of Ross Finlayson.
*
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
*
* IN NO EVENT SHALL ROSS FINLAYSON BE LIABLE FOR
* ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
* OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
* LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include "tif_config.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <errno.h>
#include <limits.h>
#if HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
# include <fcntl.h>
#endif
#ifdef HAVE_IO_H
# include <io.h>
#endif
#ifdef NEED_LIBPORT
# include "libport.h"
#endif
#include "tiffiop.h"
#include "tiffio.h"
#ifndef HAVE_GETOPT
extern int getopt(int, char**, char*);
#endif
#ifndef EXIT_SUCCESS
# define EXIT_SUCCESS 0
#endif
#ifndef EXIT_FAILURE
# define EXIT_FAILURE 1
#endif
#define TIFF2PDF_MODULE "tiff2pdf"
#define PS_UNIT_SIZE 72.0F
/* This type is of PDF color spaces. */
typedef enum {
T2P_CS_BILEVEL = 0x01, /* Bilevel, black and white */
T2P_CS_GRAY = 0x02, /* Single channel */
T2P_CS_RGB = 0x04, /* Three channel tristimulus RGB */
T2P_CS_CMYK = 0x08, /* Four channel CMYK print inkset */
T2P_CS_LAB = 0x10, /* Three channel L*a*b* color space */
T2P_CS_PALETTE = 0x1000,/* One of the above with a color map */
T2P_CS_CALGRAY = 0x20, /* Calibrated single channel */
T2P_CS_CALRGB = 0x40, /* Calibrated three channel tristimulus RGB */
T2P_CS_ICCBASED = 0x80 /* ICC profile color specification */
} t2p_cs_t;
/* This type is of PDF compression types. */
typedef enum{
T2P_COMPRESS_NONE=0x00
#ifdef CCITT_SUPPORT
, T2P_COMPRESS_G4=0x01
#endif
#if defined(JPEG_SUPPORT) || defined(OJPEG_SUPPORT)
, T2P_COMPRESS_JPEG=0x02
#endif
#ifdef ZIP_SUPPORT
, T2P_COMPRESS_ZIP=0x04
#endif
} t2p_compress_t;
/* This type is whether TIFF image data can be used in PDF without transcoding. */
typedef enum{
T2P_TRANSCODE_RAW=0x01, /* The raw data from the input can be used without recompressing */
T2P_TRANSCODE_ENCODE=0x02 /* The data from the input is perhaps unencoded and reencoded */
} t2p_transcode_t;
/* This type is of information about the data samples of the input image. */
typedef enum{
T2P_SAMPLE_NOTHING=0x0000, /* The unencoded samples are normal for the output colorspace */
T2P_SAMPLE_ABGR_TO_RGB=0x0001, /* The unencoded samples are the result of ReadRGBAImage */
T2P_SAMPLE_RGBA_TO_RGB=0x0002, /* The unencoded samples are contiguous RGBA */
T2P_SAMPLE_RGBAA_TO_RGB=0x0004, /* The unencoded samples are RGBA with premultiplied alpha */
T2P_SAMPLE_YCBCR_TO_RGB=0x0008,
T2P_SAMPLE_YCBCR_TO_LAB=0x0010,
T2P_SAMPLE_REALIZE_PALETTE=0x0020, /* The unencoded samples are indexes into the color map */
T2P_SAMPLE_SIGNED_TO_UNSIGNED=0x0040, /* The unencoded samples are signed instead of unsigned */
T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED=0x0040, /* The L*a*b* samples have a* and b* signed */
T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG=0x0100 /* The unencoded samples are separate instead of contiguous */
} t2p_sample_t;
/* This type is of error status of the T2P struct. */
typedef enum{
T2P_ERR_OK = 0, /* This is the value of t2p->t2p_error when there is no error */
T2P_ERR_ERROR = 1 /* This is the value of t2p->t2p_error when there was an error */
} t2p_err_t;
/* This struct defines a logical page of a TIFF. */
typedef struct {
tdir_t page_directory;
uint32 page_number;
ttile_t page_tilecount;
uint32 page_extra;
} T2P_PAGE;
/* This struct defines a PDF rectangle's coordinates. */
typedef struct {
float x1;
float y1;
float x2;
float y2;
float mat[9];
} T2P_BOX;
/* This struct defines a tile of a PDF. */
typedef struct {
T2P_BOX tile_box;
} T2P_TILE;
/* This struct defines information about the tiles on a PDF page. */
typedef struct {
ttile_t tiles_tilecount;
uint32 tiles_tilewidth;
uint32 tiles_tilelength;
uint32 tiles_tilecountx;
uint32 tiles_tilecounty;
uint32 tiles_edgetilewidth;
uint32 tiles_edgetilelength;
T2P_TILE* tiles_tiles;
} T2P_TILES;
/* This struct is the context of a function to generate PDF from a TIFF. */
typedef struct {
t2p_err_t t2p_error;
T2P_PAGE* tiff_pages;
T2P_TILES* tiff_tiles;
tdir_t tiff_pagecount;
uint16 tiff_compression;
uint16 tiff_photometric;
uint16 tiff_fillorder;
uint16 tiff_bitspersample;
uint16 tiff_samplesperpixel;
uint16 tiff_planar;
uint32 tiff_width;
uint32 tiff_length;
float tiff_xres;
float tiff_yres;
uint16 tiff_orientation;
toff_t tiff_dataoffset;
tsize_t tiff_datasize;
uint16 tiff_resunit;
uint16 pdf_centimeters;
uint16 pdf_overrideres;
uint16 pdf_overridepagesize;
float pdf_defaultxres;
float pdf_defaultyres;
float pdf_xres;
float pdf_yres;
float pdf_defaultpagewidth;
float pdf_defaultpagelength;
float pdf_pagewidth;
float pdf_pagelength;
float pdf_imagewidth;
float pdf_imagelength;
int pdf_image_fillpage; /* 0 (default: no scaling, 1:scale imagesize to pagesize */
T2P_BOX pdf_mediabox;
T2P_BOX pdf_imagebox;
uint16 pdf_majorversion;
uint16 pdf_minorversion;
uint32 pdf_catalog;
uint32 pdf_pages;
uint32 pdf_info;
uint32 pdf_palettecs;
uint16 pdf_fitwindow;
uint32 pdf_startxref;
#define TIFF2PDF_FILEID_SIZE 33
char pdf_fileid[TIFF2PDF_FILEID_SIZE];
#define TIFF2PDF_DATETIME_SIZE 17
char pdf_datetime[TIFF2PDF_DATETIME_SIZE];
#define TIFF2PDF_CREATOR_SIZE 512
char pdf_creator[TIFF2PDF_CREATOR_SIZE];
#define TIFF2PDF_AUTHOR_SIZE 512
char pdf_author[TIFF2PDF_AUTHOR_SIZE];
#define TIFF2PDF_TITLE_SIZE 512
char pdf_title[TIFF2PDF_TITLE_SIZE];
#define TIFF2PDF_SUBJECT_SIZE 512
char pdf_subject[TIFF2PDF_SUBJECT_SIZE];
#define TIFF2PDF_KEYWORDS_SIZE 512
char pdf_keywords[TIFF2PDF_KEYWORDS_SIZE];
t2p_cs_t pdf_colorspace;
uint16 pdf_colorspace_invert;
uint16 pdf_switchdecode;
uint16 pdf_palettesize;
unsigned char* pdf_palette;
int pdf_labrange[4];
t2p_compress_t pdf_defaultcompression;
uint16 pdf_defaultcompressionquality;
t2p_compress_t pdf_compression;
uint16 pdf_compressionquality;
uint16 pdf_nopassthrough;
t2p_transcode_t pdf_transcode;
t2p_sample_t pdf_sample;
uint32* pdf_xrefoffsets;
uint32 pdf_xrefcount;
tdir_t pdf_page;
#ifdef OJPEG_SUPPORT
tdata_t pdf_ojpegdata;
uint32 pdf_ojpegdatalength;
uint32 pdf_ojpegiflength;
#endif
float tiff_whitechromaticities[2];
float tiff_primarychromaticities[6];
float tiff_referenceblackwhite[2];
float* tiff_transferfunction[3];
int pdf_image_interpolate; /* 0 (default) : do not interpolate,
1 : interpolate */
uint16 tiff_transferfunctioncount;
uint32 pdf_icccs;
uint32 tiff_iccprofilelength;
tdata_t tiff_iccprofile;
/* fields for custom read/write procedures */
FILE *outputfile;
int outputdisable;
tsize_t outputwritten;
} T2P;
/* These functions are called by main. */
void tiff2pdf_usage(void);
int tiff2pdf_match_paper_size(float*, float*, char*);
/* These functions are used to generate a PDF from a TIFF. */
#ifdef __cplusplus
extern "C" {
#endif
T2P* t2p_init(void);
void t2p_validate(T2P*);
tsize_t t2p_write_pdf(T2P*, TIFF*, TIFF*);
void t2p_free(T2P*);
#ifdef __cplusplus
}
#endif
void t2p_read_tiff_init(T2P*, TIFF*);
int t2p_cmp_t2p_page(const void*, const void*);
void t2p_read_tiff_data(T2P*, TIFF*);
void t2p_read_tiff_size(T2P*, TIFF*);
void t2p_read_tiff_size_tile(T2P*, TIFF*, ttile_t);
int t2p_tile_is_right_edge(T2P_TILES, ttile_t);
int t2p_tile_is_bottom_edge(T2P_TILES, ttile_t);
int t2p_tile_is_edge(T2P_TILES, ttile_t);
int t2p_tile_is_corner_edge(T2P_TILES, ttile_t);
tsize_t t2p_readwrite_pdf_image(T2P*, TIFF*, TIFF*);
tsize_t t2p_readwrite_pdf_image_tile(T2P*, TIFF*, TIFF*, ttile_t);
#ifdef OJPEG_SUPPORT
int t2p_process_ojpeg_tables(T2P*, TIFF*);
#endif
#ifdef JPEG_SUPPORT
int t2p_process_jpeg_strip(unsigned char*, tsize_t*, unsigned char*, tsize_t, tsize_t*, tstrip_t, uint32);
#endif
void t2p_tile_collapse_left(tdata_t, tsize_t, uint32, uint32, uint32);
void t2p_write_advance_directory(T2P*, TIFF*);
tsize_t t2p_sample_planar_separate_to_contig(T2P*, unsigned char*, unsigned char*, tsize_t);
tsize_t t2p_sample_realize_palette(T2P*, unsigned char*);
tsize_t t2p_sample_abgr_to_rgb(tdata_t, uint32);
tsize_t t2p_sample_rgba_to_rgb(tdata_t, uint32);
tsize_t t2p_sample_rgbaa_to_rgb(tdata_t, uint32);
tsize_t t2p_sample_lab_signed_to_unsigned(tdata_t, uint32);
tsize_t t2p_write_pdf_header(T2P*, TIFF*);
tsize_t t2p_write_pdf_obj_start(uint32, TIFF*);
tsize_t t2p_write_pdf_obj_end(TIFF*);
tsize_t t2p_write_pdf_name(unsigned char*, TIFF*);
tsize_t t2p_write_pdf_string(char*, TIFF*);
tsize_t t2p_write_pdf_stream(tdata_t, tsize_t, TIFF*);
tsize_t t2p_write_pdf_stream_start(TIFF*);
tsize_t t2p_write_pdf_stream_end(TIFF*);
tsize_t t2p_write_pdf_stream_dict(tsize_t, uint32, TIFF*);
tsize_t t2p_write_pdf_stream_dict_start(TIFF*);
tsize_t t2p_write_pdf_stream_dict_end(TIFF*);
tsize_t t2p_write_pdf_stream_length(tsize_t, TIFF*);
tsize_t t2p_write_pdf_catalog(T2P*, TIFF*);
tsize_t t2p_write_pdf_info(T2P*, TIFF*, TIFF*);
void t2p_pdf_currenttime(T2P*);
void t2p_pdf_tifftime(T2P*, TIFF*);
tsize_t t2p_write_pdf_pages(T2P*, TIFF*);
tsize_t t2p_write_pdf_page(uint32, T2P*, TIFF*);
void t2p_compose_pdf_page(T2P*);
void t2p_compose_pdf_page_orient(T2P_BOX*, uint16);
void t2p_compose_pdf_page_orient_flip(T2P_BOX*, uint16);
tsize_t t2p_write_pdf_page_content(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_stream_dict(ttile_t, T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_cs(T2P*, TIFF*);
tsize_t t2p_write_pdf_transfer(T2P*, TIFF*);
tsize_t t2p_write_pdf_transfer_dict(T2P*, TIFF*, uint16);
tsize_t t2p_write_pdf_transfer_stream(T2P*, TIFF*, uint16);
tsize_t t2p_write_pdf_xobject_calcs(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_icccs(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_icccs_dict(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_icccs_stream(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_cs_stream(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_decode(T2P*, TIFF*);
tsize_t t2p_write_pdf_xobject_stream_filter(ttile_t, T2P*, TIFF*);
tsize_t t2p_write_pdf_xreftable(T2P*, TIFF*);
tsize_t t2p_write_pdf_trailer(T2P*, TIFF*);
#define check_snprintf_ret(t2p, rv, buf) do { \
if ((rv) < 0) rv = 0; \
else if((rv) >= (int)sizeof(buf)) (rv) = sizeof(buf) - 1; \
else break; \
if ((t2p) != NULL) (t2p)->t2p_error = T2P_ERR_ERROR; \
} while(0)
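/*
* Usage sketch for the macro above: after rv = snprintf(buf, sizeof(buf), ...),
* call check_snprintf_ret(t2p, rv, buf); it clamps rv into [0, sizeof(buf) - 1]
* and flags t2p->t2p_error on an encoding error (rv < 0) or truncation.
*/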
static void
t2p_disable(TIFF *tif)
{
T2P *t2p = (T2P*) TIFFClientdata(tif);
t2p->outputdisable = 1;
}
static void
t2p_enable(TIFF *tif)
{
T2P *t2p = (T2P*) TIFFClientdata(tif);
t2p->outputdisable = 0;
}
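/*
* t2p_disable()/t2p_enable() toggle the outputdisable flag checked by
* t2p_writeproc() and t2p_seekproc() below: while disabled, writes report
* success but are discarded, so libtiff's own container output can be
* suppressed around the raw stream data destined for the PDF.
*/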
/*
* Procs for TIFFClientOpen
*/
#ifdef OJPEG_SUPPORT
static tmsize_t
t2pReadFile(TIFF *tif, tdata_t data, tmsize_t size)
{
thandle_t client = TIFFClientdata(tif);
TIFFReadWriteProc proc = TIFFGetReadProc(tif);
if (proc)
return proc(client, data, size);
return -1;
}
#endif /* OJPEG_SUPPORT */
static tmsize_t
t2pWriteFile(TIFF *tif, tdata_t data, tmsize_t size)
{
thandle_t client = TIFFClientdata(tif);
TIFFReadWriteProc proc = TIFFGetWriteProc(tif);
if (proc)
return proc(client, data, size);
return -1;
}
static uint64
t2pSeekFile(TIFF *tif, toff_t offset, int whence)
{
thandle_t client = TIFFClientdata(tif);
TIFFSeekProc proc = TIFFGetSeekProc(tif);
if (proc)
return proc(client, offset, whence);
return -1;
}
static tmsize_t
t2p_readproc(thandle_t handle, tdata_t data, tmsize_t size)
{
(void) handle, (void) data, (void) size;
return -1;
}
static tmsize_t
t2p_writeproc(thandle_t handle, tdata_t data, tmsize_t size)
{
T2P *t2p = (T2P*) handle;
if (t2p->outputdisable <= 0 && t2p->outputfile) {
tsize_t written = fwrite(data, 1, size, t2p->outputfile);
t2p->outputwritten += written;
return written;
}
return size;
}
static uint64
t2p_seekproc(thandle_t handle, uint64 offset, int whence)
{
T2P *t2p = (T2P*) handle;
if (t2p->outputdisable <= 0 && t2p->outputfile)
return _TIFF_fseek_f(t2p->outputfile, (_TIFF_off_t) offset, whence);
return offset;
}
static int
t2p_closeproc(thandle_t handle)
{
T2P *t2p = (T2P*) handle;
return fclose(t2p->outputfile);
}
static uint64
t2p_sizeproc(thandle_t handle)
{
(void) handle;
return -1;
}
static int
t2p_mapproc(thandle_t handle, void **data, toff_t *offset)
{
(void) handle, (void) data, (void) offset;
return -1;
}
static void
t2p_unmapproc(thandle_t handle, void *data, toff_t offset)
{
(void) handle, (void) data, (void) offset;
}
#if defined(OJPEG_SUPPORT) || defined(JPEG_SUPPORT)
static uint64
checkAdd64(uint64 summand1, uint64 summand2, T2P* t2p)
{
uint64 bytes = summand1 + summand2;
if (bytes < summand1) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
bytes = 0;
}
return bytes;
}
#endif /* defined(OJPEG_SUPPORT) || defined(JPEG_SUPPORT) */
static uint64
checkMultiply64(uint64 first, uint64 second, T2P* t2p)
{
uint64 bytes = first * second;
if (second && bytes / second != first) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
bytes = 0;
}
return bytes;
}
/*
This is the main function.
The program converts one TIFF file to one PDF file, including multiple page
TIFF files, tiled TIFF files, black and white, grayscale, and color TIFF
files that contain data of TIFF photometric interpretations of bilevel,
grayscale, RGB, YCbCr, CMYK separation, and ICC L*a*b* as supported by
libtiff and PDF.
If you have multiple TIFF files to convert into one PDF file then use tiffcp
or other program to concatenate the files into a multiple page TIFF file.
If the input TIFF file is of huge dimensions (greater than 10000 pixels height
or width) convert the input image to a tiled TIFF if it is not already.
Output goes to standard output by default. Set the output file name with the
"-o output.pdf" option.
All black and white files are compressed into a single strip CCITT G4 Fax
compressed PDF, unless tiled, where tiled black and white images are
compressed into tiled CCITT G4 Fax compressed PDF, libtiff CCITT support
is assumed.
Color and grayscale data can be compressed using either JPEG compression,
ITU-T T.81, or Zip/Deflate LZ77 compression, per PNG 1.2 and RFC 1951. Set
the compression type using the -j or -z options. JPEG compression support
requires that libtiff be configured with JPEG support, and Zip/Deflate
compression support requires that libtiff is configured with Zip support,
in tiffconf.h. Use only one or the other of -j and -z. The -q option
sets the image compression quality: 1-100 with libjpeg JPEG compression,
or one of 1, 10, 11, 12, 13, 14, or 15 for the PNG group compression
predictor methods; add 100, 200, ..., 900 to set zlib compression level 1-9.
PNG Group differencing predictor methods are not currently implemented.
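For example, "-j -q 75" selects JPEG quality 75, and "-z -q 900" selects
zlib compression level 9; a value such as "-q 911" parses as level 9 with
predictor 11, but since predictor differencing is unimplemented it is
reduced to 900 with a warning.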
If the input TIFF contains single strip CCITT G4 Fax compressed information,
then that is written to the PDF file without transcoding, unless the options
of no compression and no passthrough are set, -d and -n.
If the input TIFF contains JPEG or single strip Zip/Deflate compressed
information, and they are configured, then that is written to the PDF file
without transcoding, unless the options of no compression and no passthrough
are set.
The default page size upon which the TIFF image is placed is determined by
the resolution and extent of the image data. Default values for the TIFF
image resolution can be set using the -x and -y options. The page size can
be set using the -p option for paper size, or -w and -l for paper width and
length, then each page of the TIFF image is centered on its page. The
distance unit for default resolution and page width and length can be set
by the -u option, the default unit is inch.
Various items of the output document information can be set with the -e, -c,
-a, -t, -s, and -k tags. Setting the argument of the option to "" for these
tags causes the relevant document information field not to be written. Some
of the document information values otherwise get their information from the
input TIFF image, the software, author, document name, and image description.
The output PDF file conforms to the PDF 1.1 specification or PDF 1.2 if using
Zip/Deflate compression.
The Portable Document Format (PDF) specification is copyrighted by Adobe
Systems, Incorporated. All rights reserved.
Here is a listing of the usage example and the options to the tiff2pdf
program that is part of the libtiff distribution. Options followed by
a colon have a required argument.
usage: tiff2pdf [options] input.tif
options:
-o: output to file name
-j: compress with JPEG (requires libjpeg configured with libtiff)
-z: compress with Zip/Deflate (requires zlib configured with libtiff)
-q: compression quality
-n: no compressed data passthrough
-d: do not compress (decompress)
-i: invert colors
-u: set distance unit, 'i' for inch, 'm' for centimeter
-x: set x resolution default
-y: set y resolution default
-w: width in units
-l: length in units
-r: 'd' for resolution default, 'o' for resolution override
-p: paper size, eg "letter", "legal", "a4"
-F: make the tiff fill the PDF page
-f: set pdf "fit window" user preference
-b: set PDF "Interpolate" user preference
-e: date, overrides image or current date/time default, YYYYMMDDHHMMSS
-c: creator, overrides image software default
-a: author, overrides image artist default
-t: title, overrides image document name default
-s: subject, overrides image image description default
-k: keywords
-h: usage
examples:
tiff2pdf -o output.pdf input.tiff
The above example would generate the file output.pdf from input.tiff.
tiff2pdf input.tiff
The above example would generate PDF output from input.tiff and write it
to standard output.
tiff2pdf -j -p letter -o output.pdf input.tiff
The above example would generate the file output.pdf from input.tiff,
putting the image pages on a letter sized page, compressing the output
with JPEG.
Please report bugs through:
http://bugzilla.remotesensing.org/buglist.cgi?product=libtiff
See also libtiff.3t, tiffcp.
*/
int main(int argc, char** argv){
#if !HAVE_DECL_OPTARG
extern char *optarg;
extern int optind;
#endif
const char *outfilename = NULL;
T2P *t2p = NULL;
TIFF *input = NULL, *output = NULL;
int c, ret = EXIT_SUCCESS;
t2p = t2p_init();
if (t2p == NULL){
TIFFError(TIFF2PDF_MODULE, "Can't initialize context");
goto fail;
}
while (argv &&
(c = getopt(argc, argv,
"o:q:u:x:y:w:l:r:p:e:c:a:t:s:k:jzndifbhF")) != -1){
switch (c) {
case 'o':
outfilename = optarg;
break;
#ifdef JPEG_SUPPORT
case 'j':
t2p->pdf_defaultcompression=T2P_COMPRESS_JPEG;
break;
#endif
#ifndef JPEG_SUPPORT
case 'j':
TIFFWarning(
TIFF2PDF_MODULE,
"JPEG support in libtiff required for JPEG compression, ignoring option");
break;
#endif
#ifdef ZIP_SUPPORT
case 'z':
t2p->pdf_defaultcompression=T2P_COMPRESS_ZIP;
break;
#endif
#ifndef ZIP_SUPPORT
case 'z':
TIFFWarning(
TIFF2PDF_MODULE,
"Zip support in libtiff required for Zip compression, ignoring option");
break;
#endif
case 'q':
t2p->pdf_defaultcompressionquality=atoi(optarg);
break;
case 'n':
t2p->pdf_nopassthrough=1;
break;
case 'd':
t2p->pdf_defaultcompression=T2P_COMPRESS_NONE;
break;
case 'u':
if(optarg[0]=='m'){
t2p->pdf_centimeters=1;
}
break;
case 'x':
t2p->pdf_defaultxres =
(float)atof(optarg) / (t2p->pdf_centimeters?2.54F:1.0F);
break;
case 'y':
t2p->pdf_defaultyres =
(float)atof(optarg) / (t2p->pdf_centimeters?2.54F:1.0F);
break;
case 'w':
t2p->pdf_overridepagesize=1;
t2p->pdf_defaultpagewidth =
((float)atof(optarg) * PS_UNIT_SIZE) / (t2p->pdf_centimeters?2.54F:1.0F);
break;
case 'l':
t2p->pdf_overridepagesize=1;
t2p->pdf_defaultpagelength =
((float)atof(optarg) * PS_UNIT_SIZE) / (t2p->pdf_centimeters?2.54F:1.0F);
break;
case 'r':
if(optarg[0]=='o'){
t2p->pdf_overrideres=1;
}
break;
case 'p':
if(tiff2pdf_match_paper_size(
&(t2p->pdf_defaultpagewidth),
&(t2p->pdf_defaultpagelength),
optarg)){
t2p->pdf_overridepagesize=1;
} else {
TIFFWarning(TIFF2PDF_MODULE,
"Unknown paper size %s, ignoring option",
optarg);
}
break;
case 'i':
t2p->pdf_colorspace_invert=1;
break;
case 'F':
t2p->pdf_image_fillpage = 1;
break;
case 'f':
t2p->pdf_fitwindow=1;
break;
case 'e':
if (strlen(optarg) == 0) {
t2p->pdf_datetime[0] = '\0';
} else {
t2p->pdf_datetime[0] = 'D';
t2p->pdf_datetime[1] = ':';
strncpy(t2p->pdf_datetime + 2, optarg,
sizeof(t2p->pdf_datetime) - 3);
t2p->pdf_datetime[sizeof(t2p->pdf_datetime) - 1] = '\0';
}
break;
case 'c':
strncpy(t2p->pdf_creator, optarg, sizeof(t2p->pdf_creator) - 1);
t2p->pdf_creator[sizeof(t2p->pdf_creator) - 1] = '\0';
break;
case 'a':
strncpy(t2p->pdf_author, optarg, sizeof(t2p->pdf_author) - 1);
t2p->pdf_author[sizeof(t2p->pdf_author) - 1] = '\0';
break;
case 't':
strncpy(t2p->pdf_title, optarg, sizeof(t2p->pdf_title) - 1);
t2p->pdf_title[sizeof(t2p->pdf_title) - 1] = '\0';
break;
case 's':
strncpy(t2p->pdf_subject, optarg, sizeof(t2p->pdf_subject) - 1);
t2p->pdf_subject[sizeof(t2p->pdf_subject) - 1] = '\0';
break;
case 'k':
strncpy(t2p->pdf_keywords, optarg, sizeof(t2p->pdf_keywords) - 1);
t2p->pdf_keywords[sizeof(t2p->pdf_keywords) - 1] = '\0';
break;
case 'b':
t2p->pdf_image_interpolate = 1;
break;
case 'h':
case '?':
tiff2pdf_usage();
goto success;
break;
}
}
/*
* Input
*/
if(argc > optind) {
input = TIFFOpen(argv[optind++], "r");
if (input==NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't open input file %s for reading",
argv[optind-1]);
goto fail;
}
} else {
TIFFError(TIFF2PDF_MODULE, "No input file specified");
tiff2pdf_usage();
goto fail;
}
if(argc > optind) {
TIFFError(TIFF2PDF_MODULE,
"No support for multiple input files");
tiff2pdf_usage();
goto fail;
}
/*
* Output
*/
t2p->outputdisable = 1;
if (outfilename) {
t2p->outputfile = fopen(outfilename, "wb");
if (t2p->outputfile == NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't open output file %s for writing",
outfilename);
goto fail;
}
} else {
outfilename = "-";
t2p->outputfile = stdout;
}
output = TIFFClientOpen(outfilename, "w", (thandle_t) t2p,
t2p_readproc, t2p_writeproc, t2p_seekproc,
t2p_closeproc, t2p_sizeproc,
t2p_mapproc, t2p_unmapproc);
t2p->outputdisable = 0;
if (output == NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't initialize output descriptor");
goto fail;
}
/*
* Validate
*/
t2p_validate(t2p);
t2pSeekFile(output, (toff_t) 0, SEEK_SET);
/*
* Write
*/
t2p_write_pdf(t2p, input, output);
if (t2p->t2p_error != 0) {
TIFFError(TIFF2PDF_MODULE,
"An error occurred creating output PDF file");
goto fail;
}
goto success;
fail:
ret = EXIT_FAILURE;
success:
if(input != NULL)
TIFFClose(input);
if (output != NULL)
TIFFClose(output);
if (t2p != NULL)
t2p_free(t2p);
return ret;
}
void tiff2pdf_usage(){
char* lines[]={
"usage: tiff2pdf [options] input.tiff",
"options:",
" -o: output to file name",
#ifdef JPEG_SUPPORT
" -j: compress with JPEG",
#endif
#ifdef ZIP_SUPPORT
" -z: compress with Zip/Deflate",
#endif
" -q: compression quality",
" -n: no compressed data passthrough",
" -d: do not compress (decompress)",
" -i: invert colors",
" -u: set distance unit, 'i' for inch, 'm' for centimeter",
" -x: set x resolution default in dots per unit",
" -y: set y resolution default in dots per unit",
" -w: width in units",
" -l: length in units",
" -r: 'd' for resolution default, 'o' for resolution override",
" -p: paper size, eg \"letter\", \"legal\", \"A4\"",
" -F: make the tiff fill the PDF page",
" -f: set PDF \"Fit Window\" user preference",
" -e: date, overrides image or current date/time default, YYYYMMDDHHMMSS",
" -c: sets document creator, overrides image software default",
" -a: sets document author, overrides image artist default",
" -t: sets document title, overrides image document name default",
" -s: sets document subject, overrides image image description default",
" -k: sets document keywords",
" -b: set PDF \"Interpolate\" user preference",
" -h: usage",
NULL
};
int i=0;
fprintf(stderr, "%s\n\n", TIFFGetVersion());
for (i=0;lines[i]!=NULL;i++){
fprintf(stderr, "%s\n", lines[i]);
}
return;
}
int tiff2pdf_match_paper_size(float* width, float* length, char* papersize){
size_t i, len;
const char* sizes[]={
"LETTER", "A4", "LEGAL",
"EXECUTIVE", "LETTER", "LEGAL", "LEDGER", "TABLOID",
"A", "B", "C", "D", "E", "F", "G", "H", "J", "K",
"A10", "A9", "A8", "A7", "A6", "A5", "A4", "A3", "A2", "A1", "A0",
"2A0", "4A0", "2A", "4A",
"B10", "B9", "B8", "B7", "B6", "B5", "B4", "B3", "B2", "B1", "B0",
"JISB10", "JISB9", "JISB8", "JISB7", "JISB6", "JISB5", "JISB4",
"JISB3", "JISB2", "JISB1", "JISB0",
"C10", "C9", "C8", "C7", "C6", "C5", "C4", "C3", "C2", "C1", "C0",
"RA2", "RA1", "RA0", "SRA4", "SRA3", "SRA2", "SRA1", "SRA0",
"A3EXTRA", "A4EXTRA",
"STATEMENT", "FOLIO", "QUARTO",
NULL
} ;
const int widths[]={
612, 595, 612,
522, 612,612,792,792,
612,792,1224,1584,2448,2016,792,2016,2448,2880,
74,105,147,210,298,420,595,842,1191,1684,2384,3370,4768,3370,4768,
88,125,176,249,354,499,709,1001,1417,2004,2835,
91,128,181,258,363,516,729,1032,1460,2064,2920,
79,113,162,230,323,459,649,918,1298,1298,2599,
1219,1729,2438,638,907,1276,1814,2551,
914,667,
396, 612, 609,
0
};
const int lengths[]={
792,842,1008,
756,792,1008,1224,1224,
792,1224,1584,2448,3168,2880,6480,10296,12672,10296,
105,147,210,298,420,595,842,1191,1684,2384,3370,4768,6741,4768,6741,
125,176,249,354,499,709,1001,1417,2004,2835,4008,
128,181,258,363,516,729,1032,1460,2064,2920,4127,
113,162,230,323,459,649,918,1298,1837,1837,3677,
1729,2438,3458,907,1276,1814,2551,3628,
1262,914,
612, 936, 780,
0
};
len=strlen(papersize);
for(i=0;i<len;i++){
papersize[i]=toupper((int) papersize[i]);
}
for(i=0;sizes[i]!=NULL; i++){
if (strcmp( (const char*)papersize, sizes[i])==0){
*width=(float)widths[i];
*length=(float)lengths[i];
return(1);
}
}
return(0);
}
/*
* This function allocates and initializes a T2P context struct pointer.
*/
T2P* t2p_init()
{
T2P* t2p = (T2P*) _TIFFmalloc(sizeof(T2P));
if(t2p==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_init",
(unsigned long) sizeof(T2P));
return( (T2P*) NULL );
}
_TIFFmemset(t2p, 0x00, sizeof(T2P));
t2p->pdf_majorversion=1;
t2p->pdf_minorversion=1;
t2p->pdf_defaultxres=300.0;
t2p->pdf_defaultyres=300.0;
t2p->pdf_defaultpagewidth=612.0;
t2p->pdf_defaultpagelength=792.0;
t2p->pdf_xrefcount=3; /* Catalog, Info, Pages */
return(t2p);
}
/*
* This function frees a T2P context struct pointer and any allocated data fields of it.
*/
void t2p_free(T2P* t2p)
{
int i = 0;
if (t2p != NULL) {
if(t2p->pdf_xrefoffsets != NULL){
_TIFFfree( (tdata_t) t2p->pdf_xrefoffsets);
}
if(t2p->tiff_pages != NULL){
_TIFFfree( (tdata_t) t2p->tiff_pages);
}
for(i=0;i<t2p->tiff_pagecount;i++){
if(t2p->tiff_tiles[i].tiles_tiles != NULL){
_TIFFfree( (tdata_t) t2p->tiff_tiles[i].tiles_tiles);
}
}
if(t2p->tiff_tiles != NULL){
_TIFFfree( (tdata_t) t2p->tiff_tiles);
}
if(t2p->pdf_palette != NULL){
_TIFFfree( (tdata_t) t2p->pdf_palette);
}
#ifdef OJPEG_SUPPORT
if(t2p->pdf_ojpegdata != NULL){
_TIFFfree( (tdata_t) t2p->pdf_ojpegdata);
}
#endif
_TIFFfree( (tdata_t) t2p );
}
return;
}
/*
This function validates the values of a T2P context struct pointer
before calling t2p_write_pdf with it.
*/
void t2p_validate(T2P* t2p){
#ifdef JPEG_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_JPEG){
if(t2p->pdf_defaultcompressionquality>100 ||
t2p->pdf_defaultcompressionquality<1){
t2p->pdf_defaultcompressionquality=0;
}
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_ZIP){
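/* The quality value packs two fields: zlib level times 100, plus a PNG
predictor selector that must be 0, 1, or 10 through 15. */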
uint16 m=t2p->pdf_defaultcompressionquality%100;
if(t2p->pdf_defaultcompressionquality/100 > 9 ||
(m>1 && m<10) || m>15){
t2p->pdf_defaultcompressionquality=0;
}
if(t2p->pdf_defaultcompressionquality%100 !=0){
t2p->pdf_defaultcompressionquality/=100;
t2p->pdf_defaultcompressionquality*=100;
TIFFError(
TIFF2PDF_MODULE,
"PNG Group predictor differencing not implemented, assuming compression quality %u",
t2p->pdf_defaultcompressionquality);
}
t2p->pdf_defaultcompressionquality%=100;
if(t2p->pdf_minorversion<2){t2p->pdf_minorversion=2;}
}
#endif
(void)0;
return;
}
/*
This function scans the input TIFF file for pages. It attempts
to determine which IFD's of the TIFF file contain image document
pages. For each, it gathers some information that has to do
with the output of the PDF document as a whole.
*/
void t2p_read_tiff_init(T2P* t2p, TIFF* input){
tdir_t directorycount=0;
tdir_t i=0;
uint16 pagen=0;
uint16 paged=0;
uint16 xuint16=0;
directorycount=TIFFNumberOfDirectories(input);
t2p->tiff_pages = (T2P_PAGE*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,directorycount,sizeof(T2P_PAGE)));
if(t2p->tiff_pages==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for tiff_pages array, %s",
(TIFF_SIZE_T) directorycount * sizeof(T2P_PAGE),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
_TIFFmemset( t2p->tiff_pages, 0x00, directorycount * sizeof(T2P_PAGE));
t2p->tiff_tiles = (T2P_TILES*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,directorycount,sizeof(T2P_TILES)));
if(t2p->tiff_tiles==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for tiff_tiles array, %s",
(TIFF_SIZE_T) directorycount * sizeof(T2P_TILES),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
_TIFFmemset( t2p->tiff_tiles, 0x00, directorycount * sizeof(T2P_TILES));
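/* Classify each IFD as a page or not: an IFD with a PageNumber tag, a
(O)SubFileType tag marking it as a page or full-resolution image, or with
neither tag at all, is treated as a document page. */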
for(i=0;i<directorycount;i++){
uint32 subfiletype = 0;
if(!TIFFSetDirectory(input, i)){
TIFFError(
TIFF2PDF_MODULE,
"Can't set directory %u of input file %s",
i,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_PAGENUMBER, &pagen, &paged)){
if((pagen>paged) && (paged != 0)){
t2p->tiff_pages[t2p->tiff_pagecount].page_number =
paged;
} else {
t2p->tiff_pages[t2p->tiff_pagecount].page_number =
pagen;
}
goto ispage2;
}
if(TIFFGetField(input, TIFFTAG_SUBFILETYPE, &subfiletype)){
if ( ((subfiletype & FILETYPE_PAGE) != 0)
|| (subfiletype == 0)){
goto ispage;
} else {
goto isnotpage;
}
}
if(TIFFGetField(input, TIFFTAG_OSUBFILETYPE, &subfiletype)){
if ((subfiletype == OFILETYPE_IMAGE)
|| (subfiletype == OFILETYPE_PAGE)
|| (subfiletype == 0) ){
goto ispage;
} else {
goto isnotpage;
}
}
ispage:
t2p->tiff_pages[t2p->tiff_pagecount].page_number=t2p->tiff_pagecount;
ispage2:
t2p->tiff_pages[t2p->tiff_pagecount].page_directory=i;
if(TIFFIsTiled(input)){
t2p->tiff_pages[t2p->tiff_pagecount].page_tilecount =
TIFFNumberOfTiles(input);
}
t2p->tiff_pagecount++;
isnotpage:
(void)0;
}
qsort((void*) t2p->tiff_pages, t2p->tiff_pagecount,
sizeof(T2P_PAGE), t2p_cmp_t2p_page);
for(i=0;i<t2p->tiff_pagecount;i++){
t2p->pdf_xrefcount += 5;
TIFFSetDirectory(input, t2p->tiff_pages[i].page_directory );
if((TIFFGetField(input, TIFFTAG_PHOTOMETRIC, &xuint16)
&& (xuint16==PHOTOMETRIC_PALETTE))
|| TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)) {
t2p->tiff_pages[i].page_extra++;
t2p->pdf_xrefcount++;
}
#ifdef ZIP_SUPPORT
if (TIFFGetField(input, TIFFTAG_COMPRESSION, &xuint16)) {
if( (xuint16== COMPRESSION_DEFLATE ||
xuint16== COMPRESSION_ADOBE_DEFLATE) &&
((t2p->tiff_pages[i].page_tilecount != 0)
|| TIFFNumberOfStrips(input)==1) &&
(t2p->pdf_nopassthrough==0) ){
if(t2p->pdf_minorversion<2){t2p->pdf_minorversion=2;}
}
}
#endif
if (TIFFGetField(input, TIFFTAG_TRANSFERFUNCTION,
&(t2p->tiff_transferfunction[0]),
&(t2p->tiff_transferfunction[1]),
&(t2p->tiff_transferfunction[2]))) {
if((t2p->tiff_transferfunction[1] != (float*) NULL) &&
(t2p->tiff_transferfunction[2] != (float*) NULL) &&
(t2p->tiff_transferfunction[1] !=
t2p->tiff_transferfunction[0])) {
t2p->tiff_transferfunctioncount = 3;
t2p->tiff_pages[i].page_extra += 4;
t2p->pdf_xrefcount += 4;
} else {
t2p->tiff_transferfunctioncount = 1;
t2p->tiff_pages[i].page_extra += 2;
t2p->pdf_xrefcount += 2;
}
if(t2p->pdf_minorversion < 2)
t2p->pdf_minorversion = 2;
} else {
t2p->tiff_transferfunctioncount=0;
}
if( TIFFGetField(
input,
TIFFTAG_ICCPROFILE,
&(t2p->tiff_iccprofilelength),
&(t2p->tiff_iccprofile)) != 0){
t2p->tiff_pages[i].page_extra++;
t2p->pdf_xrefcount++;
if(t2p->pdf_minorversion<3){t2p->pdf_minorversion=3;}
}
t2p->tiff_tiles[i].tiles_tilecount=
t2p->tiff_pages[i].page_tilecount;
if( (TIFFGetField(input, TIFFTAG_PLANARCONFIG, &xuint16) != 0)
&& (xuint16 == PLANARCONFIG_SEPARATE ) ){
if( !TIFFGetField(input, TIFFTAG_SAMPLESPERPIXEL, &xuint16) )
{
TIFFError(
TIFF2PDF_MODULE,
"Missing SamplesPerPixel, %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( (t2p->tiff_tiles[i].tiles_tilecount % xuint16) != 0 )
{
TIFFError(
TIFF2PDF_MODULE,
"Invalid tile count, %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->tiff_tiles[i].tiles_tilecount/= xuint16;
}
if( t2p->tiff_tiles[i].tiles_tilecount > 0){
t2p->pdf_xrefcount +=
(t2p->tiff_tiles[i].tiles_tilecount -1)*2;
TIFFGetField(input,
TIFFTAG_TILEWIDTH,
&( t2p->tiff_tiles[i].tiles_tilewidth) );
TIFFGetField(input,
TIFFTAG_TILELENGTH,
&( t2p->tiff_tiles[i].tiles_tilelength) );
t2p->tiff_tiles[i].tiles_tiles =
(T2P_TILE*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->tiff_tiles[i].tiles_tilecount,
sizeof(T2P_TILE)) );
if( t2p->tiff_tiles[i].tiles_tiles == NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for t2p_read_tiff_init, %s",
(TIFF_SIZE_T) t2p->tiff_tiles[i].tiles_tilecount * sizeof(T2P_TILE),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
}
return;
}
/*
* This function is used by qsort to sort a T2P_PAGE* array of page structures
* by page number. If the page numbers are the same, we fall back to comparing
* directory numbers to preserve the order of the input file.
*/
int t2p_cmp_t2p_page(const void* e1, const void* e2){
int d;
d = (int32)(((T2P_PAGE*)e1)->page_number) - (int32)(((T2P_PAGE*)e2)->page_number);
if(d == 0){
d = (int32)(((T2P_PAGE*)e1)->page_directory) - (int32)(((T2P_PAGE*)e2)->page_directory);
}
return d;
}
/*
This function sets the input directory to the directory of a given
page and determines information about the image. It checks
the image characteristics to determine if it is possible to convert
the image data into a page of PDF output, setting values of the T2P
struct for this page. It determines what color space is used in
the output PDF to represent the image.
It determines if the image can be converted as raw data without
requiring transcoding of the image data.
*/
void t2p_read_tiff_data(T2P* t2p, TIFF* input){
int i=0;
uint16* r;
uint16* g;
uint16* b;
uint16* a;
uint16 xuint16;
uint16* xuint16p;
float* xfloatp;
t2p->pdf_transcode = T2P_TRANSCODE_ENCODE;
t2p->pdf_sample = T2P_SAMPLE_NOTHING;
t2p->pdf_switchdecode = t2p->pdf_colorspace_invert;
TIFFSetDirectory(input, t2p->tiff_pages[t2p->pdf_page].page_directory);
TIFFGetField(input, TIFFTAG_IMAGEWIDTH, &(t2p->tiff_width));
if(t2p->tiff_width == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with zero width",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetField(input, TIFFTAG_IMAGELENGTH, &(t2p->tiff_length));
if(t2p->tiff_length == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with zero length",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_COMPRESSION, &(t2p->tiff_compression)) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with no compression tag",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( TIFFIsCODECConfigured(t2p->tiff_compression) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with compression type %u: not configured",
TIFFFileName(input),
t2p->tiff_compression
);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetFieldDefaulted(input, TIFFTAG_BITSPERSAMPLE, &(t2p->tiff_bitspersample));
switch(t2p->tiff_bitspersample){
case 1:
case 2:
case 4:
case 8:
break;
case 0:
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has 0 bits per sample, assuming 1",
TIFFFileName(input));
t2p->tiff_bitspersample=1;
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with %u bits per sample",
TIFFFileName(input),
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
TIFFGetFieldDefaulted(input, TIFFTAG_SAMPLESPERPIXEL, &(t2p->tiff_samplesperpixel));
if(t2p->tiff_samplesperpixel>4){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->tiff_samplesperpixel==0){
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has 0 samples per pixel, assuming 1",
TIFFFileName(input));
t2p->tiff_samplesperpixel=1;
}
if(TIFFGetField(input, TIFFTAG_SAMPLEFORMAT, &xuint16) != 0 ){
switch(xuint16){
case 0:
case 1:
case 4:
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with sample format %u",
TIFFFileName(input),
xuint16);
t2p->t2p_error = T2P_ERR_ERROR;
return;
break;
}
}
TIFFGetFieldDefaulted(input, TIFFTAG_FILLORDER, &(t2p->tiff_fillorder));
if(TIFFGetField(input, TIFFTAG_PHOTOMETRIC, &(t2p->tiff_photometric)) == 0){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with no photometric interpretation tag",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
switch(t2p->tiff_photometric){
case PHOTOMETRIC_MINISWHITE:
case PHOTOMETRIC_MINISBLACK:
if (t2p->tiff_bitspersample==1){
t2p->pdf_colorspace=T2P_CS_BILEVEL;
if(t2p->tiff_photometric==PHOTOMETRIC_MINISWHITE){
t2p->pdf_switchdecode ^= 1;
}
} else {
t2p->pdf_colorspace=T2P_CS_GRAY;
if(t2p->tiff_photometric==PHOTOMETRIC_MINISWHITE){
t2p->pdf_switchdecode ^= 1;
}
}
break;
case PHOTOMETRIC_RGB:
t2p->pdf_colorspace=T2P_CS_RGB;
if(t2p->tiff_samplesperpixel == 3){
break;
}
if(TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)){
if(xuint16==1)
goto photometric_palette;
}
if(t2p->tiff_samplesperpixel > 3) {
if(t2p->tiff_samplesperpixel == 4) {
t2p->pdf_colorspace = T2P_CS_RGB;
if(TIFFGetField(input,
TIFFTAG_EXTRASAMPLES,
&xuint16, &xuint16p)
&& xuint16 == 1) {
if(xuint16p[0] == EXTRASAMPLE_ASSOCALPHA){
if( t2p->tiff_bitspersample != 8 )
{
TIFFError(
TIFF2PDF_MODULE,
"No support for BitsPerSample=%d for RGBA",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_sample=T2P_SAMPLE_RGBAA_TO_RGB;
break;
}
if(xuint16p[0] == EXTRASAMPLE_UNASSALPHA){
if( t2p->tiff_bitspersample != 8 )
{
TIFFError(
TIFF2PDF_MODULE,
"No support for BitsPerSample=%d for RGBA",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_sample=T2P_SAMPLE_RGBA_TO_RGB;
break;
}
TIFFWarning(
TIFF2PDF_MODULE,
"RGB image %s has 4 samples per pixel, assuming RGBA",
TIFFFileName(input));
break;
}
t2p->pdf_colorspace=T2P_CS_CMYK;
t2p->pdf_switchdecode ^= 1;
TIFFWarning(
TIFF2PDF_MODULE,
"RGB image %s has 4 samples per pixel, assuming inverse CMYK",
TIFFFileName(input));
break;
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for RGB image %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
break;
}
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for RGB image %s with %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
break;
}
case PHOTOMETRIC_PALETTE:
photometric_palette:
if(t2p->tiff_samplesperpixel!=1){
TIFFError(
TIFF2PDF_MODULE,
"No support for palettized image %s with not one sample per pixel",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_colorspace=T2P_CS_RGB | T2P_CS_PALETTE;
t2p->pdf_palettesize=0x0001<<t2p->tiff_bitspersample;
if(!TIFFGetField(input, TIFFTAG_COLORMAP, &r, &g, &b)){
TIFFError(
TIFF2PDF_MODULE,
"Palettized image %s has no color map",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->pdf_palette != NULL){
_TIFFfree(t2p->pdf_palette);
t2p->pdf_palette=NULL;
}
t2p->pdf_palette = (unsigned char*)
_TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->pdf_palettesize,3));
if(t2p->pdf_palette==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_read_tiff_image, %s",
t2p->pdf_palettesize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
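/* TIFF colormap entries are 16-bit; keep the high byte of each to
 * build the 8-bit PDF palette. */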
for(i=0;i<t2p->pdf_palettesize;i++){
t2p->pdf_palette[(i*3)] = (unsigned char) (r[i]>>8);
t2p->pdf_palette[(i*3)+1]= (unsigned char) (g[i]>>8);
t2p->pdf_palette[(i*3)+2]= (unsigned char) (b[i]>>8);
}
t2p->pdf_palettesize *= 3;
break;
case PHOTOMETRIC_SEPARATED:
if(TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)){
if(xuint16==1){
goto photometric_palette_cmyk;
}
}
if( TIFFGetField(input, TIFFTAG_INKSET, &xuint16) ){
if(xuint16 != INKSET_CMYK){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s because its inkset is not CMYK",
TIFFFileName(input) );
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
if(t2p->tiff_samplesperpixel==4){
t2p->pdf_colorspace=T2P_CS_CMYK;
} else {
TIFFError(
TIFF2PDF_MODULE,
"No support for %s because it has %u samples per pixel",
TIFFFileName(input),
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
break;
photometric_palette_cmyk:
if(t2p->tiff_samplesperpixel!=1){
TIFFError(
TIFF2PDF_MODULE,
"No support for palettized CMYK image %s with not one sample per pixel",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_colorspace=T2P_CS_CMYK | T2P_CS_PALETTE;
t2p->pdf_palettesize=0x0001<<t2p->tiff_bitspersample;
if(!TIFFGetField(input, TIFFTAG_COLORMAP, &r, &g, &b, &a)){
TIFFError(
TIFF2PDF_MODULE,
"Palettized image %s has no color map",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(t2p->pdf_palette != NULL){
_TIFFfree(t2p->pdf_palette);
t2p->pdf_palette=NULL;
}
t2p->pdf_palette = (unsigned char*)
_TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->pdf_palettesize,4));
if(t2p->pdf_palette==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_read_tiff_image, %s",
t2p->pdf_palettesize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
for(i=0;i<t2p->pdf_palettesize;i++){
t2p->pdf_palette[(i*4)] = (unsigned char) (r[i]>>8);
t2p->pdf_palette[(i*4)+1]= (unsigned char) (g[i]>>8);
t2p->pdf_palette[(i*4)+2]= (unsigned char) (b[i]>>8);
t2p->pdf_palette[(i*4)+3]= (unsigned char) (a[i]>>8);
}
t2p->pdf_palettesize *= 4;
break;
case PHOTOMETRIC_YCBCR:
t2p->pdf_colorspace=T2P_CS_RGB;
if(t2p->tiff_samplesperpixel==1){
t2p->pdf_colorspace=T2P_CS_GRAY;
t2p->tiff_photometric=PHOTOMETRIC_MINISBLACK;
break;
}
t2p->pdf_sample=T2P_SAMPLE_YCBCR_TO_RGB;
#ifdef JPEG_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_JPEG){
t2p->pdf_sample=T2P_SAMPLE_NOTHING;
}
#endif
break;
case PHOTOMETRIC_CIELAB:
if( t2p->tiff_samplesperpixel != 3){
TIFFError(
TIFF2PDF_MODULE,
"Unsupported samplesperpixel = %d for CIELAB",
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( t2p->tiff_bitspersample != 8){
TIFFError(
TIFF2PDF_MODULE,
"Invalid bitspersample = %d for CIELAB",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_labrange[0]= -127;
t2p->pdf_labrange[1]= 127;
t2p->pdf_labrange[2]= -127;
t2p->pdf_labrange[3]= 127;
t2p->pdf_sample=T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_ICCLAB:
t2p->pdf_labrange[0]= 0;
t2p->pdf_labrange[1]= 255;
t2p->pdf_labrange[2]= 0;
t2p->pdf_labrange[3]= 255;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_ITULAB:
if( t2p->tiff_samplesperpixel != 3){
TIFFError(
TIFF2PDF_MODULE,
"Unsupported samplesperpixel = %d for ITULAB",
t2p->tiff_samplesperpixel);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( t2p->tiff_bitspersample != 8){
TIFFError(
TIFF2PDF_MODULE,
"Invalid bitspersample = %d for ITULAB",
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->pdf_labrange[0]=-85;
t2p->pdf_labrange[1]=85;
t2p->pdf_labrange[2]=-75;
t2p->pdf_labrange[3]=124;
t2p->pdf_sample=T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED;
t2p->pdf_colorspace=T2P_CS_LAB;
break;
case PHOTOMETRIC_LOGL:
case PHOTOMETRIC_LOGLUV:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with photometric interpretation LogL/LogLuv",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with photometric interpretation %u",
TIFFFileName(input),
t2p->tiff_photometric);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_PLANARCONFIG, &(t2p->tiff_planar))){
switch(t2p->tiff_planar){
case 0:
TIFFWarning(
TIFF2PDF_MODULE,
"Image %s has planar configuration 0, assuming 1",
TIFFFileName(input));
t2p->tiff_planar=PLANARCONFIG_CONTIG;
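/* fall through: planar configuration 0 is treated as contiguous */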
case PLANARCONFIG_CONTIG:
break;
case PLANARCONFIG_SEPARATE:
t2p->pdf_sample=T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG;
if(t2p->tiff_bitspersample!=8){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with separated planar configuration and %u bits per sample",
TIFFFileName(input),
t2p->tiff_bitspersample);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
break;
default:
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with planar configuration %u",
TIFFFileName(input),
t2p->tiff_planar);
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
TIFFGetFieldDefaulted(input, TIFFTAG_ORIENTATION,
&(t2p->tiff_orientation));
if(t2p->tiff_orientation>8){
TIFFWarning(TIFF2PDF_MODULE,
"Image %s has orientation %u, assuming 0",
TIFFFileName(input), t2p->tiff_orientation);
t2p->tiff_orientation=0;
}
if(TIFFGetField(input, TIFFTAG_XRESOLUTION, &(t2p->tiff_xres) ) == 0){
t2p->tiff_xres=0.0;
}
if(TIFFGetField(input, TIFFTAG_YRESOLUTION, &(t2p->tiff_yres) ) == 0){
t2p->tiff_yres=0.0;
}
TIFFGetFieldDefaulted(input, TIFFTAG_RESOLUTIONUNIT,
&(t2p->tiff_resunit));
if(t2p->tiff_resunit == RESUNIT_CENTIMETER) {
t2p->tiff_xres *= 2.54F;
t2p->tiff_yres *= 2.54F;
} else if (t2p->tiff_resunit != RESUNIT_INCH
&& t2p->pdf_centimeters != 0) {
t2p->tiff_xres *= 2.54F;
t2p->tiff_yres *= 2.54F;
}
t2p_compose_pdf_page(t2p);
if( t2p->t2p_error == T2P_ERR_ERROR )
return;
t2p->pdf_transcode = T2P_TRANSCODE_ENCODE;
if(t2p->pdf_nopassthrough==0){
#ifdef CCITT_SUPPORT
if(t2p->tiff_compression==COMPRESSION_CCITTFAX4
){
if(TIFFIsTiled(input) || (TIFFNumberOfStrips(input)==1) ){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_G4;
}
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->tiff_compression== COMPRESSION_ADOBE_DEFLATE
|| t2p->tiff_compression==COMPRESSION_DEFLATE){
if(TIFFIsTiled(input) || (TIFFNumberOfStrips(input)==1) ){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_ZIP;
}
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_OJPEG){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_JPEG;
t2p_process_ojpeg_tables(t2p, input);
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_JPEG){
t2p->pdf_transcode = T2P_TRANSCODE_RAW;
t2p->pdf_compression=T2P_COMPRESS_JPEG;
}
#endif
(void)0;
}
if(t2p->pdf_transcode!=T2P_TRANSCODE_RAW){
t2p->pdf_compression = t2p->pdf_defaultcompression;
}
#ifdef JPEG_SUPPORT
if(t2p->pdf_defaultcompression==T2P_COMPRESS_JPEG){
if(t2p->pdf_colorspace & T2P_CS_PALETTE){
t2p->pdf_sample|=T2P_SAMPLE_REALIZE_PALETTE;
t2p->pdf_colorspace ^= T2P_CS_PALETTE;
t2p->tiff_pages[t2p->pdf_page].page_extra--;
}
}
if(t2p->tiff_compression==COMPRESSION_JPEG){
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with JPEG compression and separated planar configuration",
TIFFFileName(input));
t2p->t2p_error=T2P_ERR_ERROR;
return;
}
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_OJPEG){
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
TIFFError(
TIFF2PDF_MODULE,
"No support for %s with OJPEG compression and separated planar configuration",
TIFFFileName(input));
t2p->t2p_error=T2P_ERR_ERROR;
return;
}
}
#endif
if(t2p->pdf_sample & T2P_SAMPLE_REALIZE_PALETTE){
if(t2p->pdf_colorspace & T2P_CS_CMYK){
t2p->tiff_samplesperpixel=4;
t2p->tiff_photometric=PHOTOMETRIC_SEPARATED;
} else {
t2p->tiff_samplesperpixel=3;
t2p->tiff_photometric=PHOTOMETRIC_RGB;
}
}
if (TIFFGetField(input, TIFFTAG_TRANSFERFUNCTION,
&(t2p->tiff_transferfunction[0]),
&(t2p->tiff_transferfunction[1]),
&(t2p->tiff_transferfunction[2]))) {
if((t2p->tiff_transferfunction[1] != (float*) NULL) &&
(t2p->tiff_transferfunction[2] != (float*) NULL) &&
(t2p->tiff_transferfunction[1] !=
t2p->tiff_transferfunction[0])) {
t2p->tiff_transferfunctioncount=3;
} else {
t2p->tiff_transferfunctioncount=1;
}
} else {
t2p->tiff_transferfunctioncount=0;
}
if(TIFFGetField(input, TIFFTAG_WHITEPOINT, &xfloatp)!=0){
t2p->tiff_whitechromaticities[0]=xfloatp[0];
t2p->tiff_whitechromaticities[1]=xfloatp[1];
if(t2p->pdf_colorspace & T2P_CS_GRAY){
t2p->pdf_colorspace |= T2P_CS_CALGRAY;
}
if(t2p->pdf_colorspace & T2P_CS_RGB){
t2p->pdf_colorspace |= T2P_CS_CALRGB;
}
}
if(TIFFGetField(input, TIFFTAG_PRIMARYCHROMATICITIES, &xfloatp)!=0){
t2p->tiff_primarychromaticities[0]=xfloatp[0];
t2p->tiff_primarychromaticities[1]=xfloatp[1];
t2p->tiff_primarychromaticities[2]=xfloatp[2];
t2p->tiff_primarychromaticities[3]=xfloatp[3];
t2p->tiff_primarychromaticities[4]=xfloatp[4];
t2p->tiff_primarychromaticities[5]=xfloatp[5];
if(t2p->pdf_colorspace & T2P_CS_RGB){
t2p->pdf_colorspace |= T2P_CS_CALRGB;
}
}
if(t2p->pdf_colorspace & T2P_CS_LAB){
if(TIFFGetField(input, TIFFTAG_WHITEPOINT, &xfloatp) != 0){
t2p->tiff_whitechromaticities[0]=xfloatp[0];
t2p->tiff_whitechromaticities[1]=xfloatp[1];
} else {
t2p->tiff_whitechromaticities[0]=0.3457F; /* 0.3127F; */
t2p->tiff_whitechromaticities[1]=0.3585F; /* 0.3290F; */
}
}
if(TIFFGetField(input,
TIFFTAG_ICCPROFILE,
&(t2p->tiff_iccprofilelength),
&(t2p->tiff_iccprofile))!=0){
t2p->pdf_colorspace |= T2P_CS_ICCBASED;
} else {
t2p->tiff_iccprofilelength=0;
t2p->tiff_iccprofile=NULL;
}
#ifdef CCITT_SUPPORT
if( t2p->tiff_bitspersample==1 &&
t2p->tiff_samplesperpixel==1){
t2p->pdf_compression = T2P_COMPRESS_G4;
}
#endif
return;
}
/*
This function computes the size of the data buffer needed to contain the raw or
uncompressed image data from the input TIFF for a page, storing the result in
t2p->tiff_datasize.
*/
void t2p_read_tiff_size(T2P* t2p, TIFF* input){
uint64* sbc=NULL;
#if defined(JPEG_SUPPORT) || defined (OJPEG_SUPPORT)
unsigned char* jpt=NULL;
tstrip_t i=0;
tstrip_t stripcount=0;
#endif
uint64 k = 0;
if(t2p->pdf_transcode == T2P_TRANSCODE_RAW){
#ifdef CCITT_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_G4 ){
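/* Raw pass-through: t2p_read_tiff_data only selects this when the
 * G4 page data is a single strip, so that strip's byte count is
 * all the buffer needs. */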
TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc);
if (sbc[0] != (uint64)(tmsize_t)sbc[0]) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
t2p->tiff_datasize=(tmsize_t)sbc[0];
return;
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_ZIP){
TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc);
if (sbc[0] != (uint64)(tmsize_t)sbc[0]) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
t2p->tiff_datasize=(tmsize_t)sbc[0];
return;
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_OJPEG){
if(!TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc)){
TIFFError(TIFF2PDF_MODULE,
"Input file %s missing field: TIFFTAG_STRIPBYTECOUNTS",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
k = checkAdd64(k, sbc[i], t2p);
}
if(TIFFGetField(input, TIFFTAG_JPEGIFOFFSET, &(t2p->tiff_dataoffset))){
if(t2p->tiff_dataoffset != 0){
if(TIFFGetField(input, TIFFTAG_JPEGIFBYTECOUNT, &(t2p->tiff_datasize))!=0){
if((uint64)t2p->tiff_datasize < k) {
TIFFWarning(TIFF2PDF_MODULE,
"Input file %s has short JPEG interchange file byte count",
TIFFFileName(input));
t2p->pdf_ojpegiflength=t2p->tiff_datasize;
k = checkAdd64(k, t2p->tiff_datasize, t2p);
k = checkAdd64(k, 6, t2p);
k = checkAdd64(k, stripcount, t2p);
k = checkAdd64(k, stripcount, t2p);
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
return;
} else {
TIFFError(TIFF2PDF_MODULE,
"Input file %s missing field: TIFFTAG_JPEGIFBYTECOUNT",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
}
k = checkAdd64(k, stripcount, t2p);
k = checkAdd64(k, stripcount, t2p);
k = checkAdd64(k, 2048, t2p);
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_JPEG) {
uint32 count = 0;
if(TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt) != 0 ){
if(count > 4){
k += count;
k -= 2; /* don't use EOI of header */
}
} else {
k = 2; /* SOI for first strip */
}
stripcount=TIFFNumberOfStrips(input);
if(!TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc)){
TIFFError(TIFF2PDF_MODULE,
"Input file %s missing field: TIFFTAG_STRIPBYTECOUNTS",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
for(i=0;i<stripcount;i++){
k = checkAdd64(k, sbc[i], t2p);
k -= 2; /* don't use EOI of strip */
k += 2; /* add space for restart marker */
}
k = checkAdd64(k, 2, t2p); /* use EOI of last strip */
k = checkAdd64(k, 6, t2p); /* for DRI marker of first strip */
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
#endif
(void) 0;
}
k = checkMultiply64(TIFFScanlineSize(input), t2p->tiff_length, t2p);
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
k = checkMultiply64(k, t2p->tiff_samplesperpixel, t2p);
}
if (k == 0) {
/* Assume we had overflow inside TIFFScanlineSize */
t2p->t2p_error = T2P_ERR_ERROR;
}
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
/*
This function computes the size of the data buffer needed to contain the raw or
uncompressed image data from the input TIFF for a tile of a page, storing the
result in t2p->tiff_datasize.
*/
void t2p_read_tiff_size_tile(T2P* t2p, TIFF* input, ttile_t tile){
uint64* tbc = NULL;
uint16 edge=0;
#ifdef JPEG_SUPPORT
unsigned char* jpt;
#endif
uint64 k;
edge |= t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile);
edge |= t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile);
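/* Edge tiles are stored padded to the full tile size, so they
 * cannot be passed through raw and must be re-encoded; the JPEG
 * paths are the exception, since the image dimensions there can
 * be fixed up in the JPEG header. */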
if(t2p->pdf_transcode==T2P_TRANSCODE_RAW){
if(edge
#if defined(JPEG_SUPPORT) || defined(OJPEG_SUPPORT)
&& !(t2p->pdf_compression==T2P_COMPRESS_JPEG)
#endif
){
t2p->tiff_datasize=TIFFTileSize(input);
if (t2p->tiff_datasize == 0) {
/* Assume we had overflow inside TIFFTileSize */
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
} else {
TIFFGetField(input, TIFFTAG_TILEBYTECOUNTS, &tbc);
k=tbc[tile];
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_OJPEG){
k = checkAdd64(k, 2048, t2p);
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression==COMPRESSION_JPEG) {
uint32 count = 0;
if(TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt)!=0){
if(count > 4){
k = checkAdd64(k, count, t2p);
k -= 2; /* don't use EOI of header or SOI of tile */
}
}
}
#endif
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
}
k = TIFFTileSize(input);
if(t2p->tiff_planar==PLANARCONFIG_SEPARATE){
k = checkMultiply64(k, t2p->tiff_samplesperpixel, t2p);
}
if (k == 0) {
/* Assume we had overflow inside TIFFTileSize */
t2p->t2p_error = T2P_ERR_ERROR;
}
t2p->tiff_datasize = (tsize_t) k;
if ((uint64) t2p->tiff_datasize != k) {
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
}
return;
}
/*
 * This function returns a non-zero value when the tile is on the right edge
 * and does not have the full image tile width.
 */
int t2p_tile_is_right_edge(T2P_TILES tiles, ttile_t tile){
if( ((tile+1) % tiles.tiles_tilecountx == 0)
&& (tiles.tiles_edgetilewidth != 0) ){
return(1);
} else {
return(0);
}
}
/*
 * This function returns a non-zero value when the tile is on the bottom edge
 * and does not have the full image tile length.
 */
int t2p_tile_is_bottom_edge(T2P_TILES tiles, ttile_t tile){
if( ((tile+1) > (tiles.tiles_tilecount-tiles.tiles_tilecountx) )
&& (tiles.tiles_edgetilelength != 0) ){
return(1);
} else {
return(0);
}
}
/*
* This function returns a non-zero value when the tile is a right edge tile
* or a bottom edge tile.
*/
int t2p_tile_is_edge(T2P_TILES tiles, ttile_t tile){
return(t2p_tile_is_right_edge(tiles, tile) | t2p_tile_is_bottom_edge(tiles, tile) );
}
/*
This function returns a non-zero value when the tile is both a right edge tile
and a bottom edge tile.
*/
int t2p_tile_is_corner_edge(T2P_TILES tiles, ttile_t tile){
return(t2p_tile_is_right_edge(tiles, tile) & t2p_tile_is_bottom_edge(tiles, tile) );
}
/*
This function reads the raster image data from the input TIFF for an image and writes
the data to the output PDF XObject image dictionary stream. It returns the amount written
or zero on error.
*/
tsize_t t2p_readwrite_pdf_image(T2P* t2p, TIFF* input, TIFF* output){
tsize_t written=0;
unsigned char* buffer=NULL;
unsigned char* samplebuffer=NULL;
tsize_t bufferoffset=0;
tsize_t samplebufferoffset=0;
tsize_t read=0;
tstrip_t i=0;
tstrip_t j=0;
tstrip_t stripcount=0;
tsize_t stripsize=0;
tsize_t sepstripcount=0;
tsize_t sepstripsize=0;
#ifdef OJPEG_SUPPORT
toff_t inputoffset=0;
uint16 h_samp=1;
uint16 v_samp=1;
uint16 ri=1;
uint32 rows=0;
#endif /* ifdef OJPEG_SUPPORT */
#ifdef JPEG_SUPPORT
unsigned char* jpt;
float* xfloatp;
uint64* sbc;
unsigned char* stripbuffer;
tsize_t striplength=0;
uint32 max_striplength=0;
#endif /* ifdef JPEG_SUPPORT */
/* Fail if prior error (in particular, can't trust tiff_datasize) */
if (t2p->t2p_error != T2P_ERR_OK)
return(0);
if(t2p->pdf_transcode == T2P_TRANSCODE_RAW){
#ifdef CCITT_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_G4){
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if (buffer == NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for "
"t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
TIFFReadRawStrip(input, 0, (tdata_t) buffer,
t2p->tiff_datasize);
if (t2p->tiff_fillorder==FILLORDER_LSB2MSB){
/*
* make sure is lsb-to-msb
* bit-endianness fill order
*/
TIFFReverseBits(buffer,
t2p->tiff_datasize);
}
t2pWriteFile(output, (tdata_t) buffer,
t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
}
#endif /* ifdef CCITT_SUPPORT */
#ifdef ZIP_SUPPORT
if (t2p->pdf_compression == T2P_COMPRESS_ZIP) {
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if(buffer == NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
TIFFReadRawStrip(input, 0, (tdata_t) buffer,
t2p->tiff_datasize);
if (t2p->tiff_fillorder==FILLORDER_LSB2MSB) {
TIFFReverseBits(buffer,
t2p->tiff_datasize);
}
t2pWriteFile(output, (tdata_t) buffer,
t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
}
#endif /* ifdef ZIP_SUPPORT */
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_OJPEG) {
if(t2p->tiff_dataoffset != 0) {
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if(buffer == NULL) {
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
if(t2p->pdf_ojpegiflength==0){
inputoffset=t2pSeekFile(input, 0,
SEEK_CUR);
t2pSeekFile(input,
t2p->tiff_dataoffset,
SEEK_SET);
t2pReadFile(input, (tdata_t) buffer,
t2p->tiff_datasize);
t2pSeekFile(input, inputoffset,
SEEK_SET);
t2pWriteFile(output, (tdata_t) buffer,
t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
} else {
inputoffset=t2pSeekFile(input, 0,
SEEK_CUR);
t2pSeekFile(input,
t2p->tiff_dataoffset,
SEEK_SET);
bufferoffset = t2pReadFile(input,
(tdata_t) buffer,
t2p->pdf_ojpegiflength);
t2p->pdf_ojpegiflength = 0;
t2pSeekFile(input, inputoffset,
SEEK_SET);
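/* Append a DRI (Define Restart Interval) segment: marker 0xFFDD,
 * length 4, then the restart interval expressed as the number of
 * MCUs per strip (an MCU is 8*h_samp by 8*v_samp pixels). */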
TIFFGetField(input,
TIFFTAG_YCBCRSUBSAMPLING,
&h_samp, &v_samp);
buffer[bufferoffset++]= 0xff;
buffer[bufferoffset++]= 0xdd;
buffer[bufferoffset++]= 0x00;
buffer[bufferoffset++]= 0x04;
h_samp*=8;
v_samp*=8;
ri=(t2p->tiff_width+h_samp-1) / h_samp;
TIFFGetField(input,
TIFFTAG_ROWSPERSTRIP,
&rows);
ri*=(rows+v_samp-1)/v_samp;
buffer[bufferoffset++]= (ri>>8) & 0xff;
buffer[bufferoffset++]= ri & 0xff;
stripcount=TIFFNumberOfStrips(input);
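/* Concatenate the raw strips, separating them with JPEG restart
 * markers RST0..RST7 (0xFFD0..0xFFD7), cycling modulo 8. */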
for(i=0;i<stripcount;i++){
if(i != 0 ){
buffer[bufferoffset++]=0xff;
buffer[bufferoffset++]=(0xd0 | ((i-1)%8));
}
bufferoffset+=TIFFReadRawStrip(input,
i,
(tdata_t) &(((unsigned char*)buffer)[bufferoffset]),
-1);
}
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(buffer);
return(bufferoffset);
}
} else {
if(! t2p->pdf_ojpegdata){
TIFFError(TIFF2PDF_MODULE,
"No support for OJPEG image %s with bad tables",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
_TIFFmemcpy(buffer, t2p->pdf_ojpegdata, t2p->pdf_ojpegdatalength);
bufferoffset=t2p->pdf_ojpegdatalength;
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
if(i != 0){
buffer[bufferoffset++]=0xff;
buffer[bufferoffset++]=(0xd0 | ((i-1)%8));
}
bufferoffset+=TIFFReadRawStrip(input,
i,
(tdata_t) &(((unsigned char*)buffer)[bufferoffset]),
-1);
}
if( ! ( (buffer[bufferoffset-1]==0xd9) && (buffer[bufferoffset-2]==0xff) ) ){
buffer[bufferoffset++]=0xff;
buffer[bufferoffset++]=0xd9;
}
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(buffer);
return(bufferoffset);
#if 0
/*
This removed hunk of code is clearly misplaced,
and we are not sure where it should be
(if anywhere).
*/
TIFFError(TIFF2PDF_MODULE,
"No support for OJPEG image %s with no JPEG File Interchange offset",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
#endif
}
}
#endif /* ifdef OJPEG_SUPPORT */
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_JPEG) {
uint32 count = 0;
buffer = (unsigned char*)
_TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
if (TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt) != 0) {
if(count > 4) {
_TIFFmemcpy(buffer, jpt, count);
bufferoffset += count - 2;
}
}
stripcount=TIFFNumberOfStrips(input);
TIFFGetField(input, TIFFTAG_STRIPBYTECOUNTS, &sbc);
for(i=0;i<stripcount;i++){
if(sbc[i]>max_striplength) max_striplength=sbc[i];
}
stripbuffer = (unsigned char*)
_TIFFmalloc(max_striplength);
if(stripbuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_readwrite_pdf_image, %s",
max_striplength,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
for(i=0;i<stripcount;i++){
striplength=TIFFReadRawStrip(input, i, (tdata_t) stripbuffer, -1);
if(!t2p_process_jpeg_strip(
stripbuffer,
&striplength,
buffer,
t2p->tiff_datasize,
&bufferoffset,
i,
t2p->tiff_length)){
TIFFError(TIFF2PDF_MODULE,
"Can't process JPEG data in input file %s",
TIFFFileName(input));
_TIFFfree(samplebuffer);
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
}
buffer[bufferoffset++]=0xff;
buffer[bufferoffset++]=0xd9;
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(stripbuffer);
_TIFFfree(buffer);
return(bufferoffset);
}
#endif /* ifdef JPEG_SUPPORT */
(void)0;
}
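/* Not a raw pass-through: decode the image data below and let the
 * output TIFF re-encode it into the PDF stream. */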
if(t2p->pdf_sample==T2P_SAMPLE_NOTHING){
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
stripsize=TIFFStripSize(input);
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
read =
TIFFReadEncodedStrip(input,
i,
(tdata_t) &buffer[bufferoffset],
TIFFmin(stripsize, t2p->tiff_datasize - bufferoffset));
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding strip %u of %s",
i,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
bufferoffset+=read;
}
} else {
if(t2p->pdf_sample & T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG){
sepstripsize=TIFFStripSize(input);
sepstripcount=TIFFNumberOfStrips(input);
stripsize=sepstripsize*t2p->tiff_samplesperpixel;
stripcount=sepstripcount/t2p->tiff_samplesperpixel;
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
samplebuffer = (unsigned char*) _TIFFmalloc(stripsize);
if(samplebuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
_TIFFfree(buffer);
return(0);
}
for(i=0;i<stripcount;i++){
samplebufferoffset=0;
for(j=0;j<t2p->tiff_samplesperpixel;j++){
read =
TIFFReadEncodedStrip(input,
i + j*stripcount,
(tdata_t) &(samplebuffer[samplebufferoffset]),
TIFFmin(sepstripsize, stripsize - samplebufferoffset));
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding strip %u of %s",
i + j*stripcount,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
samplebufferoffset+=read;
}
t2p_sample_planar_separate_to_contig(
t2p,
&(buffer[bufferoffset]),
samplebuffer,
samplebufferoffset);
bufferoffset+=samplebufferoffset;
}
_TIFFfree(samplebuffer);
goto dataready;
}
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
memset(buffer, 0, t2p->tiff_datasize);
stripsize=TIFFStripSize(input);
stripcount=TIFFNumberOfStrips(input);
for(i=0;i<stripcount;i++){
read =
TIFFReadEncodedStrip(input,
i,
(tdata_t) &buffer[bufferoffset],
TIFFmin(stripsize, t2p->tiff_datasize - bufferoffset));
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding strip %u of %s",
i,
TIFFFileName(input));
_TIFFfree(samplebuffer);
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
bufferoffset+=read;
}
if(t2p->pdf_sample & T2P_SAMPLE_REALIZE_PALETTE){
/* FIXME: overflow? */
samplebuffer=(unsigned char*)_TIFFrealloc(
(tdata_t) buffer,
t2p->tiff_datasize * t2p->tiff_samplesperpixel);
if(samplebuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
_TIFFfree(buffer);
return(0);
} else {
buffer=samplebuffer;
t2p->tiff_datasize *= t2p->tiff_samplesperpixel;
}
t2p_sample_realize_palette(t2p, buffer);
}
if(t2p->pdf_sample & T2P_SAMPLE_RGBA_TO_RGB){
t2p->tiff_datasize=t2p_sample_rgba_to_rgb(
(tdata_t)buffer,
t2p->tiff_width*t2p->tiff_length);
}
if(t2p->pdf_sample & T2P_SAMPLE_RGBAA_TO_RGB){
t2p->tiff_datasize=t2p_sample_rgbaa_to_rgb(
(tdata_t)buffer,
t2p->tiff_width*t2p->tiff_length);
}
if(t2p->pdf_sample & T2P_SAMPLE_YCBCR_TO_RGB){
samplebuffer=(unsigned char*)_TIFFrealloc(
(tdata_t)buffer,
t2p->tiff_width*t2p->tiff_length*4);
if(samplebuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
_TIFFfree(buffer);
return(0);
} else {
buffer=samplebuffer;
}
if(!TIFFReadRGBAImageOriented(
input,
t2p->tiff_width,
t2p->tiff_length,
(uint32*)buffer,
ORIENTATION_TOPLEFT,
0)){
TIFFError(TIFF2PDF_MODULE,
"Can't use TIFFReadRGBAImageOriented to extract RGB image from %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
t2p->tiff_datasize=t2p_sample_abgr_to_rgb(
(tdata_t) buffer,
t2p->tiff_width*t2p->tiff_length);
}
if(t2p->pdf_sample & T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED){
t2p->tiff_datasize=t2p_sample_lab_signed_to_unsigned(
(tdata_t)buffer,
t2p->tiff_width*t2p->tiff_length);
}
}
dataready:
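/* t2p_disable/t2p_enable bracket the field setup: tiff2pdf installs
 * a custom write procedure on the output TIFF, and disabling it
 * here keeps the directory bookkeeping out of the PDF stream, so
 * only the encoded strip data is written. */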
t2p_disable(output);
TIFFSetField(output, TIFFTAG_PHOTOMETRIC, t2p->tiff_photometric);
TIFFSetField(output, TIFFTAG_BITSPERSAMPLE, t2p->tiff_bitspersample);
TIFFSetField(output, TIFFTAG_SAMPLESPERPIXEL, t2p->tiff_samplesperpixel);
TIFFSetField(output, TIFFTAG_IMAGEWIDTH, t2p->tiff_width);
TIFFSetField(output, TIFFTAG_IMAGELENGTH, t2p->tiff_length);
TIFFSetField(output, TIFFTAG_ROWSPERSTRIP, t2p->tiff_length);
TIFFSetField(output, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFSetField(output, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB);
switch(t2p->pdf_compression){
case T2P_COMPRESS_NONE:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
break;
#ifdef CCITT_SUPPORT
case T2P_COMPRESS_G4:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_CCITTFAX4);
break;
#endif /* ifdef CCITT_SUPPORT */
#ifdef JPEG_SUPPORT
case T2P_COMPRESS_JPEG:
if(t2p->tiff_photometric==PHOTOMETRIC_YCBCR) {
uint16 hor = 0, ver = 0;
if (TIFFGetField(input, TIFFTAG_YCBCRSUBSAMPLING, &hor, &ver) !=0 ) {
if(hor != 0 && ver != 0){
TIFFSetField(output, TIFFTAG_YCBCRSUBSAMPLING, hor, ver);
}
}
if(TIFFGetField(input, TIFFTAG_REFERENCEBLACKWHITE, &xfloatp)!=0){
TIFFSetField(output, TIFFTAG_REFERENCEBLACKWHITE, xfloatp);
}
}
if(TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_JPEG)==0){
TIFFError(TIFF2PDF_MODULE,
"Unable to use JPEG compression for input %s and output %s",
TIFFFileName(input),
TIFFFileName(output));
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
TIFFSetField(output, TIFFTAG_JPEGTABLESMODE, 0);
if(t2p->pdf_colorspace & (T2P_CS_RGB | T2P_CS_LAB)){
TIFFSetField(output, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR);
if(t2p->tiff_photometric != PHOTOMETRIC_YCBCR){
TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB);
} else {
TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RAW);
}
}
if(t2p->pdf_colorspace & T2P_CS_GRAY){
(void)0;
}
if(t2p->pdf_colorspace & T2P_CS_CMYK){
(void)0;
}
if(t2p->pdf_defaultcompressionquality != 0){
TIFFSetField(output,
TIFFTAG_JPEGQUALITY,
t2p->pdf_defaultcompressionquality);
}
break;
#endif /* ifdef JPEG_SUPPORT */
#ifdef ZIP_SUPPORT
case T2P_COMPRESS_ZIP:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE);
if(t2p->pdf_defaultcompressionquality%100 != 0){
TIFFSetField(output,
TIFFTAG_PREDICTOR,
t2p->pdf_defaultcompressionquality % 100);
}
if(t2p->pdf_defaultcompressionquality/100 != 0){
TIFFSetField(output,
TIFFTAG_ZIPQUALITY,
(t2p->pdf_defaultcompressionquality / 100));
}
break;
#endif /* ifdef ZIP_SUPPORT */
default:
break;
}
t2p_enable(output);
t2p->outputwritten = 0;
#ifdef JPEG_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_JPEG
&& t2p->tiff_photometric == PHOTOMETRIC_YCBCR){
bufferoffset = TIFFWriteEncodedStrip(output, (tstrip_t)0,
buffer,
stripsize * stripcount);
} else
#endif /* ifdef JPEG_SUPPORT */
{
bufferoffset = TIFFWriteEncodedStrip(output, (tstrip_t)0,
buffer,
t2p->tiff_datasize);
}
if (buffer != NULL) {
_TIFFfree(buffer);
buffer=NULL;
}
if (bufferoffset == (tsize_t)-1) {
TIFFError(TIFF2PDF_MODULE,
"Error writing encoded strip to output PDF %s",
TIFFFileName(output));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
written = t2p->outputwritten;
return(written);
}
/*
* This function reads the raster image data from the input TIFF for an image
* tile and writes the data to the output PDF XObject image dictionary stream
* for the tile. It returns the amount written or zero on error.
*/
tsize_t t2p_readwrite_pdf_image_tile(T2P* t2p, TIFF* input, TIFF* output, ttile_t tile){
uint16 edge=0;
tsize_t written=0;
unsigned char* buffer=NULL;
tsize_t bufferoffset=0;
unsigned char* samplebuffer=NULL;
tsize_t samplebufferoffset=0;
tsize_t read=0;
uint16 i=0;
ttile_t tilecount=0;
/* tsize_t tilesize=0; */
ttile_t septilecount=0;
tsize_t septilesize=0;
#ifdef JPEG_SUPPORT
unsigned char* jpt;
float* xfloatp;
uint32 xuint32=0;
#endif
/* Fail if prior error (in particular, can't trust tiff_datasize) */
if (t2p->t2p_error != T2P_ERR_OK)
return(0);
edge |= t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile);
edge |= t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile);
if( (t2p->pdf_transcode == T2P_TRANSCODE_RAW) && ((edge == 0)
#if defined(JPEG_SUPPORT) || defined(OJPEG_SUPPORT)
|| (t2p->pdf_compression == T2P_COMPRESS_JPEG)
#endif
)
){
#ifdef CCITT_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_G4){
buffer= (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
TIFFReadRawTile(input, tile, (tdata_t) buffer, t2p->tiff_datasize);
if (t2p->tiff_fillorder==FILLORDER_LSB2MSB){
TIFFReverseBits(buffer, t2p->tiff_datasize);
}
t2pWriteFile(output, (tdata_t) buffer, t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
}
#endif
#ifdef ZIP_SUPPORT
if(t2p->pdf_compression == T2P_COMPRESS_ZIP){
buffer= (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
TIFFReadRawTile(input, tile, (tdata_t) buffer, t2p->tiff_datasize);
if (t2p->tiff_fillorder==FILLORDER_LSB2MSB){
TIFFReverseBits(buffer, t2p->tiff_datasize);
}
t2pWriteFile(output, (tdata_t) buffer, t2p->tiff_datasize);
_TIFFfree(buffer);
return(t2p->tiff_datasize);
}
#endif
#ifdef OJPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_OJPEG){
if(! t2p->pdf_ojpegdata){
TIFFError(TIFF2PDF_MODULE,
"No support for OJPEG image %s with "
"bad tables",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
buffer=(unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
_TIFFmemcpy(buffer, t2p->pdf_ojpegdata, t2p->pdf_ojpegdatalength);
if(edge!=0){
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile)){
buffer[7]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength >> 8) & 0xff;
buffer[8]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength ) & 0xff;
}
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile)){
buffer[9]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth >> 8) & 0xff;
buffer[10]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth ) & 0xff;
}
}
bufferoffset=t2p->pdf_ojpegdatalength;
bufferoffset+=TIFFReadRawTile(input,
tile,
(tdata_t) &(((unsigned char*)buffer)[bufferoffset]),
-1);
((unsigned char*)buffer)[bufferoffset++]=0xff;
((unsigned char*)buffer)[bufferoffset++]=0xd9;
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(buffer);
return(bufferoffset);
}
#endif
#ifdef JPEG_SUPPORT
if(t2p->tiff_compression == COMPRESSION_JPEG){
unsigned char table_end[2];
uint32 count = 0;
buffer= (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(TIFF_SIZE_T) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(TIFFGetField(input, TIFFTAG_JPEGTABLES, &count, &jpt) != 0) {
if (count > 4) {
int retTIFFReadRawTile;
/* Ignore EOI marker of JpegTables */
_TIFFmemcpy(buffer, jpt, count - 2);
bufferoffset += count - 2;
/* Store last 2 bytes of the JpegTables */
table_end[0] = buffer[bufferoffset-2];
table_end[1] = buffer[bufferoffset-1];
xuint32 = bufferoffset;
bufferoffset -= 2;
retTIFFReadRawTile= TIFFReadRawTile(
input,
tile,
(tdata_t) &(((unsigned char*)buffer)[bufferoffset]),
-1);
if( retTIFFReadRawTile < 0 )
{
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
bufferoffset += retTIFFReadRawTile;
/* Overwrite SOI marker of image scan with previously */
/* saved end of JpegTables */
buffer[xuint32-2]=table_end[0];
buffer[xuint32-1]=table_end[1];
}
}
t2pWriteFile(output, (tdata_t) buffer, bufferoffset);
_TIFFfree(buffer);
return(bufferoffset);
}
#endif
(void)0;
}
if(t2p->pdf_sample==T2P_SAMPLE_NOTHING){
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory for "
"t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
read = TIFFReadEncodedTile(
input,
tile,
(tdata_t) &buffer[bufferoffset],
t2p->tiff_datasize);
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding tile %u of %s",
tile,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
} else {
if(t2p->pdf_sample == T2P_SAMPLE_PLANAR_SEPARATE_TO_CONTIG){
septilesize=TIFFTileSize(input);
septilecount=TIFFNumberOfTiles(input);
/* tilesize=septilesize*t2p->tiff_samplesperpixel; */
tilecount=septilecount/t2p->tiff_samplesperpixel;
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
samplebuffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(samplebuffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
samplebufferoffset=0;
for(i=0;i<t2p->tiff_samplesperpixel;i++){
read =
TIFFReadEncodedTile(input,
tile + i*tilecount,
(tdata_t) &(samplebuffer[samplebufferoffset]),
septilesize);
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding tile %u of %s",
tile + i*tilecount,
TIFFFileName(input));
_TIFFfree(samplebuffer);
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
samplebufferoffset+=read;
}
t2p_sample_planar_separate_to_contig(
t2p,
&(buffer[bufferoffset]),
samplebuffer,
samplebufferoffset);
bufferoffset+=samplebufferoffset;
_TIFFfree(samplebuffer);
}
if(buffer==NULL){
buffer = (unsigned char*) _TIFFmalloc(t2p->tiff_datasize);
if(buffer==NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %lu bytes of memory "
"for t2p_readwrite_pdf_image_tile, %s",
(unsigned long) t2p->tiff_datasize,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
read = TIFFReadEncodedTile(
input,
tile,
(tdata_t) &buffer[bufferoffset],
t2p->tiff_datasize);
if(read==-1){
TIFFError(TIFF2PDF_MODULE,
"Error on decoding tile %u of %s",
tile,
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error=T2P_ERR_ERROR;
return(0);
}
}
if(t2p->pdf_sample & T2P_SAMPLE_RGBA_TO_RGB){
t2p->tiff_datasize=t2p_sample_rgba_to_rgb(
(tdata_t)buffer,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth
*t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
if(t2p->pdf_sample & T2P_SAMPLE_RGBAA_TO_RGB){
t2p->tiff_datasize=t2p_sample_rgbaa_to_rgb(
(tdata_t)buffer,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth
*t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
if(t2p->pdf_sample & T2P_SAMPLE_YCBCR_TO_RGB){
TIFFError(TIFF2PDF_MODULE,
"No support for YCbCr to RGB in tile for %s",
TIFFFileName(input));
_TIFFfree(buffer);
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(t2p->pdf_sample & T2P_SAMPLE_LAB_SIGNED_TO_UNSIGNED){
t2p->tiff_datasize=t2p_sample_lab_signed_to_unsigned(
(tdata_t)buffer,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth
*t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
}
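/* Right-edge tiles were decoded at the full tile width; collapse
 * each row leftward to the edge tile width before re-encoding. */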
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile) != 0){
t2p_tile_collapse_left(
buffer,
TIFFTileRowSize(input),
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth,
t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
t2p_disable(output);
TIFFSetField(output, TIFFTAG_PHOTOMETRIC, t2p->tiff_photometric);
TIFFSetField(output, TIFFTAG_BITSPERSAMPLE, t2p->tiff_bitspersample);
TIFFSetField(output, TIFFTAG_SAMPLESPERPIXEL, t2p->tiff_samplesperpixel);
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile) == 0){
TIFFSetField(
output,
TIFFTAG_IMAGEWIDTH,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth);
} else {
TIFFSetField(
output,
TIFFTAG_IMAGEWIDTH,
t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth);
}
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile) == 0){
TIFFSetField(
output,
TIFFTAG_IMAGELENGTH,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
TIFFSetField(
output,
TIFFTAG_ROWSPERSTRIP,
t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
} else {
TIFFSetField(
output,
TIFFTAG_IMAGELENGTH,
t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
TIFFSetField(
output,
TIFFTAG_ROWSPERSTRIP,
t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
}
TIFFSetField(output, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFSetField(output, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB);
switch(t2p->pdf_compression){
case T2P_COMPRESS_NONE:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
break;
#ifdef CCITT_SUPPORT
case T2P_COMPRESS_G4:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_CCITTFAX4);
break;
#endif
#ifdef JPEG_SUPPORT
case T2P_COMPRESS_JPEG:
if (t2p->tiff_photometric==PHOTOMETRIC_YCBCR) {
uint16 hor = 0, ver = 0;
if (TIFFGetField(input, TIFFTAG_YCBCRSUBSAMPLING, &hor, &ver)!=0) {
if (hor != 0 && ver != 0) {
TIFFSetField(output, TIFFTAG_YCBCRSUBSAMPLING, hor, ver);
}
}
if(TIFFGetField(input, TIFFTAG_REFERENCEBLACKWHITE, &xfloatp)!=0){
TIFFSetField(output, TIFFTAG_REFERENCEBLACKWHITE, xfloatp);
}
}
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_JPEG);
TIFFSetField(output, TIFFTAG_JPEGTABLESMODE, 0); /* JPEGTABLESMODE_NONE */
if(t2p->pdf_colorspace & (T2P_CS_RGB | T2P_CS_LAB)){
TIFFSetField(output, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR);
if(t2p->tiff_photometric != PHOTOMETRIC_YCBCR){
TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB);
} else {
TIFFSetField(output, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RAW);
}
}
if(t2p->pdf_colorspace & T2P_CS_GRAY){
(void)0;
}
if(t2p->pdf_colorspace & T2P_CS_CMYK){
(void)0;
}
if(t2p->pdf_defaultcompressionquality != 0){
TIFFSetField(output,
TIFFTAG_JPEGQUALITY,
t2p->pdf_defaultcompressionquality);
}
break;
#endif
#ifdef ZIP_SUPPORT
case T2P_COMPRESS_ZIP:
TIFFSetField(output, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE);
if(t2p->pdf_defaultcompressionquality%100 != 0){
TIFFSetField(output,
TIFFTAG_PREDICTOR,
t2p->pdf_defaultcompressionquality % 100);
}
if(t2p->pdf_defaultcompressionquality/100 != 0){
TIFFSetField(output,
TIFFTAG_ZIPQUALITY,
(t2p->pdf_defaultcompressionquality / 100));
}
break;
#endif
default:
break;
}
t2p_enable(output);
t2p->outputwritten = 0;
bufferoffset = TIFFWriteEncodedStrip(output, (tstrip_t) 0, buffer,
TIFFStripSize(output));
if (buffer != NULL) {
_TIFFfree(buffer);
buffer = NULL;
}
if (bufferoffset == -1) {
TIFFError(TIFF2PDF_MODULE,
"Error writing encoded tile to output PDF %s",
TIFFFileName(output));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
written = t2p->outputwritten;
return(written);
}
#ifdef OJPEG_SUPPORT
int t2p_process_ojpeg_tables(T2P* t2p, TIFF* input){
uint16 proc=0;
void* q;
uint32 q_length=0;
void* dc;
uint32 dc_length=0;
void* ac;
uint32 ac_length=0;
uint16* lp;
uint16* pt;
uint16 h_samp=1;
uint16 v_samp=1;
unsigned char* ojpegdata;
uint16 table_count;
uint32 offset_table;
uint32 offset_ms_l;
uint32 code_count;
uint32 i=0;
uint32 dest=0;
uint16 ri=0;
uint32 rows=0;
if(!TIFFGetField(input, TIFFTAG_JPEGPROC, &proc)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGProc field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(proc!=JPEGPROC_BASELINE && proc!=JPEGPROC_LOSSLESS){
TIFFError(TIFF2PDF_MODULE,
"Bad JPEGProc field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(!TIFFGetField(input, TIFFTAG_JPEGQTABLES, &q_length, &q)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGQTables field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(q_length < (64U * t2p->tiff_samplesperpixel)){
TIFFError(TIFF2PDF_MODULE,
"Bad JPEGQTables field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(!TIFFGetField(input, TIFFTAG_JPEGDCTABLES, &dc_length, &dc)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGDCTables field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(proc==JPEGPROC_BASELINE){
if(!TIFFGetField(input, TIFFTAG_JPEGACTABLES, &ac_length, &ac)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGACTables field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
} else {
if(!TIFFGetField(input, TIFFTAG_JPEGLOSSLESSPREDICTORS, &lp)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGLosslessPredictors field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
if(!TIFFGetField(input, TIFFTAG_JPEGPOINTTRANSFORM, &pt)){
TIFFError(TIFF2PDF_MODULE,
"Missing JPEGPointTransform field in OJPEG image %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
}
if(!TIFFGetField(input, TIFFTAG_YCBCRSUBSAMPLING, &h_samp, &v_samp)){
h_samp=1;
v_samp=1;
}
if(t2p->pdf_ojpegdata != NULL){
_TIFFfree(t2p->pdf_ojpegdata);
t2p->pdf_ojpegdata=NULL;
}
t2p->pdf_ojpegdata = _TIFFmalloc(2048);
if(t2p->pdf_ojpegdata == NULL){
TIFFError(TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_process_ojpeg_tables, %s",
2048,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return(0);
}
_TIFFmemset(t2p->pdf_ojpegdata, 0x00, 2048);
t2p->pdf_ojpegdatalength = 0;
table_count=t2p->tiff_samplesperpixel;
if(proc==JPEGPROC_BASELINE){
if(table_count>2) table_count=2;
}
ojpegdata=(unsigned char*)t2p->pdf_ojpegdata;
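/* Synthesize a JPEG header from the OJPEG tags: SOI, then SOF0
 * (baseline) or SOF3 (lossless), a DQT per sample, the DHT
 * Huffman tables, an optional DRI for multi-strip images, and
 * finally SOS. */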
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xd8;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
if(proc==JPEGPROC_BASELINE){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xc0;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++]=0xc3;
}
ojpegdata[t2p->pdf_ojpegdatalength++]=0x00;
ojpegdata[t2p->pdf_ojpegdatalength++]=(8 + 3*t2p->tiff_samplesperpixel);
ojpegdata[t2p->pdf_ojpegdatalength++]=(t2p->tiff_bitspersample & 0xff);
if(TIFFIsTiled(input)){
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength >> 8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength ) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth >> 8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth ) & 0xff;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_length >> 8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_length ) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_width >> 8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=
(t2p->tiff_width ) & 0xff;
}
ojpegdata[t2p->pdf_ojpegdatalength++]=(t2p->tiff_samplesperpixel & 0xff);
for(i=0;i<t2p->tiff_samplesperpixel;i++){
ojpegdata[t2p->pdf_ojpegdatalength++]=i;
if(i==0){
ojpegdata[t2p->pdf_ojpegdatalength] |= (h_samp<<4) & 0xf0;
ojpegdata[t2p->pdf_ojpegdatalength++] |= v_samp & 0x0f;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++]= 0x11;
}
ojpegdata[t2p->pdf_ojpegdatalength++]=i;
}
for(dest=0;dest<t2p->tiff_samplesperpixel;dest++){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xdb;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x00;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x43;
ojpegdata[t2p->pdf_ojpegdatalength++]=dest;
/* the quantization table is exactly 64 bytes; do not advance the
   offset here, it is advanced by 64 below */
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)q)[64*dest]), 64);
t2p->pdf_ojpegdatalength+=64;
}
offset_table=0;
for(dest=0;dest<table_count;dest++){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xc4;
offset_ms_l=t2p->pdf_ojpegdatalength;
t2p->pdf_ojpegdatalength+=2;
ojpegdata[t2p->pdf_ojpegdatalength++]=dest & 0x0f;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)dc)[offset_table]), 16);
code_count=0;
offset_table+=16;
for(i=0;i<16;i++){
code_count+=ojpegdata[t2p->pdf_ojpegdatalength++];
}
ojpegdata[offset_ms_l]=((19+code_count)>>8) & 0xff;
ojpegdata[offset_ms_l+1]=(19+code_count) & 0xff;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)dc)[offset_table]), code_count);
offset_table+=code_count;
t2p->pdf_ojpegdatalength+=code_count;
}
if(proc==JPEGPROC_BASELINE){
offset_table=0;
for(dest=0;dest<table_count;dest++){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xc4;
offset_ms_l=t2p->pdf_ojpegdatalength;
t2p->pdf_ojpegdatalength+=2;
ojpegdata[t2p->pdf_ojpegdatalength] |= 0x10;
ojpegdata[t2p->pdf_ojpegdatalength++] |=dest & 0x0f;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)ac)[offset_table]), 16);
code_count=0;
offset_table+=16;
for(i=0;i<16;i++){
code_count+=ojpegdata[t2p->pdf_ojpegdatalength++];
}
ojpegdata[offset_ms_l]=((19+code_count)>>8) & 0xff;
ojpegdata[offset_ms_l+1]=(19+code_count) & 0xff;
_TIFFmemcpy( &(ojpegdata[t2p->pdf_ojpegdatalength]),
&(((unsigned char*)ac)[offset_table]), code_count);
offset_table+=code_count;
t2p->pdf_ojpegdatalength+=code_count;
}
}
if(TIFFNumberOfStrips(input)>1){
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xdd;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x00;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x04;
h_samp*=8;
v_samp*=8;
ri=(t2p->tiff_width+h_samp-1) / h_samp;
TIFFGetField(input, TIFFTAG_ROWSPERSTRIP, &rows);
ri*=(rows+v_samp-1)/v_samp;
ojpegdata[t2p->pdf_ojpegdatalength++]= (ri>>8) & 0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]= ri & 0xff;
}
ojpegdata[t2p->pdf_ojpegdatalength++]=0xff;
ojpegdata[t2p->pdf_ojpegdatalength++]=0xda;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x00;
ojpegdata[t2p->pdf_ojpegdatalength++]=(6 + 2*t2p->tiff_samplesperpixel);
ojpegdata[t2p->pdf_ojpegdatalength++]=t2p->tiff_samplesperpixel & 0xff;
for(i=0;i<t2p->tiff_samplesperpixel;i++){
ojpegdata[t2p->pdf_ojpegdatalength++]= i & 0xff;
if(proc==JPEGPROC_BASELINE){
ojpegdata[t2p->pdf_ojpegdatalength] |=
( ( (i>(table_count-1U)) ? (table_count-1U) : i) << 4U) & 0xf0;
ojpegdata[t2p->pdf_ojpegdatalength++] |=
( (i>(table_count-1U)) ? (table_count-1U) : i) & 0x0f;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++] = (i << 4) & 0xf0;
}
}
if(proc==JPEGPROC_BASELINE){
t2p->pdf_ojpegdatalength++;
ojpegdata[t2p->pdf_ojpegdatalength++]=0x3f;
t2p->pdf_ojpegdatalength++;
} else {
ojpegdata[t2p->pdf_ojpegdatalength++]= (lp[0] & 0xff);
t2p->pdf_ojpegdatalength++;
ojpegdata[t2p->pdf_ojpegdatalength++]= (pt[0] & 0x0f);
}
return(1);
}
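/*
 * Worked example for the DHT markers emitted above: each segment payload is
 * one table class/id byte, 16 code-count bytes, and code_count code values,
 * so with the two length bytes the marker length field is 19 + code_count.
 * For instance, 12 Huffman codes give a length field of 0x001F.
 */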
#endif
#ifdef JPEG_SUPPORT
int t2p_process_jpeg_strip(
unsigned char* strip,
tsize_t* striplength,
unsigned char* buffer,
tsize_t buffersize,
tsize_t* bufferoffset,
tstrip_t no,
uint32 height){
tsize_t i=0;
while (i < *striplength) {
tsize_t datalen;
uint16 ri;
uint16 v_samp;
uint16 h_samp;
int j;
int ncomp;
/* marker header: one or more FFs */
if (strip[i] != 0xff)
return(0);
i++;
while (i < *striplength && strip[i] == 0xff)
i++;
if (i >= *striplength)
return(0);
/* SOI is the only pre-SOS marker without a length word */
if (strip[i] == 0xd8)
datalen = 0;
else {
if ((*striplength - i) <= 2)
return(0);
datalen = (strip[i+1] << 8) | strip[i+2];
if (datalen < 2 || datalen >= (*striplength - i))
return(0);
}
switch( strip[i] ){
case 0xd8: /* SOI - start of image */
if( *bufferoffset + 2 > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), 2);
*bufferoffset+=2;
break;
case 0xc0: /* SOF0 */
case 0xc1: /* SOF1 */
case 0xc3: /* SOF3 */
case 0xc9: /* SOF9 */
case 0xca: /* SOF10 */
if(no==0){
if( *bufferoffset + datalen + 2 + 6 > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2);
if( *bufferoffset + 9 >= buffersize )
return(0);
ncomp = buffer[*bufferoffset+9];
if (ncomp < 1 || ncomp > 4)
return(0);
v_samp=1;
h_samp=1;
if( *bufferoffset + 11 + 3*(ncomp-1) >= buffersize )
return(0);
for(j=0;j<ncomp;j++){
uint16 samp = buffer[*bufferoffset+11+(3*j)];
if( (samp>>4) > h_samp)
h_samp = (samp>>4);
if( (samp & 0x0f) > v_samp)
v_samp = (samp & 0x0f);
}
v_samp*=8;
h_samp*=8;
ri=((( ((uint16)(buffer[*bufferoffset+5])<<8) |
(uint16)(buffer[*bufferoffset+6]) )+v_samp-1)/
v_samp);
ri*=((( ((uint16)(buffer[*bufferoffset+7])<<8) |
(uint16)(buffer[*bufferoffset+8]) )+h_samp-1)/
h_samp);
buffer[*bufferoffset+5]=
(unsigned char) ((height>>8) & 0xff);
buffer[*bufferoffset+6]=
(unsigned char) (height & 0xff);
*bufferoffset+=datalen+2;
/* insert a DRI marker */
buffer[(*bufferoffset)++]=0xff;
buffer[(*bufferoffset)++]=0xdd;
buffer[(*bufferoffset)++]=0x00;
buffer[(*bufferoffset)++]=0x04;
buffer[(*bufferoffset)++]=(ri >> 8) & 0xff;
buffer[(*bufferoffset)++]= ri & 0xff;
}
break;
case 0xc4: /* DHT */
case 0xdb: /* DQT */
if( *bufferoffset + datalen + 2 > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2);
*bufferoffset+=datalen+2;
break;
case 0xda: /* SOS */
if(no==0){
if( *bufferoffset + datalen + 2 > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i-1]), datalen+2);
*bufferoffset+=datalen+2;
} else {
if( *bufferoffset + 2 > buffersize )
return(0);
buffer[(*bufferoffset)++]=0xff;
buffer[(*bufferoffset)++]=
(unsigned char)(0xd0 | ((no-1)%8));
}
i += datalen + 1;
/* copy remainder of strip */
if( *bufferoffset + *striplength - i > buffersize )
return(0);
_TIFFmemcpy(&(buffer[*bufferoffset]), &(strip[i]), *striplength - i);
*bufferoffset+= *striplength - i;
return(1);
default:
/* ignore any other marker */
break;
}
i += datalen + 1;
}
/* failed to find SOS marker */
return(0);
}
#endif
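/*
 * Usage sketch (illustrative only; the buffer names below are hypothetical):
 * a caller feeds t2p_process_jpeg_strip one raw strip at a time and
 * accumulates the merged JPEG stream in a single output buffer.
 *
 *	tsize_t bufferoffset = 0;
 *	tstrip_t s;
 *	for (s = 0; s < TIFFNumberOfStrips(input); s++) {
 *		tsize_t striplen = TIFFReadRawStrip(input, s,
 *			(tdata_t) stripbuffer, stripbuffersize);
 *		if (striplen < 1 || !t2p_process_jpeg_strip(
 *			stripbuffer, &striplen, buffer, buffersize,
 *			&bufferoffset, s, height))
 *			break;
 *	}
 */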
/*
This function converts a tilewidth x tilelength buffer of samples into an
edgetilewidth x tilelength buffer of samples.
*/
void t2p_tile_collapse_left(
tdata_t buffer,
tsize_t scanwidth,
uint32 tilewidth,
uint32 edgetilewidth,
uint32 tilelength){
uint32 i;
tsize_t edgescanwidth=0;
edgescanwidth = (scanwidth * edgetilewidth + (tilewidth - 1))/ tilewidth;
for(i=0;i<tilelength;i++){
/* We use memmove() since there can be overlaps in src and dst buffers for the first items */
memmove(
&(((char*)buffer)[edgescanwidth*i]),
&(((char*)buffer)[scanwidth*i]),
edgescanwidth);
}
return;
}
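/*
 * Worked example: with scanwidth=300 bytes, tilewidth=100 and
 * edgetilewidth=40, edgescanwidth = (300*40 + 99)/100 = 120 bytes; the byte
 * width is scaled by edgetilewidth/tilewidth and rounded up so that
 * sub-byte samples at the right edge are not truncated.
 */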
/*
* This function calls TIFFWriteDirectory on the output after blanking its
* output by replacing the read, write, and seek procedures with empty
* implementations, and then restores the original implementations.
*/
void
t2p_write_advance_directory(T2P* t2p, TIFF* output)
{
t2p_disable(output);
if(!TIFFWriteDirectory(output)){
TIFFError(TIFF2PDF_MODULE,
"Error writing virtual directory to output PDF %s",
TIFFFileName(output));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p_enable(output);
return;
}
tsize_t t2p_sample_planar_separate_to_contig(
T2P* t2p,
unsigned char* buffer,
unsigned char* samplebuffer,
tsize_t samplebuffersize){
tsize_t stride=0;
tsize_t i=0;
tsize_t j=0;
stride=samplebuffersize/t2p->tiff_samplesperpixel;
for(i=0;i<stride;i++){
for(j=0;j<t2p->tiff_samplesperpixel;j++){
buffer[i*t2p->tiff_samplesperpixel + j] = samplebuffer[i + j*stride];
}
}
return(samplebuffersize);
}
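/*
 * Illustration: for 3 samples per pixel, the planar layout
 * R0 R1 ... G0 G1 ... B0 B1 ... in samplebuffer becomes the contiguous
 * layout R0 G0 B0 R1 G1 B1 ... in buffer, where stride is the per-plane
 * size in bytes (samplebuffersize / tiff_samplesperpixel).
 */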
tsize_t t2p_sample_realize_palette(T2P* t2p, unsigned char* buffer){
uint32 sample_count=0;
uint16 component_count=0;
uint32 palette_offset=0;
uint32 sample_offset=0;
uint32 i=0;
uint32 j=0;
sample_count=t2p->tiff_width*t2p->tiff_length;
component_count=t2p->tiff_samplesperpixel;
if( sample_count * component_count > t2p->tiff_datasize )
{
TIFFError(TIFF2PDF_MODULE, "Error: sample_count * component_count > t2p->tiff_datasize");
t2p->t2p_error = T2P_ERR_ERROR;
return 1;
}
for(i=sample_count;i>0;i--){
palette_offset=buffer[i-1] * component_count;
sample_offset= (i-1) * component_count;
for(j=0;j<component_count;j++){
buffer[sample_offset+j]=t2p->pdf_palette[palette_offset+j];
}
}
return(0);
}
/*
This function converts in place a buffer of ABGR interleaved data
into RGB interleaved data, discarding A.
*/
tsize_t t2p_sample_abgr_to_rgb(tdata_t data, uint32 samplecount)
{
uint32 i=0;
uint32 sample=0;
for(i=0;i<samplecount;i++){
sample=((uint32*)data)[i];
((char*)data)[i*3]= (char) (sample & 0xff);
((char*)data)[i*3+1]= (char) ((sample>>8) & 0xff);
((char*)data)[i*3+2]= (char) ((sample>>16) & 0xff);
}
return(i*3);
}
/*
* This function converts in place a buffer of RGBA interleaved data
* into RGB interleaved data, discarding A.
*/
tsize_t
t2p_sample_rgbaa_to_rgb(tdata_t data, uint32 samplecount)
{
uint32 i;
/* For the first 3 samples there is overlap between the source and
destination, so use memmove().
See http://bugzilla.maptools.org/show_bug.cgi?id=2577 */
for(i = 0; i < 3 && i < samplecount; i++)
memmove((uint8*)data + i * 3, (uint8*)data + i * 4, 3);
for(; i < samplecount; i++)
memcpy((uint8*)data + i * 3, (uint8*)data + i * 4, 3);
return(i * 3);
}
/*
* This function converts in place a buffer of RGBA interleaved data
* into RGB interleaved data, adding 255-A to each component sample.
*/
tsize_t
t2p_sample_rgba_to_rgb(tdata_t data, uint32 samplecount)
{
uint32 i = 0;
uint32 sample = 0;
uint8 alpha = 0;
for (i = 0; i < samplecount; i++) {
sample=((uint32*)data)[i];
alpha=(uint8)((255 - ((sample >> 24) & 0xff)));
((uint8 *)data)[i * 3] = (uint8) ((sample >> 16) & 0xff) + alpha;
((uint8 *)data)[i * 3 + 1] = (uint8) ((sample >> 8) & 0xff) + alpha;
((uint8 *)data)[i * 3 + 2] = (uint8) (sample & 0xff) + alpha;
}
return (i * 3);
}
/*
This function converts the a and b samples of Lab data from signed
to unsigned.
*/
tsize_t t2p_sample_lab_signed_to_unsigned(tdata_t buffer, uint32 samplecount){
uint32 i=0;
for(i=0;i<samplecount;i++){
if( (((unsigned char*)buffer)[(i*3)+1] & 0x80) !=0){
((unsigned char*)buffer)[(i*3)+1] =
(unsigned char)(0x80 + ((char*)buffer)[(i*3)+1]);
} else {
((unsigned char*)buffer)[(i*3)+1] |= 0x80;
}
if( (((unsigned char*)buffer)[(i*3)+2] & 0x80) !=0){
((unsigned char*)buffer)[(i*3)+2] =
(unsigned char)(0x80 + ((char*)buffer)[(i*3)+2]);
} else {
((unsigned char*)buffer)[(i*3)+2] |= 0x80;
}
}
return(samplecount*3);
}
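/*
 * Worked example: the transform maps the signed range [-128,127] onto the
 * unsigned range [0,255] by adding 128: a signed a* of -1 (0xFF) becomes
 * 0x7F, a signed a* of 0 becomes 0x80, and -128 (0x80) becomes 0x00.
 */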
/*
This function writes the PDF header to output.
*/
tsize_t t2p_write_pdf_header(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[16];
int buflen=0;
buflen = snprintf(buffer, sizeof(buffer), "%%PDF-%u.%u ",
t2p->pdf_majorversion&0xff,
t2p->pdf_minorversion&0xff);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t)"\n%\342\343\317\323\n", 7);
return(written);
}
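/*
 * Illustrative output, assuming the default PDF version 1.1:
 *
 *	%PDF-1.1 
 *	%\342\343\317\323
 *
 * (note the trailing space after the version); the second line is the
 * conventional binary-marker comment of four bytes above 127.
 */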
/*
This function writes the beginning of a PDF object to output.
*/
tsize_t t2p_write_pdf_obj_start(uint32 number, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)number);
check_snprintf_ret((T2P*)NULL, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen );
written += t2pWriteFile(output, (tdata_t) " 0 obj\n", 7);
return(written);
}
/*
This function writes the end of a PDF object to output.
*/
tsize_t t2p_write_pdf_obj_end(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) "endobj\n", 7);
return(written);
}
/*
This function writes a PDF name object to output.
*/
tsize_t t2p_write_pdf_name(unsigned char* name, TIFF* output){
tsize_t written=0;
uint32 i=0;
char buffer[64];
uint16 nextchar=0;
size_t namelen=0;
namelen = strlen((char *)name);
if (namelen>126) {
namelen=126;
}
written += t2pWriteFile(output, (tdata_t) "/", 1);
for (i=0;i<namelen;i++){
if ( ((unsigned char)name[i]) < 0x21 || ((unsigned char)name[i]) > 0x7E){
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
nextchar=1;
}
if (nextchar==0){
switch (name[i]){
/* escape the PDF name delimiter characters */
case 0x23:
case 0x25:
case 0x28:
case 0x29:
case 0x2F:
case 0x3C:
case 0x3E:
case 0x5B:
case 0x5D:
case 0x7B:
case 0x7D:
snprintf(buffer, sizeof(buffer), "#%.2X", name[i]);
buffer[sizeof(buffer) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) buffer, 3);
break;
default:
written += t2pWriteFile(output, (tdata_t) &name[i], 1);
}
}
nextchar=0;
}
written += t2pWriteFile(output, (tdata_t) " ", 1);
return(written);
}
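/*
 * Worked example: the input name "my image(1)" is written as
 * "/my#20image#281#29 ", since space (0x20) falls below 0x21 and the
 * parentheses are among the escaped PDF delimiter characters.
 */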
/*
* This function writes a PDF string object to output.
*/
tsize_t t2p_write_pdf_string(char* pdfstr, TIFF* output)
{
tsize_t written = 0;
uint32 i = 0;
char buffer[64];
size_t len = 0;
len = strlen(pdfstr);
written += t2pWriteFile(output, (tdata_t) "(", 1);
for (i=0; i<len; i++) {
if((pdfstr[i]&0x80) || (pdfstr[i]==127) || (pdfstr[i]<32)){
snprintf(buffer, sizeof(buffer), "\\%.3o", ((unsigned char)pdfstr[i]));
written += t2pWriteFile(output, (tdata_t)buffer, 4);
} else {
switch (pdfstr[i]){
case 0x08:
written += t2pWriteFile(output, (tdata_t) "\\b", 2);
break;
case 0x09:
written += t2pWriteFile(output, (tdata_t) "\\t", 2);
break;
case 0x0A:
written += t2pWriteFile(output, (tdata_t) "\\n", 2);
break;
case 0x0C:
written += t2pWriteFile(output, (tdata_t) "\\f", 2);
break;
case 0x0D:
written += t2pWriteFile(output, (tdata_t) "\\r", 2);
break;
case 0x28:
written += t2pWriteFile(output, (tdata_t) "\\(", 2);
break;
case 0x29:
written += t2pWriteFile(output, (tdata_t) "\\)", 2);
break;
case 0x5C:
written += t2pWriteFile(output, (tdata_t) "\\\\", 2);
break;
default:
written += t2pWriteFile(output, (tdata_t) &pdfstr[i], 1);
}
}
}
written += t2pWriteFile(output, (tdata_t) ")", 1);
return(written);
}
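/*
 * Worked example: the input string a(b) is written as (a\(b\)), with
 * parentheses and backslashes escaped; bytes outside 32..126 are written
 * as three-digit octal escapes, e.g. 0xE9 becomes \351.
 */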
/*
This function writes a buffer of data to output.
*/
tsize_t t2p_write_pdf_stream(tdata_t buffer, tsize_t len, TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) buffer, len);
return(written);
}
/*
This function writes the beginning of a PDF stream to output.
*/
tsize_t t2p_write_pdf_stream_start(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) "stream\n", 7);
return(written);
}
/*
This function writes the end of a PDF stream to output.
*/
tsize_t t2p_write_pdf_stream_end(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) "\nendstream\n", 11);
return(written);
}
/*
This function writes a stream dictionary for a PDF stream to output.
*/
tsize_t t2p_write_pdf_stream_dict(tsize_t len, uint32 number, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "/Length ", 8);
if(len!=0){
written += t2p_write_pdf_stream_length(len, output);
} else {
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)number);
check_snprintf_ret((T2P*)NULL, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n", 6);
}
return(written);
}
/*
This function writes the beginning of a PDF stream dictionary to output.
*/
tsize_t t2p_write_pdf_stream_dict_start(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) "<< \n", 4);
return(written);
}
/*
This function writes the end of a PDF stream dictionary to output.
*/
tsize_t t2p_write_pdf_stream_dict_end(TIFF* output){
tsize_t written=0;
written += t2pWriteFile(output, (tdata_t) " >>\n", 4);
return(written);
}
/*
This function writes a stream length value, followed by a newline, to output.
*/
tsize_t t2p_write_pdf_stream_length(tsize_t len, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)len);
check_snprintf_ret((T2P*)NULL, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
return(written);
}
/*
* This function writes the PDF Catalog structure to output.
*/
tsize_t t2p_write_pdf_catalog(T2P* t2p, TIFF* output)
{
tsize_t written = 0;
char buffer[32];
int buflen = 0;
written += t2pWriteFile(output,
(tdata_t)"<< \n/Type /Catalog \n/Pages ",
27);
buflen = snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_pages);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer,
TIFFmin((size_t)buflen, sizeof(buffer) - 1));
written += t2pWriteFile(output, (tdata_t) " 0 R \n", 6);
if(t2p->pdf_fitwindow){
written += t2pWriteFile(output,
(tdata_t) "/ViewerPreferences <</FitWindow true>>\n",
39);
}
written += t2pWriteFile(output, (tdata_t)">>\n", 3);
return(written);
}
/*
This function writes the PDF Info structure to output.
*/
tsize_t t2p_write_pdf_info(T2P* t2p, TIFF* input, TIFF* output)
{
tsize_t written = 0;
char* info;
char buffer[512];
if(t2p->pdf_datetime[0] == '\0')
t2p_pdf_tifftime(t2p, input);
if (strlen(t2p->pdf_datetime) > 0) {
written += t2pWriteFile(output, (tdata_t) "<< \n/CreationDate ", 18);
written += t2p_write_pdf_string(t2p->pdf_datetime, output);
written += t2pWriteFile(output, (tdata_t) "\n/ModDate ", 10);
written += t2p_write_pdf_string(t2p->pdf_datetime, output);
}
written += t2pWriteFile(output, (tdata_t) "\n/Producer ", 11);
snprintf(buffer, sizeof(buffer), "libtiff / tiff2pdf - %d", TIFFLIB_VERSION);
written += t2p_write_pdf_string(buffer, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
if (t2p->pdf_creator[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Creator ", 9);
written += t2p_write_pdf_string(t2p->pdf_creator, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
} else {
if (TIFFGetField(input, TIFFTAG_SOFTWARE, &info) != 0 && info) {
if(strlen(info) >= sizeof(t2p->pdf_creator))
info[sizeof(t2p->pdf_creator) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) "/Creator ", 9);
written += t2p_write_pdf_string(info, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
if (t2p->pdf_author[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Author ", 8);
written += t2p_write_pdf_string(t2p->pdf_author, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
} else {
if ((TIFFGetField(input, TIFFTAG_ARTIST, &info) != 0
|| TIFFGetField(input, TIFFTAG_COPYRIGHT, &info) != 0)
&& info) {
if (strlen(info) >= sizeof(t2p->pdf_author))
info[sizeof(t2p->pdf_author) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) "/Author ", 8);
written += t2p_write_pdf_string(info, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
if (t2p->pdf_title[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Title ", 7);
written += t2p_write_pdf_string(t2p->pdf_title, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
} else {
if (TIFFGetField(input, TIFFTAG_DOCUMENTNAME, &info) != 0 && info) {
if (strlen(info) >= sizeof(t2p->pdf_title))
info[sizeof(t2p->pdf_title) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) "/Title ", 7);
written += t2p_write_pdf_string(info, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
if (t2p->pdf_subject[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Subject ", 9);
written += t2p_write_pdf_string(t2p->pdf_subject, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
} else {
if (TIFFGetField(input, TIFFTAG_IMAGEDESCRIPTION, &info) != 0 && info) {
if (strlen(info) >= sizeof(t2p->pdf_subject))
info[sizeof(t2p->pdf_subject) - 1] = '\0';
written += t2pWriteFile(output, (tdata_t) "/Subject ", 9);
written += t2p_write_pdf_string(info, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
if (t2p->pdf_keywords[0] != '\0') {
written += t2pWriteFile(output, (tdata_t) "/Keywords ", 10);
written += t2p_write_pdf_string(t2p->pdf_keywords, output);
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
written += t2pWriteFile(output, (tdata_t) ">> \n", 4);
return(written);
}
/*
* This function fills a string of a T2P struct with the current time as a PDF
* date string, it is called by t2p_pdf_tifftime.
*/
void t2p_pdf_currenttime(T2P* t2p)
{
struct tm* currenttime;
time_t timenow;
if (time(&timenow) == (time_t) -1) {
TIFFError(TIFF2PDF_MODULE,
"Can't get the current time: %s", strerror(errno));
timenow = (time_t) 0;
}
currenttime = localtime(&timenow);
snprintf(t2p->pdf_datetime, sizeof(t2p->pdf_datetime),
"D:%.4d%.2d%.2d%.2d%.2d%.2d",
(currenttime->tm_year + 1900) % 65536,
(currenttime->tm_mon + 1) % 256,
(currenttime->tm_mday) % 256,
(currenttime->tm_hour) % 256,
(currenttime->tm_min) % 256,
(currenttime->tm_sec) % 256);
return;
}
/*
* This function fills a string of a T2P struct with the date and time of a
* TIFF file if it exists or the current time as a PDF date string.
*/
void t2p_pdf_tifftime(T2P* t2p, TIFF* input)
{
char* datetime;
if (TIFFGetField(input, TIFFTAG_DATETIME, &datetime) != 0
&& (strlen(datetime) >= 19) ){
t2p->pdf_datetime[0]='D';
t2p->pdf_datetime[1]=':';
t2p->pdf_datetime[2]=datetime[0];
t2p->pdf_datetime[3]=datetime[1];
t2p->pdf_datetime[4]=datetime[2];
t2p->pdf_datetime[5]=datetime[3];
t2p->pdf_datetime[6]=datetime[5];
t2p->pdf_datetime[7]=datetime[6];
t2p->pdf_datetime[8]=datetime[8];
t2p->pdf_datetime[9]=datetime[9];
t2p->pdf_datetime[10]=datetime[11];
t2p->pdf_datetime[11]=datetime[12];
t2p->pdf_datetime[12]=datetime[14];
t2p->pdf_datetime[13]=datetime[15];
t2p->pdf_datetime[14]=datetime[17];
t2p->pdf_datetime[15]=datetime[18];
t2p->pdf_datetime[16] = '\0';
} else {
t2p_pdf_currenttime(t2p);
}
return;
}
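/*
 * Worked example: a TIFF DateTime of "2004:06:21 08:15:30" is rewritten as
 * the PDF date string "D:20040621081530".
 */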
/*
* This function writes a PDF Pages Tree structure to output.
*/
tsize_t t2p_write_pdf_pages(T2P* t2p, TIFF* output)
{
tsize_t written=0;
tdir_t i=0;
char buffer[32];
int buflen=0;
int page=0;
written += t2pWriteFile(output,
(tdata_t) "<< \n/Type /Pages \n/Kids [ ", 26);
page = t2p->pdf_pages+1;
for (i=0;i<t2p->tiff_pagecount;i++){
buflen=snprintf(buffer, sizeof(buffer), "%d", page);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
if ( ((i+1)%8)==0 ) {
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
page +=3;
page += t2p->tiff_pages[i].page_extra;
if(t2p->tiff_pages[i].page_tilecount>0){
page += (2 * t2p->tiff_pages[i].page_tilecount);
} else {
page +=2;
}
}
written += t2pWriteFile(output, (tdata_t) "] \n/Count ", 10);
buflen=snprintf(buffer, sizeof(buffer), "%d", t2p->tiff_pagecount);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " \n>> \n", 6);
return(written);
}
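/*
 * Worked example (hypothetical numbers): with pdf_pages = 2 the first Kid
 * is object 3; an untiled page with page_extra = 0 then advances the
 * counter by 3 + 0 + 2 = 5, so the next page object would be 8.
 */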
/*
This function writes a PDF Page structure to output.
*/
tsize_t t2p_write_pdf_page(uint32 object, T2P* t2p, TIFF* output){
unsigned int i=0;
tsize_t written=0;
char buffer[256];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "<<\n/Type /Page \n/Parent ", 24);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_pages);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n", 6);
written += t2pWriteFile(output, (tdata_t) "/MediaBox [", 11);
buflen=snprintf(buffer, sizeof(buffer), "%.4f",t2p->pdf_mediabox.x1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen=snprintf(buffer, sizeof(buffer), "%.4f",t2p->pdf_mediabox.y1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen=snprintf(buffer, sizeof(buffer), "%.4f",t2p->pdf_mediabox.x2);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen=snprintf(buffer, sizeof(buffer), "%.4f",t2p->pdf_mediabox.y2);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "] \n", 3);
written += t2pWriteFile(output, (tdata_t) "/Contents ", 10);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)(object + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n", 6);
written += t2pWriteFile(output, (tdata_t) "/Resources << \n", 15);
if( t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount != 0 ){
written += t2pWriteFile(output, (tdata_t) "/XObject <<\n", 12);
for(i=0;i<t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount;i++){
written += t2pWriteFile(output, (tdata_t) "/Im", 3);
buflen = snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "_", 1);
buflen = snprintf(buffer, sizeof(buffer), "%u", i+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen = snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(object+3+(2*i)+t2p->tiff_pages[t2p->pdf_page].page_extra));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
if(i%4==3){
written += t2pWriteFile(output, (tdata_t) "\n", 1);
}
}
written += t2pWriteFile(output, (tdata_t) ">>\n", 3);
} else {
written += t2pWriteFile(output, (tdata_t) "/XObject <<\n", 12);
written += t2pWriteFile(output, (tdata_t) "/Im", 3);
buflen = snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen = snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(object+3+(2*i)+t2p->tiff_pages[t2p->pdf_page].page_extra));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
written += t2pWriteFile(output, (tdata_t) ">>\n", 3);
}
if(t2p->tiff_transferfunctioncount != 0) {
written += t2pWriteFile(output, (tdata_t) "/ExtGState <<", 13);
t2pWriteFile(output, (tdata_t) "/GS1 ", 5);
buflen = snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(object + 3));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
written += t2pWriteFile(output, (tdata_t) ">> \n", 4);
}
written += t2pWriteFile(output, (tdata_t) "/ProcSet [ ", 11);
if(t2p->pdf_colorspace & T2P_CS_BILEVEL
|| t2p->pdf_colorspace & T2P_CS_GRAY
){
written += t2pWriteFile(output, (tdata_t) "/ImageB ", 8);
} else {
written += t2pWriteFile(output, (tdata_t) "/ImageC ", 8);
if(t2p->pdf_colorspace & T2P_CS_PALETTE){
written += t2pWriteFile(output, (tdata_t) "/ImageI ", 8);
}
}
written += t2pWriteFile(output, (tdata_t) "]\n>>\n>>\n", 8);
return(written);
}
/*
This function composes the page size and the locations of the image and its tiles on a page.
*/
void t2p_compose_pdf_page(T2P* t2p){
uint32 i=0;
uint32 i2=0;
T2P_TILE* tiles=NULL;
T2P_BOX* boxp=NULL;
uint32 tilecountx=0;
uint32 tilecounty=0;
uint32 tilewidth=0;
uint32 tilelength=0;
int istiled=0;
float f=0;
float width_ratio=0;
float length_ratio=0;
t2p->pdf_xres = t2p->tiff_xres;
t2p->pdf_yres = t2p->tiff_yres;
if(t2p->pdf_overrideres) {
t2p->pdf_xres = t2p->pdf_defaultxres;
t2p->pdf_yres = t2p->pdf_defaultyres;
}
if(t2p->pdf_xres == 0.0)
t2p->pdf_xres = t2p->pdf_defaultxres;
if(t2p->pdf_yres == 0.0)
t2p->pdf_yres = t2p->pdf_defaultyres;
if (t2p->pdf_image_fillpage) {
width_ratio = t2p->pdf_defaultpagewidth/t2p->tiff_width;
length_ratio = t2p->pdf_defaultpagelength/t2p->tiff_length;
if (width_ratio < length_ratio ) {
t2p->pdf_imagewidth = t2p->pdf_defaultpagewidth;
t2p->pdf_imagelength = t2p->tiff_length * width_ratio;
} else {
t2p->pdf_imagewidth = t2p->tiff_width * length_ratio;
t2p->pdf_imagelength = t2p->pdf_defaultpagelength;
}
} else if (t2p->tiff_resunit != RESUNIT_CENTIMETER /* RESUNIT_NONE and */
&& t2p->tiff_resunit != RESUNIT_INCH) { /* other cases */
t2p->pdf_imagewidth = ((float)(t2p->tiff_width))/t2p->pdf_xres;
t2p->pdf_imagelength = ((float)(t2p->tiff_length))/t2p->pdf_yres;
} else {
t2p->pdf_imagewidth =
((float)(t2p->tiff_width))*PS_UNIT_SIZE/t2p->pdf_xres;
t2p->pdf_imagelength =
((float)(t2p->tiff_length))*PS_UNIT_SIZE/t2p->pdf_yres;
}
if(t2p->pdf_overridepagesize != 0) {
t2p->pdf_pagewidth = t2p->pdf_defaultpagewidth;
t2p->pdf_pagelength = t2p->pdf_defaultpagelength;
} else {
t2p->pdf_pagewidth = t2p->pdf_imagewidth;
t2p->pdf_pagelength = t2p->pdf_imagelength;
}
t2p->pdf_mediabox.x1=0.0;
t2p->pdf_mediabox.y1=0.0;
t2p->pdf_mediabox.x2=t2p->pdf_pagewidth;
t2p->pdf_mediabox.y2=t2p->pdf_pagelength;
t2p->pdf_imagebox.x1=0.0;
t2p->pdf_imagebox.y1=0.0;
t2p->pdf_imagebox.x2=t2p->pdf_imagewidth;
t2p->pdf_imagebox.y2=t2p->pdf_imagelength;
if(t2p->pdf_overridepagesize!=0){
t2p->pdf_imagebox.x1+=((t2p->pdf_pagewidth-t2p->pdf_imagewidth)/2.0F);
t2p->pdf_imagebox.y1+=((t2p->pdf_pagelength-t2p->pdf_imagelength)/2.0F);
t2p->pdf_imagebox.x2+=((t2p->pdf_pagewidth-t2p->pdf_imagewidth)/2.0F);
t2p->pdf_imagebox.y2+=((t2p->pdf_pagelength-t2p->pdf_imagelength)/2.0F);
}
if(t2p->tiff_orientation > 4){
f=t2p->pdf_mediabox.x2;
t2p->pdf_mediabox.x2=t2p->pdf_mediabox.y2;
t2p->pdf_mediabox.y2=f;
}
istiled=((t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecount==0) ? 0 : 1;
if(istiled==0){
t2p_compose_pdf_page_orient(&(t2p->pdf_imagebox), t2p->tiff_orientation);
return;
} else {
tilewidth=(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilewidth;
tilelength=(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilelength;
if( tilewidth > INT_MAX ||
tilelength > INT_MAX ||
t2p->tiff_width > INT_MAX - tilewidth ||
t2p->tiff_length > INT_MAX - tilelength )
{
TIFFError(TIFF2PDF_MODULE, "Integer overflow");
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
tilecountx=(t2p->tiff_width +
tilewidth -1)/
tilewidth;
(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecountx=tilecountx;
tilecounty=(t2p->tiff_length +
tilelength -1)/
tilelength;
(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecounty=tilecounty;
(t2p->tiff_tiles[t2p->pdf_page]).tiles_edgetilewidth=
t2p->tiff_width % tilewidth;
(t2p->tiff_tiles[t2p->pdf_page]).tiles_edgetilelength=
t2p->tiff_length % tilelength;
tiles=(t2p->tiff_tiles[t2p->pdf_page]).tiles_tiles;
for(i2=0;i2<tilecounty-1;i2++){
for(i=0;i<tilecountx-1;i++){
boxp=&(tiles[i2*tilecountx+i].tile_box);
boxp->x1 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * i * tilewidth)
/ (float)t2p->tiff_width);
boxp->x2 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * (i+1) * tilewidth)
/ (float)t2p->tiff_width);
boxp->y1 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * (i2+1) * tilelength)
/ (float)t2p->tiff_length);
boxp->y2 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * i2 * tilelength)
/ (float)t2p->tiff_length);
}
boxp=&(tiles[i2*tilecountx+i].tile_box);
boxp->x1 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * i * tilewidth)
/ (float)t2p->tiff_width);
boxp->x2 = t2p->pdf_imagebox.x2;
boxp->y1 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * (i2+1) * tilelength)
/ (float)t2p->tiff_length);
boxp->y2 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * i2 * tilelength)
/ (float)t2p->tiff_length);
}
for(i=0;i<tilecountx-1;i++){
boxp=&(tiles[i2*tilecountx+i].tile_box);
boxp->x1 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * i * tilewidth)
/ (float)t2p->tiff_width);
boxp->x2 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * (i+1) * tilewidth)
/ (float)t2p->tiff_width);
boxp->y1 = t2p->pdf_imagebox.y1;
boxp->y2 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * i2 * tilelength)
/ (float)t2p->tiff_length);
}
boxp=&(tiles[i2*tilecountx+i].tile_box);
boxp->x1 =
t2p->pdf_imagebox.x1
+ ((float)(t2p->pdf_imagewidth * i * tilewidth)
/ (float)t2p->tiff_width);
boxp->x2 = t2p->pdf_imagebox.x2;
boxp->y1 = t2p->pdf_imagebox.y1;
boxp->y2 =
t2p->pdf_imagebox.y2
- ((float)(t2p->pdf_imagelength * i2 * tilelength)
/ (float)t2p->tiff_length);
}
if(t2p->tiff_orientation==0 || t2p->tiff_orientation==1){
for(i=0;i<(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecount;i++){
t2p_compose_pdf_page_orient( &(tiles[i].tile_box) , 0);
}
return;
}
for(i=0;i<(t2p->tiff_tiles[t2p->pdf_page]).tiles_tilecount;i++){
boxp=&(tiles[i].tile_box);
boxp->x1 -= t2p->pdf_imagebox.x1;
boxp->x2 -= t2p->pdf_imagebox.x1;
boxp->y1 -= t2p->pdf_imagebox.y1;
boxp->y2 -= t2p->pdf_imagebox.y1;
if(t2p->tiff_orientation==2 || t2p->tiff_orientation==3){
boxp->x1 = t2p->pdf_imagebox.x2 - t2p->pdf_imagebox.x1 - boxp->x1;
boxp->x2 = t2p->pdf_imagebox.x2 - t2p->pdf_imagebox.x1 - boxp->x2;
}
if(t2p->tiff_orientation==3 || t2p->tiff_orientation==4){
boxp->y1 = t2p->pdf_imagebox.y2 - t2p->pdf_imagebox.y1 - boxp->y1;
boxp->y2 = t2p->pdf_imagebox.y2 - t2p->pdf_imagebox.y1 - boxp->y2;
}
if(t2p->tiff_orientation==8 || t2p->tiff_orientation==5){
boxp->y1 = t2p->pdf_imagebox.y2 - t2p->pdf_imagebox.y1 - boxp->y1;
boxp->y2 = t2p->pdf_imagebox.y2 - t2p->pdf_imagebox.y1 - boxp->y2;
}
if(t2p->tiff_orientation==5 || t2p->tiff_orientation==6){
boxp->x1 = t2p->pdf_imagebox.x2 - t2p->pdf_imagebox.x1 - boxp->x1;
boxp->x2 = t2p->pdf_imagebox.x2 - t2p->pdf_imagebox.x1 - boxp->x2;
}
if(t2p->tiff_orientation > 4){
f=boxp->x1;
boxp->x1 = boxp->y1;
boxp->y1 = f;
f=boxp->x2;
boxp->x2 = boxp->y2;
boxp->y2 = f;
t2p_compose_pdf_page_orient_flip(boxp, t2p->tiff_orientation);
} else {
t2p_compose_pdf_page_orient(boxp, t2p->tiff_orientation);
}
}
return;
}
void t2p_compose_pdf_page_orient(T2P_BOX* boxp, uint16 orientation){
float m1[9];
float f=0.0;
if( boxp->x1 > boxp->x2){
f=boxp->x1;
boxp->x1=boxp->x2;
boxp->x2 = f;
}
if( boxp->y1 > boxp->y2){
f=boxp->y1;
boxp->y1=boxp->y2;
boxp->y2 = f;
}
boxp->mat[0]=m1[0]=boxp->x2-boxp->x1;
boxp->mat[1]=m1[1]=0.0;
boxp->mat[2]=m1[2]=0.0;
boxp->mat[3]=m1[3]=0.0;
boxp->mat[4]=m1[4]=boxp->y2-boxp->y1;
boxp->mat[5]=m1[5]=0.0;
boxp->mat[6]=m1[6]=boxp->x1;
boxp->mat[7]=m1[7]=boxp->y1;
boxp->mat[8]=m1[8]=1.0;
switch(orientation){
case 0:
case 1:
break;
case 2:
boxp->mat[0]=0.0F-m1[0];
boxp->mat[6]+=m1[0];
break;
case 3:
boxp->mat[0]=0.0F-m1[0];
boxp->mat[4]=0.0F-m1[4];
boxp->mat[6]+=m1[0];
boxp->mat[7]+=m1[4];
break;
case 4:
boxp->mat[4]=0.0F-m1[4];
boxp->mat[7]+=m1[4];
break;
case 5:
boxp->mat[0]=0.0F;
boxp->mat[1]=0.0F-m1[0];
boxp->mat[3]=0.0F-m1[4];
boxp->mat[4]=0.0F;
boxp->mat[6]+=m1[4];
boxp->mat[7]+=m1[0];
break;
case 6:
boxp->mat[0]=0.0F;
boxp->mat[1]=0.0F-m1[0];
boxp->mat[3]=m1[4];
boxp->mat[4]=0.0F;
boxp->mat[7]+=m1[0];
break;
case 7:
boxp->mat[0]=0.0F;
boxp->mat[1]=m1[0];
boxp->mat[3]=m1[4];
boxp->mat[4]=0.0F;
break;
case 8:
boxp->mat[0]=0.0F;
boxp->mat[1]=m1[0];
boxp->mat[3]=0.0F-m1[4];
boxp->mat[4]=0.0F;
boxp->mat[6]+=m1[4];
break;
}
return;
}
void t2p_compose_pdf_page_orient_flip(T2P_BOX* boxp, uint16 orientation){
float m1[9];
float f=0.0;
if( boxp->x1 > boxp->x2){
f=boxp->x1;
boxp->x1=boxp->x2;
boxp->x2 = f;
}
if( boxp->y1 > boxp->y2){
f=boxp->y1;
boxp->y1=boxp->y2;
boxp->y2 = f;
}
boxp->mat[0]=m1[0]=boxp->x2-boxp->x1;
boxp->mat[1]=m1[1]=0.0F;
boxp->mat[2]=m1[2]=0.0F;
boxp->mat[3]=m1[3]=0.0F;
boxp->mat[4]=m1[4]=boxp->y2-boxp->y1;
boxp->mat[5]=m1[5]=0.0F;
boxp->mat[6]=m1[6]=boxp->x1;
boxp->mat[7]=m1[7]=boxp->y1;
boxp->mat[8]=m1[8]=1.0F;
switch(orientation){
case 5:
boxp->mat[0]=0.0F;
boxp->mat[1]=0.0F-m1[4];
boxp->mat[3]=0.0F-m1[0];
boxp->mat[4]=0.0F;
boxp->mat[6]+=m1[0];
boxp->mat[7]+=m1[4];
break;
case 6:
boxp->mat[0]=0.0F;
boxp->mat[1]=0.0F-m1[4];
boxp->mat[3]=m1[0];
boxp->mat[4]=0.0F;
boxp->mat[7]+=m1[4];
break;
case 7:
boxp->mat[0]=0.0F;
boxp->mat[1]=m1[4];
boxp->mat[3]=m1[0];
boxp->mat[4]=0.0F;
break;
case 8:
boxp->mat[0]=0.0F;
boxp->mat[1]=m1[4];
boxp->mat[3]=0.0F-m1[0];
boxp->mat[4]=0.0F;
boxp->mat[6]+=m1[0];
break;
}
return;
}
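/*
 * Note on the matrix layout: mat[] holds a 3x3 transformation matrix in
 * row-major order, of which only the six affine entries are used; the
 * content stream writer below emits them as the PDF operator
 * "mat[0] mat[1] mat[3] mat[4] mat[6] mat[7] cm", mapping the unit square
 * onto the tile or image box.
 */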
/*
This function writes a PDF Contents stream to output.
*/
tsize_t t2p_write_pdf_page_content_stream(T2P* t2p, TIFF* output){
tsize_t written=0;
ttile_t i=0;
char buffer[512];
int buflen=0;
T2P_BOX box;
if(t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount>0){
for(i=0;i<t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount; i++){
box=t2p->tiff_tiles[t2p->pdf_page].tiles_tiles[i].tile_box;
buflen=snprintf(buffer, sizeof(buffer),
"q %s %.4f %.4f %.4f %.4f %.4f %.4f cm /Im%d_%ld Do Q\n",
t2p->tiff_transferfunctioncount?"/GS1 gs ":"",
box.mat[0],
box.mat[1],
box.mat[3],
box.mat[4],
box.mat[6],
box.mat[7],
t2p->pdf_page + 1,
(long)(i + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2p_write_pdf_stream(buffer, buflen, output);
}
} else {
box=t2p->pdf_imagebox;
buflen=snprintf(buffer, sizeof(buffer),
"q %s %.4f %.4f %.4f %.4f %.4f %.4f cm /Im%d Do Q\n",
t2p->tiff_transferfunctioncount?"/GS1 gs ":"",
box.mat[0],
box.mat[1],
box.mat[3],
box.mat[4],
box.mat[6],
box.mat[7],
t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2p_write_pdf_stream(buffer, buflen, output);
}
return(written);
}
/*
This function writes a PDF Image XObject stream dictionary to output.
*/
tsize_t t2p_write_pdf_xobject_stream_dict(ttile_t tile,
T2P* t2p,
TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2p_write_pdf_stream_dict(0, t2p->pdf_xrefcount+1, output);
written += t2pWriteFile(output,
(tdata_t) "/Type /XObject \n/Subtype /Image \n/Name /Im",
42);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
if(tile != 0){
written += t2pWriteFile(output, (tdata_t) "_", 1);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)tile);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
}
written += t2pWriteFile(output, (tdata_t) "\n/Width ", 8);
if(tile==0){
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->tiff_width);
} else {
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)!=0){
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth);
} else {
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth);
}
}
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/Height ", 9);
if(tile==0){
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->tiff_length);
} else {
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)!=0){
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
} else {
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
}
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/BitsPerComponent ", 19);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->tiff_bitspersample);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/ColorSpace ", 13);
written += t2p_write_pdf_xobject_cs(t2p, output);
if (t2p->pdf_image_interpolate)
written += t2pWriteFile(output,
(tdata_t) "\n/Interpolate true", 18);
if( (t2p->pdf_switchdecode != 0)
#ifdef CCITT_SUPPORT
&& ! (t2p->pdf_colorspace & T2P_CS_BILEVEL
&& t2p->pdf_compression == T2P_COMPRESS_G4)
#endif
){
written += t2p_write_pdf_xobject_decode(t2p, output);
}
written += t2p_write_pdf_xobject_stream_filter(tile, t2p, output);
return(written);
}
/*
* This function writes a PDF Image XObject Colorspace name to output.
*/
tsize_t t2p_write_pdf_xobject_cs(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[128];
int buflen=0;
float X_W=1.0;
float Y_W=1.0;
float Z_W=1.0;
if( (t2p->pdf_colorspace & T2P_CS_ICCBASED) != 0){
written += t2p_write_pdf_xobject_icccs(t2p, output);
return(written);
}
if( (t2p->pdf_colorspace & T2P_CS_PALETTE) != 0){
written += t2pWriteFile(output, (tdata_t) "[ /Indexed ", 11);
t2p->pdf_colorspace ^= T2P_CS_PALETTE;
written += t2p_write_pdf_xobject_cs(t2p, output);
t2p->pdf_colorspace |= T2P_CS_PALETTE;
buflen=snprintf(buffer, sizeof(buffer), "%u", (0x0001 << t2p->tiff_bitspersample)-1 );
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " ", 1);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_palettecs );
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ]\n", 7);
return(written);
}
if(t2p->pdf_colorspace & T2P_CS_BILEVEL){
written += t2pWriteFile(output, (tdata_t) "/DeviceGray \n", 13);
}
if(t2p->pdf_colorspace & T2P_CS_GRAY){
if(t2p->pdf_colorspace & T2P_CS_CALGRAY){
written += t2p_write_pdf_xobject_calcs(t2p, output);
} else {
written += t2pWriteFile(output, (tdata_t) "/DeviceGray \n", 13);
}
}
if(t2p->pdf_colorspace & T2P_CS_RGB){
if(t2p->pdf_colorspace & T2P_CS_CALRGB){
written += t2p_write_pdf_xobject_calcs(t2p, output);
} else {
written += t2pWriteFile(output, (tdata_t) "/DeviceRGB \n", 12);
}
}
if(t2p->pdf_colorspace & T2P_CS_CMYK){
written += t2pWriteFile(output, (tdata_t) "/DeviceCMYK \n", 13);
}
if(t2p->pdf_colorspace & T2P_CS_LAB){
written += t2pWriteFile(output, (tdata_t) "[/Lab << \n", 10);
written += t2pWriteFile(output, (tdata_t) "/WhitePoint ", 12);
X_W = t2p->tiff_whitechromaticities[0];
Y_W = t2p->tiff_whitechromaticities[1];
Z_W = 1.0F - (X_W + Y_W);
X_W /= Y_W;
Z_W /= Y_W;
Y_W = 1.0F;
buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f] \n", X_W, Y_W, Z_W);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Range ", 7);
buflen=snprintf(buffer, sizeof(buffer), "[%d %d %d %d] \n",
t2p->pdf_labrange[0],
t2p->pdf_labrange[1],
t2p->pdf_labrange[2],
t2p->pdf_labrange[3]);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) ">>] \n", 5);
}
return(written);
}
tsize_t t2p_write_pdf_transfer(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "<< /Type /ExtGState \n/TR ", 25);
if(t2p->tiff_transferfunctioncount == 1){
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(t2p->pdf_xrefcount + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
} else {
written += t2pWriteFile(output, (tdata_t) "[ ", 2);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(t2p->pdf_xrefcount + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(t2p->pdf_xrefcount + 2));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)(t2p->pdf_xrefcount + 3));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R ", 5);
written += t2pWriteFile(output, (tdata_t) "/Identity ] ", 12);
}
written += t2pWriteFile(output, (tdata_t) " >> \n", 5);
return(written);
}
tsize_t t2p_write_pdf_transfer_dict(T2P* t2p, TIFF* output, uint16 i){
tsize_t written=0;
char buffer[32];
int buflen=0;
(void)i; /* XXX */
written += t2pWriteFile(output, (tdata_t) "/FunctionType 0 \n", 17);
written += t2pWriteFile(output, (tdata_t) "/Domain [0.0 1.0] \n", 19);
written += t2pWriteFile(output, (tdata_t) "/Range [0.0 1.0] \n", 18);
buflen=snprintf(buffer, sizeof(buffer), "/Size [%u] \n", (1<<t2p->tiff_bitspersample));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/BitsPerSample 16 \n", 19);
written += t2p_write_pdf_stream_dict(((tsize_t)1)<<(t2p->tiff_bitspersample+1), 0, output);
return(written);
}
tsize_t t2p_write_pdf_transfer_stream(T2P* t2p, TIFF* output, uint16 i){
tsize_t written=0;
written += t2p_write_pdf_stream(
t2p->tiff_transferfunction[i],
(((tsize_t)1)<<(t2p->tiff_bitspersample+1)),
output);
return(written);
}
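/*
 * Worked example: with tiff_bitspersample = 8 the transfer function has
 * /Size [256] samples of 16 bits each, so the stream written above is
 * 1 << (8 + 1) = 512 bytes long, matching the length used in the
 * dictionary.
 */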
/*
This function writes a PDF CalRGB or CalGray colorspace array to output.
*/
tsize_t t2p_write_pdf_xobject_calcs(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[256];
int buflen=0;
float X_W=0.0;
float Y_W=0.0;
float Z_W=0.0;
float X_R=0.0;
float Y_R=0.0;
float Z_R=0.0;
float X_G=0.0;
float Y_G=0.0;
float Z_G=0.0;
float X_B=0.0;
float Y_B=0.0;
float Z_B=0.0;
float x_w=0.0;
float y_w=0.0;
float z_w=0.0;
float x_r=0.0;
float y_r=0.0;
float x_g=0.0;
float y_g=0.0;
float x_b=0.0;
float y_b=0.0;
float R=1.0;
float G=1.0;
float B=1.0;
written += t2pWriteFile(output, (tdata_t) "[", 1);
if(t2p->pdf_colorspace & T2P_CS_CALGRAY){
written += t2pWriteFile(output, (tdata_t) "/CalGray ", 9);
X_W = t2p->tiff_whitechromaticities[0];
Y_W = t2p->tiff_whitechromaticities[1];
Z_W = 1.0F - (X_W + Y_W);
X_W /= Y_W;
Z_W /= Y_W;
Y_W = 1.0F;
}
if(t2p->pdf_colorspace & T2P_CS_CALRGB){
written += t2pWriteFile(output, (tdata_t) "/CalRGB ", 8);
x_w = t2p->tiff_whitechromaticities[0];
y_w = t2p->tiff_whitechromaticities[1];
x_r = t2p->tiff_primarychromaticities[0];
y_r = t2p->tiff_primarychromaticities[1];
x_g = t2p->tiff_primarychromaticities[2];
y_g = t2p->tiff_primarychromaticities[3];
x_b = t2p->tiff_primarychromaticities[4];
y_b = t2p->tiff_primarychromaticities[5];
z_w = y_w * ((x_g - x_b)*y_r - (x_r-x_b)*y_g + (x_r-x_g)*y_b);
Y_R = (y_r/R) * ((x_g-x_b)*y_w - (x_w-x_b)*y_g + (x_w-x_g)*y_b) / z_w;
X_R = Y_R * x_r / y_r;
Z_R = Y_R * (((1-x_r)/y_r)-1);
Y_G = ((0.0F-(y_g))/G) * ((x_r-x_b)*y_w - (x_w-x_b)*y_r + (x_w-x_r)*y_b) / z_w;
X_G = Y_G * x_g / y_g;
Z_G = Y_G * (((1-x_g)/y_g)-1);
Y_B = (y_b/B) * ((x_r-x_g)*y_w - (x_w-x_g)*y_r + (x_w-x_r)*y_g) / z_w;
X_B = Y_B * x_b / y_b;
Z_B = Y_B * (((1-x_b)/y_b)-1);
X_W = (X_R * R) + (X_G * G) + (X_B * B);
Y_W = (Y_R * R) + (Y_G * G) + (Y_B * B);
Z_W = (Z_R * R) + (Z_G * G) + (Z_B * B);
X_W /= Y_W;
Z_W /= Y_W;
Y_W = 1.0;
}
written += t2pWriteFile(output, (tdata_t) "<< \n", 4);
if(t2p->pdf_colorspace & T2P_CS_CALGRAY){
written += t2pWriteFile(output, (tdata_t) "/WhitePoint ", 12);
buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f] \n", X_W, Y_W, Z_W);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Gamma 2.2 \n", 12);
}
if(t2p->pdf_colorspace & T2P_CS_CALRGB){
written += t2pWriteFile(output, (tdata_t) "/WhitePoint ", 12);
buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f] \n", X_W, Y_W, Z_W);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Matrix ", 8);
buflen=snprintf(buffer, sizeof(buffer), "[%.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f] \n",
X_R, Y_R, Z_R,
X_G, Y_G, Z_G,
X_B, Y_B, Z_B);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Gamma [2.2 2.2 2.2] \n", 22);
}
written += t2pWriteFile(output, (tdata_t) ">>] \n", 5);
return(written);
}
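/*
 * The CalRGB branch above uses the standard colorimetric derivation: each
 * primary's XYZ column is Y_P * (x_P/y_P, 1, (1-x_P-y_P)/y_P), with Y_P
 * chosen so that R=G=B=1 reproduces the white chromaticity; the white
 * point itself is then normalized to Y_W = 1.
 */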
/*
This function writes a PDF ICCBased colorspace array to output.
*/
tsize_t t2p_write_pdf_xobject_icccs(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "[/ICCBased ", 11);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_icccs);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R] \n", 7);
return(written);
}
tsize_t t2p_write_pdf_xobject_icccs_dict(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2pWriteFile(output, (tdata_t) "/N ", 3);
buflen=snprintf(buffer, sizeof(buffer), "%u \n", t2p->tiff_samplesperpixel);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "/Alternate ", 11);
t2p->pdf_colorspace ^= T2P_CS_ICCBASED;
written += t2p_write_pdf_xobject_cs(t2p, output);
t2p->pdf_colorspace |= T2P_CS_ICCBASED;
written += t2p_write_pdf_stream_dict(t2p->tiff_iccprofilelength, 0, output);
return(written);
}
tsize_t t2p_write_pdf_xobject_icccs_stream(T2P* t2p, TIFF* output){
tsize_t written=0;
written += t2p_write_pdf_stream(
(tdata_t) t2p->tiff_iccprofile,
(tsize_t) t2p->tiff_iccprofilelength,
output);
return(written);
}
/*
This function writes a palette stream for an indexed color space to output.
*/
tsize_t t2p_write_pdf_xobject_palettecs_stream(T2P* t2p, TIFF* output){
tsize_t written=0;
written += t2p_write_pdf_stream(
(tdata_t) t2p->pdf_palette,
(tsize_t) t2p->pdf_palettesize,
output);
return(written);
}
/*
This function writes a PDF Image XObject Decode array to output.
*/
tsize_t t2p_write_pdf_xobject_decode(T2P* t2p, TIFF* output){
tsize_t written=0;
int i=0;
written += t2pWriteFile(output, (tdata_t) "/Decode [ ", 10);
for (i=0;i<t2p->tiff_samplesperpixel;i++){
written += t2pWriteFile(output, (tdata_t) "1 0 ", 4);
}
written += t2pWriteFile(output, (tdata_t) "]\n", 2);
return(written);
}
/*
This function writes a PDF Image XObject stream filter name and parameters to
output.
*/
tsize_t t2p_write_pdf_xobject_stream_filter(ttile_t tile, T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
if(t2p->pdf_compression==T2P_COMPRESS_NONE){
return(written);
}
written += t2pWriteFile(output, (tdata_t) "/Filter ", 8);
switch(t2p->pdf_compression){
#ifdef CCITT_SUPPORT
case T2P_COMPRESS_G4:
written += t2pWriteFile(output, (tdata_t) "/CCITTFaxDecode ", 16);
written += t2pWriteFile(output, (tdata_t) "/DecodeParms ", 13);
written += t2pWriteFile(output, (tdata_t) "<< /K -1 ", 9);
if(tile==0){
written += t2pWriteFile(output, (tdata_t) "/Columns ", 9);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_width);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " /Rows ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_length);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
} else {
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)==0){
written += t2pWriteFile(output, (tdata_t) "/Columns ", 9);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
} else {
written += t2pWriteFile(output, (tdata_t) "/Columns ", 9);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
}
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)==0){
written += t2pWriteFile(output, (tdata_t) " /Rows ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
} else {
written += t2pWriteFile(output, (tdata_t) " /Rows ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
}
}
if(t2p->pdf_switchdecode == 0){
written += t2pWriteFile(output, (tdata_t) " /BlackIs1 true ", 16);
}
written += t2pWriteFile(output, (tdata_t) ">>\n", 3);
break;
#endif
#ifdef JPEG_SUPPORT
case T2P_COMPRESS_JPEG:
written += t2pWriteFile(output, (tdata_t) "/DCTDecode ", 11);
if(t2p->tiff_photometric != PHOTOMETRIC_YCBCR) {
written += t2pWriteFile(output, (tdata_t) "/DecodeParms ", 13);
written += t2pWriteFile(output, (tdata_t) "<< /ColorTransform 1 >>\n", 24);
}
break;
#endif
#ifdef ZIP_SUPPORT
case T2P_COMPRESS_ZIP:
written += t2pWriteFile(output, (tdata_t) "/FlateDecode ", 13);
if(t2p->pdf_compressionquality%100){
written += t2pWriteFile(output, (tdata_t) "/DecodeParms ", 13);
written += t2pWriteFile(output, (tdata_t) "<< /Predictor ", 14);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_compressionquality%100);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " /Columns ", 10);
buflen = snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_width);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " /Colors ", 9);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->tiff_samplesperpixel);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " /BitsPerComponent ", 19);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->tiff_bitspersample);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) ">>\n", 3);
}
break;
#endif
default:
break;
}
return(written);
}
/*
This function writes a PDF xref table to output.
*/
tsize_t t2p_write_pdf_xreftable(T2P* t2p, TIFF* output){
tsize_t written=0;
char buffer[64];
int buflen=0;
uint32 i=0;
written += t2pWriteFile(output, (tdata_t) "xref\n0 ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)(t2p->pdf_xrefcount + 1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " \n0000000000 65535 f \n", 22);
for (i=0;i<t2p->pdf_xrefcount;i++){
snprintf(buffer, sizeof(buffer), "%.10lu 00000 n \n",
(unsigned long)t2p->pdf_xrefoffsets[i]);
written += t2pWriteFile(output, (tdata_t) buffer, 20);
}
return(written);
}
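/*
* Each xref entry above is exactly 20 bytes, as the PDF specification
* requires: a 10-digit zero-padded byte offset, a space, a 5-digit
* generation number, a space, the 'n' (in-use) keyword, and a two-byte
* end-of-line. A minimal illustrative sketch of the formatting, with a
* hypothetical offset value:
*
* char entry[32];
* snprintf(entry, sizeof(entry), "%.10lu 00000 n \n", 12345UL);
*
* entry now holds "0000012345 00000 n \n", 20 characters, which is why
* the loop above writes a hard-coded length of 20 instead of the
* snprintf() return value.
*/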
/*
* This function writes a PDF trailer to output.
*/
tsize_t t2p_write_pdf_trailer(T2P* t2p, TIFF* output)
{
tsize_t written = 0;
char buffer[32];
int buflen = 0;
size_t i = 0;
for (i = 0; i < sizeof(t2p->pdf_fileid) - 8; i += 8)
snprintf(t2p->pdf_fileid + i, 9, "%.8X", rand());
written += t2pWriteFile(output, (tdata_t) "trailer\n<<\n/Size ", 17);
buflen = snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)(t2p->pdf_xrefcount+1));
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/Root ", 7);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_catalog);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n/Info ", 12);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_info);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) " 0 R \n/ID[<", 11);
written += t2pWriteFile(output, (tdata_t) t2p->pdf_fileid,
sizeof(t2p->pdf_fileid) - 1);
written += t2pWriteFile(output, (tdata_t) "><", 2);
written += t2pWriteFile(output, (tdata_t) t2p->pdf_fileid,
sizeof(t2p->pdf_fileid) - 1);
written += t2pWriteFile(output, (tdata_t) ">]\n>>\nstartxref\n", 16);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->pdf_startxref);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n%%EOF\n", 7);
return(written);
}
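/*
* Note on the /ID generation above: each snprintf() writes eight hex
* digits plus a terminating NUL into pdf_fileid, and the next pass
* overwrites that NUL with its first digit, so the buffer ends up as one
* contiguous hex string with a single trailing NUL. The same pattern in
* isolation (buffer size chosen only for illustration):
*
* char id[33];
* size_t i;
* for (i = 0; i < sizeof(id) - 8; i += 8)
* snprintf(id + i, 9, "%.8X", rand());
*
* This leaves id[] holding 32 hex digits and id[32] == '\0'.
*/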
/*
This function writes a PDF to a file given a pointer to a TIFF.
The idea behind using a TIFF* as output for a PDF file is that the file
can be created with TIFFClientOpen for memory-mapped use within the TIFF
library, and TIFFWriteEncodedStrip can be used to write compressed data to
the output. The output is not actually a TIFF file; it is a PDF file.
This function uses only t2pWriteFile and TIFFWriteEncodedStrip to write to
the output TIFF file. When libtiff would otherwise be writing data to the
output file, the write procedure of the TIFF structure is replaced with an
empty implementation.
The first argument to the function is an initialized and validated T2P
context struct pointer.
The second argument to the function is the TIFF* that is the input that has
been opened for reading and no other functions have been called upon it.
The third argument to the function is the TIFF* that is the output that has
been opened for writing. It must be freshly opened, with no data written
to it yet. If the output is seekable, it's OK to seek to the beginning of
the file first; the function itself only writes to the output PDF and does
not seek. See the example usage in the main() function.
TIFF* output = TIFFOpen("output.pdf", "w");
assert(output != NULL);
if(output->tif_seekproc != NULL){
t2pSeekFile(output, (toff_t) 0, SEEK_SET);
}
This function returns the file size of the output PDF file. On error it
returns zero and the t2p->t2p_error variable is set to T2P_ERR_ERROR.
After this function completes, call t2p_free on t2p, TIFFClose on input,
and TIFFClose on output.
*/
tsize_t t2p_write_pdf(T2P* t2p, TIFF* input, TIFF* output){
tsize_t written=0;
ttile_t i2=0;
tsize_t streamlen=0;
uint16 i=0;
t2p_read_tiff_init(t2p, input);
if(t2p->t2p_error!=T2P_ERR_OK){return(0);}
t2p->pdf_xrefoffsets= (uint32*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->pdf_xrefcount,sizeof(uint32)) );
if(t2p->pdf_xrefoffsets==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate %u bytes of memory for t2p_write_pdf",
(unsigned int) (t2p->pdf_xrefcount * sizeof(uint32)) );
t2p->t2p_error = T2P_ERR_ERROR;
return(written);
}
t2p->pdf_xrefcount=0;
t2p->pdf_catalog=1;
t2p->pdf_info=2;
t2p->pdf_pages=3;
written += t2p_write_pdf_header(t2p, output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_catalog=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_catalog(t2p, output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_info=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_info(t2p, input, output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_pages=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_pages(t2p, output);
written += t2p_write_pdf_obj_end(output);
for(t2p->pdf_page=0;t2p->pdf_page<t2p->tiff_pagecount;t2p->pdf_page++){
t2p_read_tiff_data(t2p, input);
if(t2p->t2p_error!=T2P_ERR_OK){return(0);}
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_page(t2p->pdf_xrefcount, t2p, output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_stream_dict(0, t2p->pdf_xrefcount+1, output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
streamlen=written;
written += t2p_write_pdf_page_content_stream(t2p, output);
streamlen=written-streamlen;
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_length(streamlen, output);
written += t2p_write_pdf_obj_end(output);
if(t2p->tiff_transferfunctioncount != 0){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_transfer(t2p, output);
written += t2p_write_pdf_obj_end(output);
for(i=0; i < t2p->tiff_transferfunctioncount; i++){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_transfer_dict(t2p, output, i);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
/* streamlen=written; */ /* value not used */
written += t2p_write_pdf_transfer_stream(t2p, output, i);
/* streamlen=written-streamlen; */ /* value not used */
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
}
}
if( (t2p->pdf_colorspace & T2P_CS_PALETTE) != 0){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_palettecs=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_stream_dict(t2p->pdf_palettesize, 0, output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
/* streamlen=written; */ /* value not used */
written += t2p_write_pdf_xobject_palettecs_stream(t2p, output);
/* streamlen=written-streamlen; */ /* value not used */
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
}
if( (t2p->pdf_colorspace & T2P_CS_ICCBASED) != 0){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
t2p->pdf_icccs=t2p->pdf_xrefcount;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_xobject_icccs_dict(t2p, output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
/* streamlen=written; */ /* value not used */
written += t2p_write_pdf_xobject_icccs_stream(t2p, output);
/* streamlen=written-streamlen; */ /* value not used */
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
}
if(t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount !=0){
for(i2=0;i2<t2p->tiff_tiles[t2p->pdf_page].tiles_tilecount;i2++){
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_xobject_stream_dict(
i2+1,
t2p,
output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
streamlen=written;
t2p_read_tiff_size_tile(t2p, input, i2);
written += t2p_readwrite_pdf_image_tile(t2p, input, output, i2);
t2p_write_advance_directory(t2p, output);
if(t2p->t2p_error!=T2P_ERR_OK){return(0);}
streamlen=written-streamlen;
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_length(streamlen, output);
written += t2p_write_pdf_obj_end(output);
}
} else {
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_dict_start(output);
written += t2p_write_pdf_xobject_stream_dict(
0,
t2p,
output);
written += t2p_write_pdf_stream_dict_end(output);
written += t2p_write_pdf_stream_start(output);
streamlen=written;
t2p_read_tiff_size(t2p, input);
written += t2p_readwrite_pdf_image(t2p, input, output);
t2p_write_advance_directory(t2p, output);
if(t2p->t2p_error!=T2P_ERR_OK){return(0);}
streamlen=written-streamlen;
written += t2p_write_pdf_stream_end(output);
written += t2p_write_pdf_obj_end(output);
t2p->pdf_xrefoffsets[t2p->pdf_xrefcount++]=written;
written += t2p_write_pdf_obj_start(t2p->pdf_xrefcount, output);
written += t2p_write_pdf_stream_length(streamlen, output);
written += t2p_write_pdf_obj_end(output);
}
}
t2p->pdf_startxref = written;
written += t2p_write_pdf_xreftable(t2p, output);
written += t2p_write_pdf_trailer(t2p, output);
t2p_disable(output);
return(written);
}
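/*
* A minimal end-to-end usage sketch for t2p_write_pdf(), with error
* handling elided. t2p_init() is assumed here to be the T2P constructor
* used elsewhere in this program:
*
* T2P* t2p = t2p_init();
* TIFF* input = TIFFOpen("input.tif", "r");
* TIFF* output = TIFFOpen("output.pdf", "w");
* tsize_t size = t2p_write_pdf(t2p, input, output);
* if (size == 0 || t2p->t2p_error != T2P_ERR_OK)
* fprintf(stderr, "conversion failed\n");
* t2p_free(t2p);
* TIFFClose(input);
* TIFFClose(output);
*/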
/* vim: set ts=8 sts=8 sw=8 noet: */
/*
* Local Variables:
* mode: c
* c-basic-offset: 8
* fill-column: 78
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_4800_1 |
crossvul-cpp_data_bad_2124_0 | /*
* Linux Socket Filter - Kernel level socket filtering
*
* Based on the design of the Berkeley Packet Filter. The new
* internal format has been designed by PLUMgrid:
*
* Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
*
* Authors:
*
* Jay Schulist <jschlst@samba.org>
* Alexei Starovoitov <ast@plumgrid.com>
* Daniel Borkmann <dborkman@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Andi Kleen - Fix a few bad bugs and races.
* Kris Katterjohn - Added many additional checks in sk_chk_filter()
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
/* No hurry in this branch
*
* Exported for the bpf jit load helper.
*/
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
u8 *ptr = NULL;
if (k >= SKF_NET_OFF)
ptr = skb_network_header(skb) + k - SKF_NET_OFF;
else if (k >= SKF_LL_OFF)
ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
return ptr;
return NULL;
}
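/*
* Usage sketch for the negative-offset convention above: SKF_NET_OFF and
* SKF_LL_OFF are large negative bases from <linux/filter.h>, letting a
* classic BPF program address the network or link-layer header relative
* to those bases. Reading the first byte of the network header would
* look roughly like this (use_byte() is a hypothetical consumer):
*
* u8 *p = bpf_internal_load_pointer_neg_helper(skb, SKF_NET_OFF, 1);
* if (p)
* use_byte(*p);
*
* The bounds check against skb->head and skb_tail_pointer() means a
* non-NULL return points at 'size' valid bytes.
*/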
static inline void *load_pointer(const struct sk_buff *skb, int k,
unsigned int size, void *buffer)
{
if (k >= 0)
return skb_header_pointer(skb, k, size, buffer);
return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
/**
* sk_filter - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
*
* Run the filter code and then cut skb->data to the correct size returned
* by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
* smaller than pkt_len we keep the whole skb->data. This is the socket
* level wrapper to sk_run_filter. It returns 0 if the packet should
* be accepted or -EPERM if the packet should be tossed.
*
*/
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
int err;
struct sk_filter *filter;
/*
* If the skb was allocated from pfmemalloc reserves, only
* allow SOCK_MEMALLOC sockets to use it as this socket is
* helping free memory
*/
if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
return -ENOMEM;
err = security_sock_rcv_skb(sk, skb);
if (err)
return err;
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(sk_filter);
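/*
* A typical call-site sketch, as seen in protocol receive paths
* (illustrative, not lifted from any one protocol):
*
* if (sk_filter(sk, skb)) {
* kfree_skb(skb);
* return NET_RX_DROP;
* }
*
* A non-zero return means the filter said drop (-EPERM) or an error such
* as -ENOMEM occurred; on success, skb may have been trimmed to the
* length the filter returned.
*/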
/* Base function for offset calculation. Needs to go into .text section,
* therefore keeping it non-static as well; will also be used by JITs
* anyway later on, so do not let the compiler omit it.
*/
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
return 0;
}
/**
* __sk_run_filter - run a filter on a given context
* @ctx: buffer to run the filter on
* @insn: filter to apply
*
* Decode and apply filter instructions to the skb->data. Return length to
* keep, 0 for none. @ctx is the data we are operating on, @insn is the
* array of filter instructions.
*/
unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
{
u64 stack[MAX_BPF_STACK / sizeof(u64)];
u64 regs[MAX_BPF_REG], tmp;
void *ptr;
int off;
#define K insn->imm
#define A regs[insn->a_reg]
#define X regs[insn->x_reg]
#define R0 regs[0]
#define CONT ({insn++; goto select_insn; })
#define CONT_JMP ({insn++; goto select_insn; })
static const void *jumptable[256] = {
[0 ... 255] = &&default_label,
/* Now overwrite non-defaults ... */
#define DL(A, B, C) [A|B|C] = &&A##_##B##_##C
DL(BPF_ALU, BPF_ADD, BPF_X),
DL(BPF_ALU, BPF_ADD, BPF_K),
DL(BPF_ALU, BPF_SUB, BPF_X),
DL(BPF_ALU, BPF_SUB, BPF_K),
DL(BPF_ALU, BPF_AND, BPF_X),
DL(BPF_ALU, BPF_AND, BPF_K),
DL(BPF_ALU, BPF_OR, BPF_X),
DL(BPF_ALU, BPF_OR, BPF_K),
DL(BPF_ALU, BPF_LSH, BPF_X),
DL(BPF_ALU, BPF_LSH, BPF_K),
DL(BPF_ALU, BPF_RSH, BPF_X),
DL(BPF_ALU, BPF_RSH, BPF_K),
DL(BPF_ALU, BPF_XOR, BPF_X),
DL(BPF_ALU, BPF_XOR, BPF_K),
DL(BPF_ALU, BPF_MUL, BPF_X),
DL(BPF_ALU, BPF_MUL, BPF_K),
DL(BPF_ALU, BPF_MOV, BPF_X),
DL(BPF_ALU, BPF_MOV, BPF_K),
DL(BPF_ALU, BPF_DIV, BPF_X),
DL(BPF_ALU, BPF_DIV, BPF_K),
DL(BPF_ALU, BPF_MOD, BPF_X),
DL(BPF_ALU, BPF_MOD, BPF_K),
DL(BPF_ALU, BPF_NEG, 0),
DL(BPF_ALU, BPF_END, BPF_TO_BE),
DL(BPF_ALU, BPF_END, BPF_TO_LE),
DL(BPF_ALU64, BPF_ADD, BPF_X),
DL(BPF_ALU64, BPF_ADD, BPF_K),
DL(BPF_ALU64, BPF_SUB, BPF_X),
DL(BPF_ALU64, BPF_SUB, BPF_K),
DL(BPF_ALU64, BPF_AND, BPF_X),
DL(BPF_ALU64, BPF_AND, BPF_K),
DL(BPF_ALU64, BPF_OR, BPF_X),
DL(BPF_ALU64, BPF_OR, BPF_K),
DL(BPF_ALU64, BPF_LSH, BPF_X),
DL(BPF_ALU64, BPF_LSH, BPF_K),
DL(BPF_ALU64, BPF_RSH, BPF_X),
DL(BPF_ALU64, BPF_RSH, BPF_K),
DL(BPF_ALU64, BPF_XOR, BPF_X),
DL(BPF_ALU64, BPF_XOR, BPF_K),
DL(BPF_ALU64, BPF_MUL, BPF_X),
DL(BPF_ALU64, BPF_MUL, BPF_K),
DL(BPF_ALU64, BPF_MOV, BPF_X),
DL(BPF_ALU64, BPF_MOV, BPF_K),
DL(BPF_ALU64, BPF_ARSH, BPF_X),
DL(BPF_ALU64, BPF_ARSH, BPF_K),
DL(BPF_ALU64, BPF_DIV, BPF_X),
DL(BPF_ALU64, BPF_DIV, BPF_K),
DL(BPF_ALU64, BPF_MOD, BPF_X),
DL(BPF_ALU64, BPF_MOD, BPF_K),
DL(BPF_ALU64, BPF_NEG, 0),
DL(BPF_JMP, BPF_CALL, 0),
DL(BPF_JMP, BPF_JA, 0),
DL(BPF_JMP, BPF_JEQ, BPF_X),
DL(BPF_JMP, BPF_JEQ, BPF_K),
DL(BPF_JMP, BPF_JNE, BPF_X),
DL(BPF_JMP, BPF_JNE, BPF_K),
DL(BPF_JMP, BPF_JGT, BPF_X),
DL(BPF_JMP, BPF_JGT, BPF_K),
DL(BPF_JMP, BPF_JGE, BPF_X),
DL(BPF_JMP, BPF_JGE, BPF_K),
DL(BPF_JMP, BPF_JSGT, BPF_X),
DL(BPF_JMP, BPF_JSGT, BPF_K),
DL(BPF_JMP, BPF_JSGE, BPF_X),
DL(BPF_JMP, BPF_JSGE, BPF_K),
DL(BPF_JMP, BPF_JSET, BPF_X),
DL(BPF_JMP, BPF_JSET, BPF_K),
DL(BPF_JMP, BPF_EXIT, 0),
DL(BPF_STX, BPF_MEM, BPF_B),
DL(BPF_STX, BPF_MEM, BPF_H),
DL(BPF_STX, BPF_MEM, BPF_W),
DL(BPF_STX, BPF_MEM, BPF_DW),
DL(BPF_STX, BPF_XADD, BPF_W),
DL(BPF_STX, BPF_XADD, BPF_DW),
DL(BPF_ST, BPF_MEM, BPF_B),
DL(BPF_ST, BPF_MEM, BPF_H),
DL(BPF_ST, BPF_MEM, BPF_W),
DL(BPF_ST, BPF_MEM, BPF_DW),
DL(BPF_LDX, BPF_MEM, BPF_B),
DL(BPF_LDX, BPF_MEM, BPF_H),
DL(BPF_LDX, BPF_MEM, BPF_W),
DL(BPF_LDX, BPF_MEM, BPF_DW),
DL(BPF_LD, BPF_ABS, BPF_W),
DL(BPF_LD, BPF_ABS, BPF_H),
DL(BPF_LD, BPF_ABS, BPF_B),
DL(BPF_LD, BPF_IND, BPF_W),
DL(BPF_LD, BPF_IND, BPF_H),
DL(BPF_LD, BPF_IND, BPF_B),
#undef DL
};
regs[FP_REG] = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
regs[ARG1_REG] = (u64) (unsigned long) ctx;
select_insn:
goto *jumptable[insn->code];
/* ALU */
#define ALU(OPCODE, OP) \
BPF_ALU64_##OPCODE##_BPF_X: \
A = A OP X; \
CONT; \
BPF_ALU_##OPCODE##_BPF_X: \
A = (u32) A OP (u32) X; \
CONT; \
BPF_ALU64_##OPCODE##_BPF_K: \
A = A OP K; \
CONT; \
BPF_ALU_##OPCODE##_BPF_K: \
A = (u32) A OP (u32) K; \
CONT;
ALU(BPF_ADD, +)
ALU(BPF_SUB, -)
ALU(BPF_AND, &)
ALU(BPF_OR, |)
ALU(BPF_LSH, <<)
ALU(BPF_RSH, >>)
ALU(BPF_XOR, ^)
ALU(BPF_MUL, *)
#undef ALU
BPF_ALU_BPF_NEG_0:
A = (u32) -A;
CONT;
BPF_ALU64_BPF_NEG_0:
A = -A;
CONT;
BPF_ALU_BPF_MOV_BPF_X:
A = (u32) X;
CONT;
BPF_ALU_BPF_MOV_BPF_K:
A = (u32) K;
CONT;
BPF_ALU64_BPF_MOV_BPF_X:
A = X;
CONT;
BPF_ALU64_BPF_MOV_BPF_K:
A = K;
CONT;
BPF_ALU64_BPF_ARSH_BPF_X:
(*(s64 *) &A) >>= X;
CONT;
BPF_ALU64_BPF_ARSH_BPF_K:
(*(s64 *) &A) >>= K;
CONT;
BPF_ALU64_BPF_MOD_BPF_X:
if (unlikely(X == 0))
return 0;
tmp = A;
A = do_div(tmp, X);
CONT;
BPF_ALU_BPF_MOD_BPF_X:
if (unlikely(X == 0))
return 0;
tmp = (u32) A;
A = do_div(tmp, (u32) X);
CONT;
BPF_ALU64_BPF_MOD_BPF_K:
tmp = A;
A = do_div(tmp, K);
CONT;
BPF_ALU_BPF_MOD_BPF_K:
tmp = (u32) A;
A = do_div(tmp, (u32) K);
CONT;
BPF_ALU64_BPF_DIV_BPF_X:
if (unlikely(X == 0))
return 0;
do_div(A, X);
CONT;
BPF_ALU_BPF_DIV_BPF_X:
if (unlikely(X == 0))
return 0;
tmp = (u32) A;
do_div(tmp, (u32) X);
A = (u32) tmp;
CONT;
BPF_ALU64_BPF_DIV_BPF_K:
do_div(A, K);
CONT;
BPF_ALU_BPF_DIV_BPF_K:
tmp = (u32) A;
do_div(tmp, (u32) K);
A = (u32) tmp;
CONT;
BPF_ALU_BPF_END_BPF_TO_BE:
switch (K) {
case 16:
A = (__force u16) cpu_to_be16(A);
break;
case 32:
A = (__force u32) cpu_to_be32(A);
break;
case 64:
A = (__force u64) cpu_to_be64(A);
break;
}
CONT;
BPF_ALU_BPF_END_BPF_TO_LE:
switch (K) {
case 16:
A = (__force u16) cpu_to_le16(A);
break;
case 32:
A = (__force u32) cpu_to_le32(A);
break;
case 64:
A = (__force u64) cpu_to_le64(A);
break;
}
CONT;
/* CALL */
BPF_JMP_BPF_CALL_0:
/* Function call scratches R1-R5 registers, preserves R6-R9,
* and stores return value into R0.
*/
R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3],
regs[4], regs[5]);
CONT;
/* JMP */
BPF_JMP_BPF_JA_0:
insn += insn->off;
CONT;
BPF_JMP_BPF_JEQ_BPF_X:
if (A == X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JEQ_BPF_K:
if (A == K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JNE_BPF_X:
if (A != X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JNE_BPF_K:
if (A != K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JGT_BPF_X:
if (A > X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JGT_BPF_K:
if (A > K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JGE_BPF_X:
if (A >= X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JGE_BPF_K:
if (A >= K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSGT_BPF_X:
if (((s64)A) > ((s64)X)) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSGT_BPF_K:
if (((s64)A) > ((s64)K)) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSGE_BPF_X:
if (((s64)A) >= ((s64)X)) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSGE_BPF_K:
if (((s64)A) >= ((s64)K)) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSET_BPF_X:
if (A & X) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_JSET_BPF_K:
if (A & K) {
insn += insn->off;
CONT_JMP;
}
CONT;
BPF_JMP_BPF_EXIT_0:
return R0;
/* STX, ST and LDX */
#define LDST(SIZEOP, SIZE) \
BPF_STX_BPF_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (A + insn->off) = X; \
CONT; \
BPF_ST_BPF_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (A + insn->off) = K; \
CONT; \
BPF_LDX_BPF_MEM_##SIZEOP: \
A = *(SIZE *)(unsigned long) (X + insn->off); \
CONT;
LDST(BPF_B, u8)
LDST(BPF_H, u16)
LDST(BPF_W, u32)
LDST(BPF_DW, u64)
#undef LDST
BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
atomic_add((u32) X, (atomic_t *)(unsigned long)
(A + insn->off));
CONT;
BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
atomic64_add((u64) X, (atomic64_t *)(unsigned long)
(A + insn->off));
CONT;
BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
off = K;
load_word:
/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
* appear in programs where ctx == skb. All programs
* keep 'ctx' in regs[CTX_REG] == R6; sk_convert_filter()
* saves it in R6, and the internal BPF verifier will
* check that R6 == ctx.
*
* BPF_ABS and BPF_IND are wrappers of function calls, so
* they scratch R1-R5 registers, preserve R6-R9, and store
* return value into R0.
*
* Implicit input:
* ctx
*
* Explicit input:
* X == any register
* K == 32-bit immediate
*
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
if (likely(ptr != NULL)) {
R0 = get_unaligned_be32(ptr);
CONT;
}
return 0;
BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
off = K;
load_half:
ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
if (likely(ptr != NULL)) {
R0 = get_unaligned_be16(ptr);
CONT;
}
return 0;
BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */
off = K;
load_byte:
ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
if (likely(ptr != NULL)) {
R0 = *(u8 *)ptr;
CONT;
}
return 0;
BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
off = K + X;
goto load_word;
BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
off = K + X;
goto load_half;
BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
off = K + X;
goto load_byte;
default_label:
/* If we ever reach this, we have a bug somewhere. */
WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
return 0;
#undef CONT_JMP
#undef CONT
#undef R0
#undef X
#undef A
#undef K
}
u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
const struct sock_filter_int *insni)
__attribute__ ((alias ("__sk_run_filter")));
u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
const struct sock_filter_int *insni)
__attribute__ ((alias ("__sk_run_filter")));
EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
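/*
* A minimal internal-BPF program sketch for __sk_run_filter(): move an
* immediate into R0 and exit, i.e. "return 42". Field values follow the
* struct sock_filter_int layout (a_reg is the destination register);
* this bypasses the usual conversion path and is for illustration only.
* ctx is unused by these two insns, so NULL is fine here:
*
* struct sock_filter_int prog[] = {
* { .code = BPF_ALU64 | BPF_MOV | BPF_K, .a_reg = 0, .imm = 42 },
* { .code = BPF_JMP | BPF_EXIT },
* };
* u32 ret = __sk_run_filter(NULL, prog);
*
* ret is 42. Real programs come from sk_convert_filter() below, which
* also guarantees the register-usage invariants the interpreter needs.
*/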
/* Helper to find the offset of pkt_type in the sk_buff structure. We want
* to make sure it's still a 3-bit field starting at a byte boundary;
* taken from arch/x86/net/bpf_jit_comp.c.
*/
#define PKT_TYPE_MAX 7
static unsigned int pkt_type_offset(void)
{
struct sk_buff skb_probe = { .pkt_type = ~0, };
u8 *ct = (u8 *) &skb_probe;
unsigned int off;
for (off = 0; off < sizeof(struct sk_buff); off++) {
if (ct[off] == PKT_TYPE_MAX)
return off;
}
pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
return -1;
}
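/*
* The probe above works because pkt_type is a 3-bit bitfield: writing ~0
* stores the value 7 (PKT_TYPE_MAX) into whichever byte the compiler
* placed the field, so scanning the otherwise-zeroed struct for that
* byte recovers the offset without hard-coding any layout. The same
* trick in isolation (sketch with a made-up struct):
*
* struct probe { unsigned int field:3; };
* struct probe p = { .field = ~0 };
* unsigned int off = (u8 *)memchr(&p, 7, sizeof(p)) - (u8 *)&p;
*
* It assumes no other byte of the struct happens to equal 7, which holds
* for the sk_buff initialized as above.
*/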
static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
return __skb_get_poff(skb);
}
static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
struct sk_buff *skb = (struct sk_buff *)(long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = (struct nlattr *) &skb->data[A];
if (nla->nla_len > A - skb->len)
return 0;
nla = nla_find_nested(nla, X);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
{
return raw_smp_processor_id();
}
/* Register mappings for user programs. */
#define A_REG 0
#define X_REG 7
#define TMP_REG 8
#define ARG2_REG 2
#define ARG3_REG 3
static bool convert_bpf_extensions(struct sock_filter *fp,
struct sock_filter_int **insnp)
{
struct sock_filter_int *insn = *insnp;
switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PROTOCOL:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
insn->code = BPF_LDX | BPF_MEM | BPF_H;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, protocol);
insn++;
/* A = ntohs(A) [emitting a nop or swap16] */
insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
insn->a_reg = A_REG;
insn->imm = 16;
break;
case SKF_AD_OFF + SKF_AD_PKTTYPE:
insn->code = BPF_LDX | BPF_MEM | BPF_B;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = pkt_type_offset();
if (insn->off < 0)
return false;
insn++;
insn->code = BPF_ALU | BPF_AND | BPF_K;
insn->a_reg = A_REG;
insn->imm = PKT_TYPE_MAX;
break;
case SKF_AD_OFF + SKF_AD_IFINDEX:
case SKF_AD_OFF + SKF_AD_HATYPE:
if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
insn->code = BPF_LDX | BPF_MEM | BPF_DW;
else
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = TMP_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, dev);
insn++;
insn->code = BPF_JMP | BPF_JNE | BPF_K;
insn->a_reg = TMP_REG;
insn->imm = 0;
insn->off = 1;
insn++;
insn->code = BPF_JMP | BPF_EXIT;
insn++;
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
insn->a_reg = A_REG;
insn->x_reg = TMP_REG;
if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->off = offsetof(struct net_device, ifindex);
} else {
insn->code = BPF_LDX | BPF_MEM | BPF_H;
insn->off = offsetof(struct net_device, type);
}
break;
case SKF_AD_OFF + SKF_AD_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, mark);
break;
case SKF_AD_OFF + SKF_AD_RXHASH:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, hash);
break;
case SKF_AD_OFF + SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
insn->code = BPF_LDX | BPF_MEM | BPF_H;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, queue_mapping);
break;
case SKF_AD_OFF + SKF_AD_VLAN_TAG:
case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
insn->code = BPF_LDX | BPF_MEM | BPF_H;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, vlan_tci);
insn++;
BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
insn->code = BPF_ALU | BPF_AND | BPF_K;
insn->a_reg = A_REG;
insn->imm = ~VLAN_TAG_PRESENT;
} else {
insn->code = BPF_ALU | BPF_RSH | BPF_K;
insn->a_reg = A_REG;
insn->imm = 12;
insn++;
insn->code = BPF_ALU | BPF_AND | BPF_K;
insn->a_reg = A_REG;
insn->imm = 1;
}
break;
case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
case SKF_AD_OFF + SKF_AD_NLATTR:
case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
case SKF_AD_OFF + SKF_AD_CPU:
/* arg1 = ctx */
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = ARG1_REG;
insn->x_reg = CTX_REG;
insn++;
/* arg2 = A */
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = ARG2_REG;
insn->x_reg = A_REG;
insn++;
/* arg3 = X */
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = ARG3_REG;
insn->x_reg = X_REG;
insn++;
/* Emit call(ctx, arg2=A, arg3=X) */
insn->code = BPF_JMP | BPF_CALL;
switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
insn->imm = __skb_get_pay_offset - __bpf_call_base;
break;
case SKF_AD_OFF + SKF_AD_NLATTR:
insn->imm = __skb_get_nlattr - __bpf_call_base;
break;
case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
insn->imm = __skb_get_nlattr_nest - __bpf_call_base;
break;
case SKF_AD_OFF + SKF_AD_CPU:
insn->imm = __get_raw_cpu_id - __bpf_call_base;
break;
}
break;
case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
insn->code = BPF_ALU | BPF_XOR | BPF_X;
insn->a_reg = A_REG;
insn->x_reg = X_REG;
break;
default:
/* This is just a dummy call to avoid letting the compiler
* evict __bpf_call_base() as an optimization. Placed here
* where no-one bothers.
*/
BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
return false;
}
*insnp = insn;
return true;
}
/**
* sk_convert_filter - convert filter program
* @prog: the user passed filter program
* @len: the length of the user passed filter program
* @new_prog: buffer where converted program will be stored
* @new_len: pointer to store length of converted program
*
* Remap 'sock_filter' style BPF instruction set to 'sock_filter_int' style.
* Conversion workflow:
*
* 1) First pass for calculating the new program length:
* sk_convert_filter(old_prog, old_len, NULL, &new_len)
*
* 2) Second call to do the remapping; internally it runs two
* passes: the 1st finds the new jump offsets, the 2nd remaps:
* new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
* sk_convert_filter(old_prog, old_len, new_prog, &new_len);
*
* User BPF's register A is mapped to our BPF register 6, user BPF
* register X is mapped to BPF register 7; frame pointer is always
* register 10; Context 'void *ctx' is stored in register 1, that is,
* for socket filters: ctx == 'struct sk_buff *', for seccomp:
* ctx == 'struct seccomp_data *'.
*/
int sk_convert_filter(struct sock_filter *prog, int len,
struct sock_filter_int *new_prog, int *new_len)
{
int new_flen = 0, pass = 0, target, i;
struct sock_filter_int *new_insn;
struct sock_filter *fp;
int *addrs = NULL;
u8 bpf_src;
BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG);
if (len <= 0 || len >= BPF_MAXINSNS)
return -EINVAL;
if (new_prog) {
addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
if (!addrs)
return -ENOMEM;
}
do_pass:
new_insn = new_prog;
fp = prog;
if (new_insn) {
new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
new_insn->a_reg = CTX_REG;
new_insn->x_reg = ARG1_REG;
}
new_insn++;
for (i = 0; i < len; fp++, i++) {
struct sock_filter_int tmp_insns[6] = { };
struct sock_filter_int *insn = tmp_insns;
if (addrs)
addrs[i] = new_insn - new_prog;
switch (fp->code) {
/* All arithmetic insns and skb loads map as-is. */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_NEG:
case BPF_LD | BPF_ABS | BPF_W:
case BPF_LD | BPF_ABS | BPF_H:
case BPF_LD | BPF_ABS | BPF_B:
case BPF_LD | BPF_IND | BPF_W:
case BPF_LD | BPF_IND | BPF_H:
case BPF_LD | BPF_IND | BPF_B:
/* Check for overloaded BPF extension and
* directly convert it if found, otherwise
* just move on with mapping.
*/
if (BPF_CLASS(fp->code) == BPF_LD &&
BPF_MODE(fp->code) == BPF_ABS &&
convert_bpf_extensions(fp, &insn))
break;
insn->code = fp->code;
insn->a_reg = A_REG;
insn->x_reg = X_REG;
insn->imm = fp->k;
break;
/* Jump opcodes map as-is, but offsets need adjustment. */
case BPF_JMP | BPF_JA:
target = i + fp->k + 1;
insn->code = fp->code;
#define EMIT_JMP \
do { \
if (target >= len || target < 0) \
goto err; \
insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
/* Adjust pc relative offset for 2nd or 3rd insn. */ \
insn->off -= insn - tmp_insns; \
} while (0)
EMIT_JMP;
break;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
/* BPF immediates are signed, zero extend
* immediate into tmp register and use it
* in compare insn.
*/
insn->code = BPF_ALU | BPF_MOV | BPF_K;
insn->a_reg = TMP_REG;
insn->imm = fp->k;
insn++;
insn->a_reg = A_REG;
insn->x_reg = TMP_REG;
bpf_src = BPF_X;
} else {
insn->a_reg = A_REG;
insn->x_reg = X_REG;
insn->imm = fp->k;
bpf_src = BPF_SRC(fp->code);
}
/* Common case where 'jump_false' is next insn. */
if (fp->jf == 0) {
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
target = i + fp->jt + 1;
EMIT_JMP;
break;
}
/* Convert JEQ into JNE when 'jump_true' is next insn. */
if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
insn->code = BPF_JMP | BPF_JNE | bpf_src;
target = i + fp->jf + 1;
EMIT_JMP;
break;
}
/* Other jumps are mapped into two insns: Jxx and JA. */
target = i + fp->jt + 1;
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
EMIT_JMP;
insn++;
insn->code = BPF_JMP | BPF_JA;
target = i + fp->jf + 1;
EMIT_JMP;
break;
/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B:
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = TMP_REG;
insn->x_reg = A_REG;
insn++;
insn->code = BPF_LD | BPF_ABS | BPF_B;
insn->a_reg = A_REG;
insn->imm = fp->k;
insn++;
insn->code = BPF_ALU | BPF_AND | BPF_K;
insn->a_reg = A_REG;
insn->imm = 0xf;
insn++;
insn->code = BPF_ALU | BPF_LSH | BPF_K;
insn->a_reg = A_REG;
insn->imm = 2;
insn++;
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = X_REG;
insn->x_reg = A_REG;
insn++;
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = A_REG;
insn->x_reg = TMP_REG;
break;
/* RET_K, RET_A are remapped into 2 insns. */
case BPF_RET | BPF_A:
case BPF_RET | BPF_K:
insn->code = BPF_ALU | BPF_MOV |
(BPF_RVAL(fp->code) == BPF_K ?
BPF_K : BPF_X);
insn->a_reg = 0;
insn->x_reg = A_REG;
insn->imm = fp->k;
insn++;
insn->code = BPF_JMP | BPF_EXIT;
break;
/* Store to stack. */
case BPF_ST:
case BPF_STX:
insn->code = BPF_STX | BPF_MEM | BPF_W;
insn->a_reg = FP_REG;
insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG;
insn->off = -(BPF_MEMWORDS - fp->k) * 4;
break;
/* Load from stack. */
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
A_REG : X_REG;
insn->x_reg = FP_REG;
insn->off = -(BPF_MEMWORDS - fp->k) * 4;
break;
/* A = K or X = K */
case BPF_LD | BPF_IMM:
case BPF_LDX | BPF_IMM:
insn->code = BPF_ALU | BPF_MOV | BPF_K;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
A_REG : X_REG;
insn->imm = fp->k;
break;
/* X = A */
case BPF_MISC | BPF_TAX:
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = X_REG;
insn->x_reg = A_REG;
break;
/* A = X */
case BPF_MISC | BPF_TXA:
insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
insn->a_reg = A_REG;
insn->x_reg = X_REG;
break;
/* A = skb->len or X = skb->len */
case BPF_LD | BPF_W | BPF_LEN:
case BPF_LDX | BPF_W | BPF_LEN:
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
A_REG : X_REG;
insn->x_reg = CTX_REG;
insn->off = offsetof(struct sk_buff, len);
break;
/* access seccomp_data fields */
case BPF_LDX | BPF_ABS | BPF_W:
insn->code = BPF_LDX | BPF_MEM | BPF_W;
insn->a_reg = A_REG;
insn->x_reg = CTX_REG;
insn->off = fp->k;
break;
default:
goto err;
}
insn++;
if (new_prog)
memcpy(new_insn, tmp_insns,
sizeof(*insn) * (insn - tmp_insns));
new_insn += insn - tmp_insns;
}
if (!new_prog) {
/* Only calculating new length. */
*new_len = new_insn - new_prog;
return 0;
}
pass++;
if (new_flen != new_insn - new_prog) {
new_flen = new_insn - new_prog;
if (pass > 2)
goto err;
goto do_pass;
}
kfree(addrs);
BUG_ON(*new_len != new_flen);
return 0;
err:
kfree(addrs);
return -EINVAL;
}
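/*
* Putting the two-pass protocol above together (sketch; this mirrors
* what __sk_migrate_filter() below does, error handling elided):
*
* int new_len;
* struct sock_filter_int *insns;
*
* sk_convert_filter(old_prog, old_len, NULL, &new_len);
* insns = kmalloc(new_len * sizeof(*insns), GFP_KERNEL);
* sk_convert_filter(old_prog, old_len, insns, &new_len);
*
* old_prog must stay valid across both calls and must not overlap insns,
* which is why the caller below kmemdup()s the user program first.
*/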
/* Security:
*
* A BPF program is able to use 16 cells of memory to store intermediate
* values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
*
* As we don't want to clear the mem[] array for each packet going through
* sk_run_filter(), we check that a filter loaded by user space never tries
* to read a cell that was not previously written, and we check all branches
* to be sure a malicious user doesn't try to abuse us.
*/
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
int pc, ret = 0;
BUILD_BUG_ON(BPF_MEMWORDS > 16);
masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
if (!masks)
return -ENOMEM;
memset(masks, 0xff, flen * sizeof(*masks));
for (pc = 0; pc < flen; pc++) {
memvalid &= masks[pc];
switch (filter[pc].code) {
case BPF_S_ST:
case BPF_S_STX:
memvalid |= (1 << filter[pc].k);
break;
case BPF_S_LD_MEM:
case BPF_S_LDX_MEM:
if (!(memvalid & (1 << filter[pc].k))) {
ret = -EINVAL;
goto error;
}
break;
case BPF_S_JMP_JA:
/* a jump must set masks on target */
masks[pc + 1 + filter[pc].k] &= memvalid;
memvalid = ~0;
break;
case BPF_S_JMP_JEQ_K:
case BPF_S_JMP_JEQ_X:
case BPF_S_JMP_JGE_K:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JSET_X:
case BPF_S_JMP_JSET_K:
/* a jump must set masks on targets */
masks[pc + 1 + filter[pc].jt] &= memvalid;
masks[pc + 1 + filter[pc].jf] &= memvalid;
memvalid = ~0;
break;
}
}
error:
kfree(masks);
return ret;
}
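/*
* Example of a program this check rejects, written in the classic
* mnemonics that sk_chk_filter() below translates to the BPF_S_* codes
* matched here: reading scratch cell 0 before anything has stored to it
* leaves bit 0 of memvalid clear, so the BPF_S_LD_MEM case bails out
* with -EINVAL:
*
* { BPF_LD | BPF_MEM, 0, 0, 0 },
* { BPF_RET | BPF_K, 0, 0, 0 },
*
* Prepending a store ({ BPF_ST, 0, 0, 0 }) makes the same program pass,
* since BPF_S_ST sets the corresponding memvalid bit first.
*/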
/**
* sk_chk_filter - verify socket filter code
* @filter: filter to verify
* @flen: length of filter
*
* Check the user's filter code. If we let some ugly
* filter code slip through, kaboom! The filter must contain
* no references or jumps that are out of range, no illegal
* instructions, and must end with a RET instruction.
*
* All jumps are forward as they are not signed.
*
* Returns 0 if the rule set is legal or -EINVAL if not.
*/
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
/*
* Valid instructions are initialized to non-0.
* Invalid instructions are initialized to 0.
*/
static const u8 codes[] = {
[BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
[BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
[BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
[BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
[BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
[BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
[BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
[BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
[BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
[BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
[BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
[BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
[BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
[BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
[BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
[BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
[BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
[BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
[BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
[BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
[BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
[BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
[BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
[BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
[BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
[BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
[BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
[BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
[BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
[BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
[BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
[BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
[BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
[BPF_RET|BPF_K] = BPF_S_RET_K,
[BPF_RET|BPF_A] = BPF_S_RET_A,
[BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
[BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
[BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
[BPF_ST] = BPF_S_ST,
[BPF_STX] = BPF_S_STX,
[BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
[BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
[BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
[BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
[BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
[BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
[BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
};
int pc;
bool anc_found;
if (flen == 0 || flen > BPF_MAXINSNS)
return -EINVAL;
/* check the filter code now */
for (pc = 0; pc < flen; pc++) {
struct sock_filter *ftest = &filter[pc];
u16 code = ftest->code;
if (code >= ARRAY_SIZE(codes))
return -EINVAL;
code = codes[code];
if (!code)
return -EINVAL;
/* Some instructions need special checks */
switch (code) {
case BPF_S_ALU_DIV_K:
case BPF_S_ALU_MOD_K:
/* check for division by zero */
if (ftest->k == 0)
return -EINVAL;
break;
case BPF_S_LD_MEM:
case BPF_S_LDX_MEM:
case BPF_S_ST:
case BPF_S_STX:
/* check for invalid memory addresses */
if (ftest->k >= BPF_MEMWORDS)
return -EINVAL;
break;
case BPF_S_JMP_JA:
/*
* Note, the large ftest->k might cause loops.
* Compare this with conditional jumps below,
* where offsets are limited. --ANK (981016)
*/
if (ftest->k >= (unsigned int)(flen-pc-1))
return -EINVAL;
break;
case BPF_S_JMP_JEQ_K:
case BPF_S_JMP_JEQ_X:
case BPF_S_JMP_JGE_K:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JSET_X:
case BPF_S_JMP_JSET_K:
/* for conditionals both must be safe */
if (pc + ftest->jt + 1 >= flen ||
pc + ftest->jf + 1 >= flen)
return -EINVAL;
break;
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
anc_found = false;
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
code = BPF_S_ANC_##CODE; \
anc_found = true; \
break
switch (ftest->k) {
ANCILLARY(PROTOCOL);
ANCILLARY(PKTTYPE);
ANCILLARY(IFINDEX);
ANCILLARY(NLATTR);
ANCILLARY(NLATTR_NEST);
ANCILLARY(MARK);
ANCILLARY(QUEUE);
ANCILLARY(HATYPE);
ANCILLARY(RXHASH);
ANCILLARY(CPU);
ANCILLARY(ALU_XOR_X);
ANCILLARY(VLAN_TAG);
ANCILLARY(VLAN_TAG_PRESENT);
ANCILLARY(PAY_OFFSET);
}
/* ancillary operation unknown or unsupported */
if (anc_found == false && ftest->k >= SKF_AD_OFF)
return -EINVAL;
}
ftest->code = code;
}
/* last instruction must be a RET code */
switch (filter[flen - 1].code) {
case BPF_S_RET_K:
case BPF_S_RET_A:
return check_load_and_stores(filter, flen);
}
return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
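/*
* A minimal program that passes sk_chk_filter() (sketch): one
* unconditional return accepting up to 0xffff bytes of each packet.
* Note the validation rewrites the code fields to the internal BPF_S_*
* values in place, so a program must not be submitted twice:
*
* struct sock_filter accept_all[] = {
* { BPF_RET | BPF_K, 0, 0, 0xffff },
* };
* int err = sk_chk_filter(accept_all, 1);
*
* err is 0 here, and accept_all[0].code is now BPF_S_RET_K.
*/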
static int sk_store_orig_filter(struct sk_filter *fp,
const struct sock_fprog *fprog)
{
unsigned int fsize = sk_filter_proglen(fprog);
struct sock_fprog_kern *fkprog;
fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
if (!fp->orig_prog)
return -ENOMEM;
fkprog = fp->orig_prog;
fkprog->len = fprog->len;
fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
if (!fkprog->filter) {
kfree(fp->orig_prog);
return -ENOMEM;
}
return 0;
}
static void sk_release_orig_filter(struct sk_filter *fp)
{
struct sock_fprog_kern *fprog = fp->orig_prog;
if (fprog) {
kfree(fprog->filter);
kfree(fprog);
}
}
/**
* sk_filter_release_rcu - Release a socket filter by rcu_head
* @rcu: rcu_head that contains the sk_filter to free
*/
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
sk_release_orig_filter(fp);
bpf_jit_free(fp);
}
/**
* sk_filter_release - release a socket filter
* @fp: filter to remove
*
* Remove a filter from a socket and release its resources.
*/
static void sk_filter_release(struct sk_filter *fp)
{
if (atomic_dec_and_test(&fp->refcnt))
call_rcu(&fp->rcu, sk_filter_release_rcu);
}
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
sk_filter_release(fp);
}
void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
atomic_inc(&fp->refcnt);
atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
}
static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
struct sock *sk,
unsigned int len)
{
struct sk_filter *fp_new;
if (sk == NULL)
return krealloc(fp, len, GFP_KERNEL);
fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
if (fp_new) {
memcpy(fp_new, fp, sizeof(struct sk_filter));
/* As we're keeping orig_prog in fp_new along the way,
* we need to make sure we're not evicting it
* from the old fp.
*/
fp->orig_prog = NULL;
sk_filter_uncharge(sk, fp);
}
return fp_new;
}
static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
struct sock *sk)
{
struct sock_filter *old_prog;
struct sk_filter *old_fp;
int i, err, new_len, old_len = fp->len;
/* We are free to overwrite insns et al right here as it
* won't be used at this point in time anymore internally
* after the migration to the internal BPF instruction
* representation.
*/
BUILD_BUG_ON(sizeof(struct sock_filter) !=
sizeof(struct sock_filter_int));
/* For now, we need to unfiddle BPF_S_* identifiers in place.
* This can sooner or later be subject to removal, e.g. when
* JITs have been converted.
*/
for (i = 0; i < fp->len; i++)
sk_decode_filter(&fp->insns[i], &fp->insns[i]);
/* Conversion cannot happen on overlapping memory areas,
* so we need to keep the user BPF around until the 2nd
* pass. At this time, the user BPF is stored in fp->insns.
*/
old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
GFP_KERNEL);
if (!old_prog) {
err = -ENOMEM;
goto out_err;
}
/* 1st pass: calculate the new program length. */
err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
if (err)
goto out_err_free;
/* Expand fp for appending the new filter representation. */
old_fp = fp;
fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
if (!fp) {
/* The old_fp is still around in case we couldn't
* allocate new memory, so uncharge on that one.
*/
fp = old_fp;
err = -ENOMEM;
goto out_err_free;
}
fp->bpf_func = sk_run_filter_int_skb;
fp->len = new_len;
/* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
if (err)
/* The 2nd sk_convert_filter() can fail only if it fails
* to allocate memory; the remapping itself must succeed.
* Note that at this time old_fp has already been released
* by __sk_migrate_realloc().
*/
goto out_err_free;
kfree(old_prog);
return fp;
out_err_free:
kfree(old_prog);
out_err:
/* Rollback filter setup. */
if (sk != NULL)
sk_filter_uncharge(sk, fp);
else
kfree(fp);
return ERR_PTR(err);
}
static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
struct sock *sk)
{
int err;
fp->bpf_func = NULL;
fp->jited = 0;
err = sk_chk_filter(fp->insns, fp->len);
if (err)
return ERR_PTR(err);
/* Probe if we can JIT compile the filter and if so, do
* the compilation of the filter.
*/
bpf_jit_compile(fp);
/* JIT compiler couldn't process this filter, so do the
* internal BPF translation for the optimized interpreter.
*/
if (!fp->jited)
fp = __sk_migrate_filter(fp, sk);
return fp;
}
/**
* sk_unattached_filter_create - create an unattached filter
* @fprog: the filter program
* @pfp: the unattached filter that is created
*
* Create a filter independent of any socket. We first run some
* sanity checks on it to make sure it does not explode on us later.
* If an error occurs or there is insufficient memory for the filter
* a negative errno code is returned. On success the return is zero.
*/
int sk_unattached_filter_create(struct sk_filter **pfp,
struct sock_fprog *fprog)
{
unsigned int fsize = sk_filter_proglen(fprog);
struct sk_filter *fp;
/* Make sure the new filter is actually there and has a sane length. */
if (fprog->filter == NULL)
return -EINVAL;
fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
if (!fp)
return -ENOMEM;
memcpy(fp->insns, fprog->filter, fsize);
atomic_set(&fp->refcnt, 1);
fp->len = fprog->len;
/* Since unattached filters are not copied back to user
* space through sk_get_filter(), we do not need to hold
* a copy here, and can spare us the work.
*/
fp->orig_prog = NULL;
/* __sk_prepare_filter() already takes care of uncharging
* memory in case something goes wrong.
*/
fp = __sk_prepare_filter(fp, NULL);
if (IS_ERR(fp))
return PTR_ERR(fp);
*pfp = fp;
return 0;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
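/*
* Usage sketch for a kernel-internal caller, e.g. a driver wanting a
* private packet classifier (error handling elided, program assumed
* already validated by construction):
*
* struct sock_filter insns[] = {
* { BPF_RET | BPF_K, 0, 0, 0xffff },
* };
* struct sock_fprog fprog = { .len = 1, .filter = insns };
* struct sk_filter *fp;
*
* if (sk_unattached_filter_create(&fp, &fprog) == 0) {
* unsigned int keep = SK_RUN_FILTER(fp, skb);
* sk_unattached_filter_destroy(fp);
* }
*/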
void sk_unattached_filter_destroy(struct sk_filter *fp)
{
sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
/**
* sk_attach_filter - attach a socket filter
* @fprog: the filter program
* @sk: the socket to use
*
* Attach the user's filter code. We first run some sanity checks on
* it to make sure it does not explode on us later. If an error
* occurs or there is insufficient memory for the filter a negative
* errno code is returned. On success the return is zero.
*/
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
struct sk_filter *fp, *old_fp;
unsigned int fsize = sk_filter_proglen(fprog);
unsigned int sk_fsize = sk_filter_size(fprog->len);
int err;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return -EPERM;
/* Make sure the new filter is actually there and has a sane length. */
if (fprog->filter == NULL)
return -EINVAL;
fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
if (!fp)
return -ENOMEM;
if (copy_from_user(fp->insns, fprog->filter, fsize)) {
sock_kfree_s(sk, fp, sk_fsize);
return -EFAULT;
}
atomic_set(&fp->refcnt, 1);
fp->len = fprog->len;
err = sk_store_orig_filter(fp, fprog);
if (err) {
sk_filter_uncharge(sk, fp);
return -ENOMEM;
}
/* __sk_prepare_filter() already takes care of uncharging
* memory in case something goes wrong.
*/
fp = __sk_prepare_filter(fp, sk);
if (IS_ERR(fp))
return PTR_ERR(fp);
old_fp = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
rcu_assign_pointer(sk->sk_filter, fp);
if (old_fp)
sk_filter_uncharge(sk, old_fp);
return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
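/*
* The user-space counterpart of the attach path above is setsockopt()
* with SO_ATTACH_FILTER; sk_detach_filter() below backs
* SO_DETACH_FILTER. Roughly (user-space sketch, error handling elided):
*
* struct sock_filter insns[] = {
* { BPF_RET | BPF_K, 0, 0, 0xffff },
* };
* struct sock_fprog prog = { .len = 1, .filter = insns };
*
* setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
*/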
int sk_detach_filter(struct sock *sk)
{
int ret = -ENOENT;
struct sk_filter *filter;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return -EPERM;
filter = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
if (filter) {
RCU_INIT_POINTER(sk->sk_filter, NULL);
sk_filter_uncharge(sk, filter);
ret = 0;
}
return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
static const u16 decodes[] = {
[BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
[BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
[BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
[BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
[BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
[BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
[BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
[BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
[BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
[BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
[BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
[BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
[BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
[BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
[BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
[BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
[BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
[BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
[BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
[BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
[BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
[BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
[BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
[BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
[BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
[BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
[BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
[BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
[BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
[BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
[BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
[BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
[BPF_S_RET_K] = BPF_RET|BPF_K,
[BPF_S_RET_A] = BPF_RET|BPF_A,
[BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
[BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
[BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
[BPF_S_ST] = BPF_ST,
[BPF_S_STX] = BPF_STX,
[BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
[BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
[BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
[BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
[BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
[BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
[BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
[BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
[BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
};
u16 code;
code = filt->code;
to->code = decodes[code];
to->jt = filt->jt;
to->jf = filt->jf;
to->k = filt->k;
}
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
unsigned int len)
{
struct sock_fprog_kern *fprog;
struct sk_filter *filter;
int ret = 0;
lock_sock(sk);
filter = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
if (!filter)
goto out;
/* We're copying the filter that has been originally attached,
* so no conversion/decode needed anymore.
*/
fprog = filter->orig_prog;
ret = fprog->len;
if (!len)
/* User space is only asking for the number of filter blocks. */
goto out;
ret = -EINVAL;
if (len < fprog->len)
goto out;
ret = -EFAULT;
if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
goto out;
/* Instead of bytes, the API requests to return the number
* of filter blocks.
*/
ret = fprog->len;
out:
release_sock(sk);
return ret;
}
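/*
* sk_get_filter() backs the SO_GET_FILTER getsockopt(), which user space
* typically drives in two steps (sketch): a first call with a zero
* length to learn the number of filter blocks, then a second call with a
* suitably sized buffer:
*
* socklen_t optlen = 0;
* getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &optlen);
* struct sock_filter *buf = calloc(optlen, sizeof(*buf));
* getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, buf, &optlen);
*/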
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2124_0 |
crossvul-cpp_data_good_2219_0 | /*
* Routines for driver control interface
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/threads.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/info.h>
#include <sound/control.h>
/* max number of user-defined controls */
#define MAX_USER_CONTROLS 32
#define MAX_CONTROL_COUNT 1028
struct snd_kctl_ioctl {
struct list_head list; /* list of all ioctls */
snd_kctl_ioctl_func_t fioctl;
};
static DECLARE_RWSEM(snd_ioctl_rwsem);
static LIST_HEAD(snd_control_ioctls);
#ifdef CONFIG_COMPAT
static LIST_HEAD(snd_control_compat_ioctls);
#endif
static int snd_ctl_open(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
card = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_CONTROL);
if (!card) {
err = -ENODEV;
goto __error1;
}
err = snd_card_file_add(card, file);
if (err < 0) {
err = -ENODEV;
goto __error1;
}
if (!try_module_get(card->module)) {
err = -EFAULT;
goto __error2;
}
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (ctl == NULL) {
err = -ENOMEM;
goto __error;
}
INIT_LIST_HEAD(&ctl->events);
init_waitqueue_head(&ctl->change_sleep);
spin_lock_init(&ctl->read_lock);
ctl->card = card;
ctl->prefer_pcm_subdevice = -1;
ctl->prefer_rawmidi_subdevice = -1;
ctl->pid = get_pid(task_pid(current));
file->private_data = ctl;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_add_tail(&ctl->list, &card->ctl_files);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
snd_card_unref(card);
return 0;
__error:
module_put(card->module);
__error2:
snd_card_file_remove(card, file);
__error1:
if (card)
snd_card_unref(card);
return err;
}
static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl)
{
unsigned long flags;
struct snd_kctl_event *cread;
spin_lock_irqsave(&ctl->read_lock, flags);
while (!list_empty(&ctl->events)) {
cread = snd_kctl_event(ctl->events.next);
list_del(&cread->list);
kfree(cread);
}
spin_unlock_irqrestore(&ctl->read_lock, flags);
}
static int snd_ctl_release(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
struct snd_kcontrol *control;
unsigned int idx;
ctl = file->private_data;
file->private_data = NULL;
card = ctl->card;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_del(&ctl->list);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
down_write(&card->controls_rwsem);
list_for_each_entry(control, &card->controls, list)
for (idx = 0; idx < control->count; idx++)
if (control->vd[idx].owner == ctl)
control->vd[idx].owner = NULL;
up_write(&card->controls_rwsem);
snd_ctl_empty_read_queue(ctl);
put_pid(ctl->pid);
kfree(ctl);
module_put(card->module);
snd_card_file_remove(card, file);
return 0;
}
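/**
 * snd_ctl_notify - send a change notification for the given control
 * @card: the card instance
 * @mask: the event mask, SNDRV_CTL_EVENT_MASK_XXX
 * @id: the id of the control element that changed
 *
 * Queues an event on every subscribed control file; an event already
 * pending for the same numid is coalesced by OR-ing the masks.  Readers
 * sleeping in snd_ctl_read() are woken up.  (Descriptive summary added
 * from the code below.)
 */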
void snd_ctl_notify(struct snd_card *card, unsigned int mask,
struct snd_ctl_elem_id *id)
{
unsigned long flags;
struct snd_ctl_file *ctl;
struct snd_kctl_event *ev;
if (snd_BUG_ON(!card || !id))
return;
read_lock(&card->ctl_files_rwlock);
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
card->mixer_oss_change_count++;
#endif
list_for_each_entry(ctl, &card->ctl_files, list) {
if (!ctl->subscribed)
continue;
spin_lock_irqsave(&ctl->read_lock, flags);
list_for_each_entry(ev, &ctl->events, list) {
if (ev->id.numid == id->numid) {
ev->mask |= mask;
goto _found;
}
}
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (ev) {
ev->id = *id;
ev->mask = mask;
list_add_tail(&ev->list, &ctl->events);
} else {
dev_err(card->dev, "No memory available to allocate event\n");
}
_found:
wake_up(&ctl->change_sleep);
spin_unlock_irqrestore(&ctl->read_lock, flags);
kill_fasync(&ctl->fasync, SIGIO, POLL_IN);
}
read_unlock(&card->ctl_files_rwlock);
}
EXPORT_SYMBOL(snd_ctl_notify);
/**
* snd_ctl_new - create a control instance from the template
* @control: the control template
* @access: the default control access
*
* Allocates a new struct snd_kcontrol instance and copies the given template
* to the new instance. It does not copy volatile data (access).
*
 * Return: A pointer to the new instance, or %NULL on failure.
*/
static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
unsigned int access)
{
struct snd_kcontrol *kctl;
unsigned int idx;
if (snd_BUG_ON(!control || !control->count))
return NULL;
if (control->count > MAX_CONTROL_COUNT)
return NULL;
kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
if (kctl == NULL) {
pr_err("ALSA: Cannot allocate control instance\n");
return NULL;
}
*kctl = *control;
for (idx = 0; idx < kctl->count; idx++)
kctl->vd[idx].access = access;
return kctl;
}
/**
* snd_ctl_new1 - create a control instance from the template
* @ncontrol: the initialization record
* @private_data: the private data to set
*
 * Allocates a new struct snd_kcontrol instance and initializes it from the
 * given template. When the access field of ncontrol is 0, READWRITE access
 * is assumed. When the count field is 0, it is assumed to be one.
 *
 * Return: A pointer to the newly generated instance, or %NULL on failure.
*/
struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol,
void *private_data)
{
struct snd_kcontrol kctl;
unsigned int access;
if (snd_BUG_ON(!ncontrol || !ncontrol->info))
return NULL;
memset(&kctl, 0, sizeof(kctl));
kctl.id.iface = ncontrol->iface;
kctl.id.device = ncontrol->device;
kctl.id.subdevice = ncontrol->subdevice;
if (ncontrol->name) {
strlcpy(kctl.id.name, ncontrol->name, sizeof(kctl.id.name));
if (strcmp(ncontrol->name, kctl.id.name) != 0)
pr_warn("ALSA: Control name '%s' truncated to '%s'\n",
ncontrol->name, kctl.id.name);
}
kctl.id.index = ncontrol->index;
kctl.count = ncontrol->count ? ncontrol->count : 1;
access = ncontrol->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(ncontrol->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_VOLATILE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE|
SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND|
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK));
kctl.info = ncontrol->info;
kctl.get = ncontrol->get;
kctl.put = ncontrol->put;
kctl.tlv.p = ncontrol->tlv.p;
kctl.private_value = ncontrol->private_value;
kctl.private_data = private_data;
return snd_ctl_new(&kctl, access);
}
EXPORT_SYMBOL(snd_ctl_new1);
/**
* snd_ctl_free_one - release the control instance
* @kcontrol: the control instance
*
* Releases the control instance created via snd_ctl_new()
* or snd_ctl_new1().
* Don't call this after the control was added to the card.
*/
void snd_ctl_free_one(struct snd_kcontrol *kcontrol)
{
if (kcontrol) {
if (kcontrol->private_free)
kcontrol->private_free(kcontrol);
kfree(kcontrol);
}
}
EXPORT_SYMBOL(snd_ctl_free_one);
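/*
 * A new control would occupy numids last_numid + 1 .. last_numid + count.
 * The check below reports a conflict when an existing control's range
 * [numid, numid + count) overlaps that window; e.g. with last_numid = 10
 * and count = 2, the new control would take numids 11 and 12, so any
 * control covering 11 or 12 conflicts, and last_numid is advanced past it.
 */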
static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
unsigned int count)
{
struct snd_kcontrol *kctl;
/* Make sure that the ids assigned to the control do not wrap around */
if (card->last_numid >= UINT_MAX - count)
card->last_numid = 0;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid < card->last_numid + 1 + count &&
kctl->id.numid + kctl->count > card->last_numid + 1) {
card->last_numid = kctl->id.numid + kctl->count - 1;
return true;
}
}
return false;
}
static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
{
unsigned int iter = 100000;
while (snd_ctl_remove_numid_conflict(card, count)) {
if (--iter == 0) {
/* this situation is very unlikely */
dev_err(card->dev, "unable to allocate new control numid\n");
return -ENOMEM;
}
}
return 0;
}
/**
* snd_ctl_add - add the control instance to the card
* @card: the card instance
* @kcontrol: the control instance to add
*
 * Adds the control instance created via snd_ctl_new() or
 * snd_ctl_new1() to the given card. Also assigns a unique
 * numid used for fast lookups.
 *
 * The control is automatically freed if it cannot be added.
*
* Return: Zero if successful, or a negative error code on failure.
*
*/
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
unsigned int count;
int err = -EINVAL;
if (! kcontrol)
return err;
if (snd_BUG_ON(!card || !kcontrol->info))
goto error;
id = kcontrol->id;
down_write(&card->controls_rwsem);
if (snd_ctl_find_id(card, &id)) {
up_write(&card->controls_rwsem);
dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
id.iface,
id.device,
id.subdevice,
id.name,
id.index);
err = -EBUSY;
goto error;
}
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
err = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return err;
}
EXPORT_SYMBOL(snd_ctl_add);
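/*
 * Illustrative sketch of the typical driver-side usage of snd_ctl_new1()
 * and snd_ctl_add(); the control name and the example_get()/example_put()
 * callbacks are hypothetical and exist only for illustration.
 */
#if 0
static const struct snd_kcontrol_new example_switch = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Example Playback Switch",
	.info = snd_ctl_boolean_mono_info,	/* helper defined below */
	.get = example_get,			/* hypothetical callback */
	.put = example_put,			/* hypothetical callback */
};

static int example_build_controls(struct snd_card *card, void *chip)
{
	/* snd_ctl_add() frees the control itself on failure, and it
	 * returns -EINVAL for a NULL result from snd_ctl_new1(), so no
	 * separate cleanup is needed here.
	 */
	return snd_ctl_add(card, snd_ctl_new1(&example_switch, chip));
}
#endif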
/**
* snd_ctl_replace - replace the control instance of the card
* @card: the card instance
* @kcontrol: the control instance to replace
* @add_on_replace: add the control if not already added
*
* Replaces the given control. If the given control does not exist
* and the add_on_replace flag is set, the control is added. If the
* control exists, it is destroyed first.
*
 * The control is automatically freed if it cannot be added or replaced.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
bool add_on_replace)
{
struct snd_ctl_elem_id id;
unsigned int count;
unsigned int idx;
struct snd_kcontrol *old;
int ret;
if (!kcontrol)
return -EINVAL;
if (snd_BUG_ON(!card || !kcontrol->info)) {
ret = -EINVAL;
goto error;
}
id = kcontrol->id;
down_write(&card->controls_rwsem);
old = snd_ctl_find_id(card, &id);
if (!old) {
if (add_on_replace)
goto add;
up_write(&card->controls_rwsem);
ret = -EINVAL;
goto error;
}
ret = snd_ctl_remove(card, old);
if (ret < 0) {
up_write(&card->controls_rwsem);
goto error;
}
add:
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
ret = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return ret;
}
EXPORT_SYMBOL(snd_ctl_replace);
/**
* snd_ctl_remove - remove the control from the card and release it
* @card: the card instance
* @kcontrol: the control instance to remove
*
* Removes the control from the card and then releases the instance.
 * You don't need to call snd_ctl_free_one(). The caller must hold
 * the write lock, i.e. have called down_write(&card->controls_rwsem).
*
* Return: 0 if successful, or a negative error code on failure.
*/
int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
if (snd_BUG_ON(!card || !kcontrol))
return -EINVAL;
list_del(&kcontrol->list);
card->controls_count -= kcontrol->count;
id = kcontrol->id;
for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_REMOVE, &id);
snd_ctl_free_one(kcontrol);
return 0;
}
EXPORT_SYMBOL(snd_ctl_remove);
/**
* snd_ctl_remove_id - remove the control of the given id and release it
* @card: the card instance
* @id: the control id to remove
*
* Finds the control instance with the given id, removes it from the
* card list and releases it.
*
* Return: 0 if successful, or a negative error code on failure.
*/
int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id)
{
struct snd_kcontrol *kctl;
int ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
up_write(&card->controls_rwsem);
return -ENOENT;
}
ret = snd_ctl_remove(card, kctl);
up_write(&card->controls_rwsem);
return ret;
}
EXPORT_SYMBOL(snd_ctl_remove_id);
/**
* snd_ctl_remove_user_ctl - remove and release the unlocked user control
* @file: active control handle
* @id: the control id to remove
*
* Finds the control instance with the given id, removes it from the
* card list and releases it.
*
* Return: 0 if successful, or a negative error code on failure.
*/
static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file,
struct snd_ctl_elem_id *id)
{
struct snd_card *card = file->card;
struct snd_kcontrol *kctl;
int idx, ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
ret = -ENOENT;
goto error;
}
if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) {
ret = -EINVAL;
goto error;
}
for (idx = 0; idx < kctl->count; idx++)
if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) {
ret = -EBUSY;
goto error;
}
ret = snd_ctl_remove(card, kctl);
if (ret < 0)
goto error;
card->user_ctl_count--;
error:
up_write(&card->controls_rwsem);
return ret;
}
/**
* snd_ctl_activate_id - activate/inactivate the control of the given id
* @card: the card instance
* @id: the control id to activate/inactivate
* @active: non-zero to activate
*
* Finds the control instance with the given id, and activate or
* inactivate the control together with notification, if changed.
*
* Return: 0 if unchanged, 1 if changed, or a negative error code on failure.
*/
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id,
int active)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
ret = -ENOENT;
goto unlock;
}
index_offset = snd_ctl_get_ioff(kctl, &kctl->id);
vd = &kctl->vd[index_offset];
ret = 0;
if (active) {
if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE))
goto unlock;
vd->access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
} else {
if (vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE)
goto unlock;
vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
}
ret = 1;
unlock:
up_write(&card->controls_rwsem);
if (ret > 0)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, id);
return ret;
}
EXPORT_SYMBOL_GPL(snd_ctl_activate_id);
/**
* snd_ctl_rename_id - replace the id of a control on the card
* @card: the card instance
* @src_id: the old id
* @dst_id: the new id
*
* Finds the control with the old id from the card, and replaces the
* id with the new one.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id,
struct snd_ctl_elem_id *dst_id)
{
struct snd_kcontrol *kctl;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, src_id);
if (kctl == NULL) {
up_write(&card->controls_rwsem);
return -ENOENT;
}
kctl->id = *dst_id;
kctl->id.numid = card->last_numid + 1;
card->last_numid += kctl->count;
up_write(&card->controls_rwsem);
return 0;
}
EXPORT_SYMBOL(snd_ctl_rename_id);
/**
* snd_ctl_find_numid - find the control instance with the given number-id
* @card: the card instance
* @numid: the number-id to search
*
* Finds the control instance with the given number-id from the card.
*
 * The caller must take card->controls_rwsem before calling this function
 * (if a race condition is possible).
 *
 * Return: A pointer to the instance if found, or %NULL if not.
*
*/
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid)
{
struct snd_kcontrol *kctl;
if (snd_BUG_ON(!card || !numid))
return NULL;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid)
return kctl;
}
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_numid);
/**
* snd_ctl_find_id - find the control instance with the given id
* @card: the card instance
* @id: the id to search
*
* Finds the control instance with the given id from the card.
*
 * The caller must take card->controls_rwsem before calling this function
 * (if a race condition is possible).
 *
 * Return: A pointer to the instance if found, or %NULL if not.
*
*/
struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card,
struct snd_ctl_elem_id *id)
{
struct snd_kcontrol *kctl;
if (snd_BUG_ON(!card || !id))
return NULL;
if (id->numid != 0)
return snd_ctl_find_numid(card, id->numid);
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.iface != id->iface)
continue;
if (kctl->id.device != id->device)
continue;
if (kctl->id.subdevice != id->subdevice)
continue;
if (strncmp(kctl->id.name, id->name, sizeof(kctl->id.name)))
continue;
if (kctl->id.index > id->index)
continue;
if (kctl->id.index + kctl->count <= id->index)
continue;
return kctl;
}
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_id);
static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl,
unsigned int cmd, void __user *arg)
{
struct snd_ctl_card_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
down_read(&snd_ioctl_rwsem);
info->card = card->number;
strlcpy(info->id, card->id, sizeof(info->id));
strlcpy(info->driver, card->driver, sizeof(info->driver));
strlcpy(info->name, card->shortname, sizeof(info->name));
strlcpy(info->longname, card->longname, sizeof(info->longname));
strlcpy(info->mixername, card->mixername, sizeof(info->mixername));
strlcpy(info->components, card->components, sizeof(info->components));
up_read(&snd_ioctl_rwsem);
if (copy_to_user(arg, info, sizeof(struct snd_ctl_card_info))) {
kfree(info);
return -EFAULT;
}
kfree(info);
return 0;
}
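/*
 * SNDRV_CTL_IOCTL_ELEM_LIST handler: copies up to list.space element ids
 * into the user buffer, starting list.offset element ids into the card's
 * control list (each kcontrol contributes kctl->count ids).  With
 * space == 0 only list.count, the total number of ids, is reported, so
 * user space can size its buffer and retry.
 */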
static int snd_ctl_elem_list(struct snd_card *card,
struct snd_ctl_elem_list __user *_list)
{
struct list_head *plist;
struct snd_ctl_elem_list list;
struct snd_kcontrol *kctl;
struct snd_ctl_elem_id *dst, *id;
unsigned int offset, space, jidx;
if (copy_from_user(&list, _list, sizeof(list)))
return -EFAULT;
offset = list.offset;
space = list.space;
	/* limit the maximum space */
if (space > 16384)
return -ENOMEM;
if (space > 0) {
/* allocate temporary buffer for atomic operation */
dst = vmalloc(space * sizeof(struct snd_ctl_elem_id));
if (dst == NULL)
return -ENOMEM;
down_read(&card->controls_rwsem);
list.count = card->controls_count;
plist = card->controls.next;
while (plist != &card->controls) {
if (offset == 0)
break;
kctl = snd_kcontrol(plist);
if (offset < kctl->count)
break;
offset -= kctl->count;
plist = plist->next;
}
list.used = 0;
id = dst;
while (space > 0 && plist != &card->controls) {
kctl = snd_kcontrol(plist);
for (jidx = offset; space > 0 && jidx < kctl->count; jidx++) {
snd_ctl_build_ioff(id, kctl, jidx);
id++;
space--;
list.used++;
}
plist = plist->next;
offset = 0;
}
up_read(&card->controls_rwsem);
if (list.used > 0 &&
copy_to_user(list.pids, dst,
list.used * sizeof(struct snd_ctl_elem_id))) {
vfree(dst);
return -EFAULT;
}
vfree(dst);
} else {
down_read(&card->controls_rwsem);
list.count = card->controls_count;
up_read(&card->controls_rwsem);
}
if (copy_to_user(_list, &list, sizeof(list)))
return -EFAULT;
return 0;
}
static int snd_ctl_elem_info(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info *info)
{
struct snd_card *card = ctl->card;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &info->id);
if (kctl == NULL) {
up_read(&card->controls_rwsem);
return -ENOENT;
}
#ifdef CONFIG_SND_DEBUG
info->access = 0;
#endif
result = kctl->info(kctl, info);
if (result >= 0) {
snd_BUG_ON(info->access);
index_offset = snd_ctl_get_ioff(kctl, &info->id);
vd = &kctl->vd[index_offset];
snd_ctl_build_ioff(&info->id, kctl, index_offset);
info->access = vd->access;
if (vd->owner) {
info->access |= SNDRV_CTL_ELEM_ACCESS_LOCK;
if (vd->owner == ctl)
info->access |= SNDRV_CTL_ELEM_ACCESS_OWNER;
info->owner = pid_vnr(vd->owner->pid);
} else {
info->owner = -1;
}
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info __user *_info)
{
struct snd_ctl_elem_info info;
int result;
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
snd_power_lock(ctl->card);
result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_info(ctl, &info);
snd_power_unlock(ctl->card);
if (result >= 0)
if (copy_to_user(_info, &info, sizeof(info)))
return -EFAULT;
return result;
}
static int snd_ctl_elem_read(struct snd_card *card,
struct snd_ctl_elem_value *control)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
if ((vd->access & SNDRV_CTL_ELEM_ACCESS_READ) &&
kctl->get != NULL) {
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = kctl->get(kctl, control);
} else
result = -EPERM;
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_read_user(struct snd_card *card,
struct snd_ctl_elem_value __user *_control)
{
struct snd_ctl_elem_value *control;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
return PTR_ERR(control);
snd_power_lock(card);
result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_read(card, control);
snd_power_unlock(card);
if (result >= 0)
if (copy_to_user(_control, control, sizeof(*control)))
result = -EFAULT;
kfree(control);
return result;
}
static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
struct snd_ctl_elem_value *control)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_WRITE) ||
kctl->put == NULL ||
(file && vd->owner && vd->owner != file)) {
result = -EPERM;
} else {
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = kctl->put(kctl, control);
}
if (result > 0) {
struct snd_ctl_elem_id id = control->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
return 0;
}
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_write_user(struct snd_ctl_file *file,
struct snd_ctl_elem_value __user *_control)
{
struct snd_ctl_elem_value *control;
struct snd_card *card;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
return PTR_ERR(control);
card = file->card;
snd_power_lock(card);
result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_write(card, file, control);
snd_power_unlock(card);
if (result >= 0)
if (copy_to_user(_control, control, sizeof(*control)))
result = -EFAULT;
kfree(control);
return result;
}
static int snd_ctl_elem_lock(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_card *card = file->card;
struct snd_ctl_elem_id id;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
int result;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &id);
if (kctl == NULL) {
result = -ENOENT;
} else {
vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
if (vd->owner != NULL)
result = -EBUSY;
else {
vd->owner = file;
result = 0;
}
}
up_write(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_card *card = file->card;
struct snd_ctl_elem_id id;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
int result;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &id);
if (kctl == NULL) {
result = -ENOENT;
} else {
vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
if (vd->owner == NULL)
result = -EINVAL;
else if (vd->owner != file)
result = -EPERM;
else {
vd->owner = NULL;
result = 0;
}
}
up_write(&card->controls_rwsem);
return result;
}
struct user_element {
struct snd_ctl_elem_info info;
struct snd_card *card;
void *elem_data; /* element data */
unsigned long elem_data_size; /* size of element data in bytes */
void *tlv_data; /* TLV data */
unsigned long tlv_data_size; /* TLV data size */
void *priv_data; /* private data (like strings for enumerated type) */
};
static int snd_ctl_elem_user_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
*uinfo = ue->info;
return 0;
}
static int snd_ctl_elem_user_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
const char *names;
unsigned int item;
item = uinfo->value.enumerated.item;
*uinfo = ue->info;
item = min(item, uinfo->value.enumerated.items - 1);
uinfo->value.enumerated.item = item;
names = ue->priv_data;
for (; item > 0; --item)
names += strlen(names) + 1;
strcpy(uinfo->value.enumerated.name, names);
return 0;
}
static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct user_element *ue = kcontrol->private_data;
mutex_lock(&ue->card->user_ctl_lock);
memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
mutex_unlock(&ue->card->user_ctl_lock);
return 0;
}
static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int change;
struct user_element *ue = kcontrol->private_data;
mutex_lock(&ue->card->user_ctl_lock);
change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
if (change)
memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
mutex_unlock(&ue->card->user_ctl_lock);
return change;
}
static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
int op_flag,
unsigned int size,
unsigned int __user *tlv)
{
struct user_element *ue = kcontrol->private_data;
int change = 0;
void *new_data;
if (op_flag > 0) {
if (size > 1024 * 128) /* sane value */
return -EINVAL;
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
mutex_unlock(&ue->card->user_ctl_lock);
} else {
int ret = 0;
mutex_lock(&ue->card->user_ctl_lock);
if (!ue->tlv_data_size || !ue->tlv_data) {
ret = -ENXIO;
goto err_unlock;
}
if (size < ue->tlv_data_size) {
ret = -ENOSPC;
goto err_unlock;
}
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
ret = -EFAULT;
err_unlock:
mutex_unlock(&ue->card->user_ctl_lock);
if (ret)
return ret;
}
return change;
}
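/*
 * The user-supplied enum names arrive as one buffer of consecutive
 * NUL-terminated strings; for items == 3 it might look like
 * "Off\0Low\0High\0" with names_length == 13.  The loop below verifies
 * that every item has a non-empty name shorter than 64 bytes and that
 * the buffer is properly terminated.
 */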
static int snd_ctl_elem_init_enum_names(struct user_element *ue)
{
char *names, *p;
size_t buf_len, name_len;
unsigned int i;
const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr;
if (ue->info.value.enumerated.names_length > 64 * 1024)
return -EINVAL;
names = memdup_user((const void __user *)user_ptrval,
ue->info.value.enumerated.names_length);
if (IS_ERR(names))
return PTR_ERR(names);
/* check that there are enough valid names */
buf_len = ue->info.value.enumerated.names_length;
p = names;
for (i = 0; i < ue->info.value.enumerated.items; ++i) {
name_len = strnlen(p, buf_len);
if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
kfree(names);
return -EINVAL;
}
p += name_len + 1;
buf_len -= name_len + 1;
}
ue->priv_data = names;
ue->info.value.enumerated.names_ptr = 0;
return 0;
}
static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
{
struct user_element *ue = kcontrol->private_data;
kfree(ue->tlv_data);
kfree(ue->priv_data);
kfree(ue);
}
static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
struct snd_card *card = file->card;
struct snd_kcontrol kctl, *_kctl;
unsigned int access;
long private_size;
struct user_element *ue;
int idx, err;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
if (replace) {
err = snd_ctl_remove_user_ctl(file, &info->id);
if (err)
return err;
}
if (card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
kctl.info = snd_ctl_elem_user_enum_info;
else
kctl.info = snd_ctl_elem_user_info;
if (access & SNDRV_CTL_ELEM_ACCESS_READ)
kctl.get = snd_ctl_elem_user_get;
if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
kctl.put = snd_ctl_elem_user_put;
if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
kctl.tlv.c = snd_ctl_elem_user_tlv;
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
private_size = sizeof(long);
if (info->count > 128)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
private_size = sizeof(long long);
if (info->count > 64)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
private_size = sizeof(unsigned int);
if (info->count > 128 || info->value.enumerated.items == 0)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
private_size = sizeof(unsigned char);
if (info->count > 512)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
private_size = sizeof(struct snd_aes_iec958);
if (info->count != 1)
return -EINVAL;
break;
default:
return -EINVAL;
}
private_size *= info->count;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
ue->elem_data_size = private_size;
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
kfree(ue);
return err;
}
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
kfree(ue->priv_data);
kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
if (err < 0)
return err;
down_write(&card->controls_rwsem);
card->user_ctl_count++;
up_write(&card->controls_rwsem);
return 0;
}
static int snd_ctl_elem_add_user(struct snd_ctl_file *file,
struct snd_ctl_elem_info __user *_info, int replace)
{
struct snd_ctl_elem_info info;
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
return snd_ctl_elem_add(file, &info, replace);
}
static int snd_ctl_elem_remove(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_ctl_elem_id id;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
return snd_ctl_remove_user_ctl(file, &id);
}
static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr)
{
int subscribe;
if (get_user(subscribe, ptr))
return -EFAULT;
if (subscribe < 0) {
subscribe = file->subscribed;
if (put_user(subscribe, ptr))
return -EFAULT;
return 0;
}
if (subscribe) {
file->subscribed = 1;
return 0;
} else if (file->subscribed) {
snd_ctl_empty_read_queue(file);
file->subscribed = 0;
}
return 0;
}
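/*
 * Common handler for the three TLV ioctls; op_flag encodes the
 * operation: 0 for TLV_READ, > 0 for TLV_WRITE, < 0 for TLV_COMMAND
 * (see the dispatch in snd_ctl_ioctl() below).
 */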
static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
struct snd_ctl_tlv __user *_tlv,
int op_flag)
{
struct snd_card *card = file->card;
struct snd_ctl_tlv tlv;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int len;
int err = 0;
if (copy_from_user(&tlv, _tlv, sizeof(tlv)))
return -EFAULT;
if (tlv.length < sizeof(unsigned int) * 2)
return -EINVAL;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_numid(card, tlv.numid);
if (kctl == NULL) {
err = -ENOENT;
goto __kctl_end;
}
if (kctl->tlv.p == NULL) {
err = -ENXIO;
goto __kctl_end;
}
vd = &kctl->vd[tlv.numid - kctl->id.numid];
if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) ||
(op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) ||
(op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) {
err = -ENXIO;
goto __kctl_end;
}
if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
if (vd->owner != NULL && vd->owner != file) {
err = -EPERM;
goto __kctl_end;
}
err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
if (err > 0) {
struct snd_ctl_elem_id id = kctl->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
return 0;
}
} else {
if (op_flag) {
err = -ENXIO;
goto __kctl_end;
}
len = kctl->tlv.p[1] + 2 * sizeof(unsigned int);
if (tlv.length < len) {
err = -ENOMEM;
goto __kctl_end;
}
if (copy_to_user(_tlv->tlv, kctl->tlv.p, len))
err = -EFAULT;
}
__kctl_end:
up_read(&card->controls_rwsem);
return err;
}
static long snd_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct snd_ctl_file *ctl;
struct snd_card *card;
struct snd_kctl_ioctl *p;
void __user *argp = (void __user *)arg;
int __user *ip = argp;
int err;
ctl = file->private_data;
card = ctl->card;
if (snd_BUG_ON(!card))
return -ENXIO;
switch (cmd) {
case SNDRV_CTL_IOCTL_PVERSION:
return put_user(SNDRV_CTL_VERSION, ip) ? -EFAULT : 0;
case SNDRV_CTL_IOCTL_CARD_INFO:
return snd_ctl_card_info(card, ctl, cmd, argp);
case SNDRV_CTL_IOCTL_ELEM_LIST:
return snd_ctl_elem_list(card, argp);
case SNDRV_CTL_IOCTL_ELEM_INFO:
return snd_ctl_elem_info_user(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_READ:
return snd_ctl_elem_read_user(card, argp);
case SNDRV_CTL_IOCTL_ELEM_WRITE:
return snd_ctl_elem_write_user(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_LOCK:
return snd_ctl_elem_lock(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_UNLOCK:
return snd_ctl_elem_unlock(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_ADD:
return snd_ctl_elem_add_user(ctl, argp, 0);
case SNDRV_CTL_IOCTL_ELEM_REPLACE:
return snd_ctl_elem_add_user(ctl, argp, 1);
case SNDRV_CTL_IOCTL_ELEM_REMOVE:
return snd_ctl_elem_remove(ctl, argp);
case SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS:
return snd_ctl_subscribe_events(ctl, ip);
case SNDRV_CTL_IOCTL_TLV_READ:
return snd_ctl_tlv_ioctl(ctl, argp, 0);
case SNDRV_CTL_IOCTL_TLV_WRITE:
return snd_ctl_tlv_ioctl(ctl, argp, 1);
case SNDRV_CTL_IOCTL_TLV_COMMAND:
return snd_ctl_tlv_ioctl(ctl, argp, -1);
case SNDRV_CTL_IOCTL_POWER:
return -ENOPROTOOPT;
case SNDRV_CTL_IOCTL_POWER_STATE:
#ifdef CONFIG_PM
return put_user(card->power_state, ip) ? -EFAULT : 0;
#else
return put_user(SNDRV_CTL_POWER_D0, ip) ? -EFAULT : 0;
#endif
}
down_read(&snd_ioctl_rwsem);
list_for_each_entry(p, &snd_control_ioctls, list) {
err = p->fioctl(card, ctl, cmd, arg);
if (err != -ENOIOCTLCMD) {
up_read(&snd_ioctl_rwsem);
return err;
}
}
up_read(&snd_ioctl_rwsem);
dev_dbg(card->dev, "unknown ioctl = 0x%x\n", cmd);
return -ENOTTY;
}
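/*
 * Delivers queued events to user space as struct snd_ctl_event records.
 * Requires a prior SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS; when the queue is
 * empty the call blocks unless O_NONBLOCK is set or some events have
 * already been copied out.
 */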
static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
size_t count, loff_t * offset)
{
struct snd_ctl_file *ctl;
int err = 0;
ssize_t result = 0;
ctl = file->private_data;
if (snd_BUG_ON(!ctl || !ctl->card))
return -ENXIO;
if (!ctl->subscribed)
return -EBADFD;
if (count < sizeof(struct snd_ctl_event))
return -EINVAL;
spin_lock_irq(&ctl->read_lock);
while (count >= sizeof(struct snd_ctl_event)) {
struct snd_ctl_event ev;
struct snd_kctl_event *kev;
while (list_empty(&ctl->events)) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto __end_lock;
}
init_waitqueue_entry(&wait, current);
add_wait_queue(&ctl->change_sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&ctl->read_lock);
schedule();
remove_wait_queue(&ctl->change_sleep, &wait);
if (ctl->card->shutdown)
return -ENODEV;
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&ctl->read_lock);
}
kev = snd_kctl_event(ctl->events.next);
ev.type = SNDRV_CTL_EVENT_ELEM;
ev.data.elem.mask = kev->mask;
ev.data.elem.id = kev->id;
list_del(&kev->list);
spin_unlock_irq(&ctl->read_lock);
kfree(kev);
if (copy_to_user(buffer, &ev, sizeof(struct snd_ctl_event))) {
err = -EFAULT;
goto __end;
}
spin_lock_irq(&ctl->read_lock);
buffer += sizeof(struct snd_ctl_event);
count -= sizeof(struct snd_ctl_event);
result += sizeof(struct snd_ctl_event);
}
__end_lock:
spin_unlock_irq(&ctl->read_lock);
__end:
return result > 0 ? result : err;
}
static unsigned int snd_ctl_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_ctl_file *ctl;
ctl = file->private_data;
if (!ctl->subscribed)
return 0;
poll_wait(file, &ctl->change_sleep, wait);
mask = 0;
if (!list_empty(&ctl->events))
mask |= POLLIN | POLLRDNORM;
return mask;
}
/*
* register the device-specific control-ioctls.
* called from each device manager like pcm.c, hwdep.c, etc.
*/
static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists)
{
struct snd_kctl_ioctl *pn;
pn = kzalloc(sizeof(struct snd_kctl_ioctl), GFP_KERNEL);
if (pn == NULL)
return -ENOMEM;
pn->fioctl = fcn;
down_write(&snd_ioctl_rwsem);
list_add_tail(&pn->list, lists);
up_write(&snd_ioctl_rwsem);
return 0;
}
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_ioctls);
}
EXPORT_SYMBOL(snd_ctl_register_ioctl);
#ifdef CONFIG_COMPAT
int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_compat_ioctls);
}
EXPORT_SYMBOL(snd_ctl_register_ioctl_compat);
#endif
/*
* de-register the device-specific control-ioctls.
*/
static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn,
struct list_head *lists)
{
struct snd_kctl_ioctl *p;
if (snd_BUG_ON(!fcn))
return -EINVAL;
down_write(&snd_ioctl_rwsem);
list_for_each_entry(p, lists, list) {
if (p->fioctl == fcn) {
list_del(&p->list);
up_write(&snd_ioctl_rwsem);
kfree(p);
return 0;
}
}
up_write(&snd_ioctl_rwsem);
snd_BUG();
return -EINVAL;
}
int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_ioctls);
}
EXPORT_SYMBOL(snd_ctl_unregister_ioctl);
#ifdef CONFIG_COMPAT
int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_compat_ioctls);
}
EXPORT_SYMBOL(snd_ctl_unregister_ioctl_compat);
#endif
static int snd_ctl_fasync(int fd, struct file * file, int on)
{
struct snd_ctl_file *ctl;
ctl = file->private_data;
return fasync_helper(fd, file, on, &ctl->fasync);
}
/*
* ioctl32 compat
*/
#ifdef CONFIG_COMPAT
#include "control_compat.c"
#else
#define snd_ctl_ioctl_compat NULL
#endif
/*
* INIT PART
*/
static const struct file_operations snd_ctl_f_ops =
{
.owner = THIS_MODULE,
.read = snd_ctl_read,
.open = snd_ctl_open,
.release = snd_ctl_release,
.llseek = no_llseek,
.poll = snd_ctl_poll,
.unlocked_ioctl = snd_ctl_ioctl,
.compat_ioctl = snd_ctl_ioctl_compat,
.fasync = snd_ctl_fasync,
};
/*
* registration of the control device
*/
static int snd_ctl_dev_register(struct snd_device *device)
{
struct snd_card *card = device->device_data;
int err, cardnum;
char name[16];
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
sprintf(name, "controlC%i", cardnum);
if ((err = snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1,
&snd_ctl_f_ops, card, name)) < 0)
return err;
return 0;
}
/*
* disconnection of the control device
*/
static int snd_ctl_dev_disconnect(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_ctl_file *ctl;
int err, cardnum;
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
read_lock(&card->ctl_files_rwlock);
list_for_each_entry(ctl, &card->ctl_files, list) {
wake_up(&ctl->change_sleep);
kill_fasync(&ctl->fasync, SIGIO, POLL_ERR);
}
read_unlock(&card->ctl_files_rwlock);
if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL,
card, -1)) < 0)
return err;
return 0;
}
/*
* free all controls
*/
static int snd_ctl_dev_free(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_kcontrol *control;
down_write(&card->controls_rwsem);
while (!list_empty(&card->controls)) {
control = snd_kcontrol(card->controls.next);
snd_ctl_remove(card, control);
}
up_write(&card->controls_rwsem);
return 0;
}
/*
* create control core:
* called from init.c
*/
int snd_ctl_create(struct snd_card *card)
{
static struct snd_device_ops ops = {
.dev_free = snd_ctl_dev_free,
.dev_register = snd_ctl_dev_register,
.dev_disconnect = snd_ctl_dev_disconnect,
};
if (snd_BUG_ON(!card))
return -ENXIO;
return snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops);
}
/*
* Frequently used control callbacks/helpers
*/
int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
EXPORT_SYMBOL(snd_ctl_boolean_mono_info);
int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
/**
* snd_ctl_enum_info - fills the info structure for an enumerated control
* @info: the structure to be filled
* @channels: the number of the control's channels; often one
* @items: the number of control values; also the size of @names
* @names: an array containing the names of all control values
*
* Sets all required fields in @info to their appropriate values.
* If the control's accessibility is not the default (readable and writable),
* the caller has to fill @info->access.
*
* Return: Zero.
*/
int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
unsigned int items, const char *const names[])
{
info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
info->count = channels;
info->value.enumerated.items = items;
if (info->value.enumerated.item >= items)
info->value.enumerated.item = items - 1;
strlcpy(info->value.enumerated.name,
names[info->value.enumerated.item],
sizeof(info->value.enumerated.name));
return 0;
}
EXPORT_SYMBOL(snd_ctl_enum_info);
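/*
 * Illustrative sketch of a driver info callback built on
 * snd_ctl_enum_info(); the function and the item names are hypothetical.
 */
#if 0
static int example_mux_info(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_info *uinfo)
{
	static const char *const texts[3] = { "Mic", "Line", "CD" };

	/* one channel, three selectable items */
	return snd_ctl_enum_info(uinfo, 1, 3, texts);
}
#endif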
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2219_0 |
crossvul-cpp_data_good_2020_8 | /*-------------------------------------------------------------------------
*
* geo_ops.c
* 2D geometric operations
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/utils/adt/geo_ops.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <math.h>
#include <limits.h>
#include <float.h>
#include <ctype.h>
#include "libpq/pqformat.h"
#include "utils/builtins.h"
#include "utils/geo_decls.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/*
* Internal routines
*/
enum path_delim { PATH_NONE, PATH_OPEN, PATH_CLOSED };
static int point_inside(Point *p, int npts, Point *plist);
static int lseg_crossing(double x, double y, double px, double py);
static BOX *box_construct(double x1, double x2, double y1, double y2);
static BOX *box_copy(BOX *box);
static BOX *box_fill(BOX *result, double x1, double x2, double y1, double y2);
static bool box_ov(BOX *box1, BOX *box2);
static double box_ht(BOX *box);
static double box_wd(BOX *box);
static double circle_ar(CIRCLE *circle);
static CIRCLE *circle_copy(CIRCLE *circle);
static LINE *line_construct_pm(Point *pt, double m);
static void line_construct_pts(LINE *line, Point *pt1, Point *pt2);
static bool lseg_intersect_internal(LSEG *l1, LSEG *l2);
static double lseg_dt(LSEG *l1, LSEG *l2);
static bool on_ps_internal(Point *pt, LSEG *lseg);
static void make_bound_box(POLYGON *poly);
static bool plist_same(int npts, Point *p1, Point *p2);
static Point *point_construct(double x, double y);
static Point *point_copy(Point *pt);
static int single_decode(char *str, float8 *x, char **ss);
static int single_encode(float8 x, char *str);
static int pair_decode(char *str, float8 *x, float8 *y, char **s);
static int pair_encode(float8 x, float8 y, char *str);
static int pair_count(char *s, char delim);
static int path_decode(int opentype, int npts, char *str, int *isopen, char **ss, Point *p);
static char *path_encode(enum path_delim path_delim, int npts, Point *pt);
static void statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2);
static double box_ar(BOX *box);
static void box_cn(Point *center, BOX *box);
static Point *interpt_sl(LSEG *lseg, LINE *line);
static bool has_interpt_sl(LSEG *lseg, LINE *line);
static double dist_pl_internal(Point *pt, LINE *line);
static double dist_ps_internal(Point *pt, LSEG *lseg);
static Point *line_interpt_internal(LINE *l1, LINE *l2);
static bool lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start);
static Point *lseg_interpt_internal(LSEG *l1, LSEG *l2);
/*
* Delimiters for input and output strings.
* LDELIM, RDELIM, and DELIM are left, right, and separator delimiters, respectively.
* LDELIM_EP, RDELIM_EP are left and right delimiters for paths with endpoints.
*/
#define LDELIM '('
#define RDELIM ')'
#define DELIM ','
#define LDELIM_EP '['
#define RDELIM_EP ']'
#define LDELIM_C '<'
#define RDELIM_C '>'
/* Maximum number of characters printed by pair_encode() */
/* ...+3+7 : 3 accounts for the extra_float_digits max value; 7 covers
 * sign, decimal point, 'e', exponent sign, and a three-digit exponent */
#define P_MAXLEN (2*(DBL_DIG+3+7)+1)
/*
* Geometric data types are composed of points.
* This code tries to support a common format throughout the data types,
* to allow for more predictable usage and data type conversion.
* The fundamental unit is the point. Other units are line segments,
* open paths, boxes, closed paths, and polygons (which should be considered
* non-intersecting closed paths).
*
* Data representation is as follows:
* point: (x,y)
* line segment: [(x1,y1),(x2,y2)]
* box: (x1,y1),(x2,y2)
* open path: [(x1,y1),...,(xn,yn)]
* closed path: ((x1,y1),...,(xn,yn))
* polygon: ((x1,y1),...,(xn,yn))
*
* For boxes, the points are opposite corners with the first point at the top right.
* For closed paths and polygons, the points should be reordered to allow
* fast and correct equality comparisons.
*
* XXX perhaps points in complex shapes should be reordered internally
* to allow faster internal operations, but should keep track of input order
* and restore that order for text output - tgl 97/01/16
*/
static int
single_decode(char *str, float8 *x, char **s)
{
char *cp;
if (!PointerIsValid(str))
return FALSE;
while (isspace((unsigned char) *str))
str++;
*x = strtod(str, &cp);
#ifdef GEODEBUG
printf("single_decode- (%x) try decoding %s to %g\n", (cp - str), str, *x);
#endif
if (cp <= str)
return FALSE;
while (isspace((unsigned char) *cp))
cp++;
if (s != NULL)
*s = cp;
return TRUE;
} /* single_decode() */
static int
single_encode(float8 x, char *str)
{
int ndig = DBL_DIG + extra_float_digits;
if (ndig < 1)
ndig = 1;
sprintf(str, "%.*g", ndig, x);
return TRUE;
} /* single_encode() */
static int
pair_decode(char *str, float8 *x, float8 *y, char **s)
{
int has_delim;
char *cp;
if (!PointerIsValid(str))
return FALSE;
while (isspace((unsigned char) *str))
str++;
if ((has_delim = (*str == LDELIM)))
str++;
while (isspace((unsigned char) *str))
str++;
*x = strtod(str, &cp);
if (cp <= str)
return FALSE;
while (isspace((unsigned char) *cp))
cp++;
if (*cp++ != DELIM)
return FALSE;
while (isspace((unsigned char) *cp))
cp++;
*y = strtod(cp, &str);
if (str <= cp)
return FALSE;
while (isspace((unsigned char) *str))
str++;
if (has_delim)
{
if (*str != RDELIM)
return FALSE;
str++;
while (isspace((unsigned char) *str))
str++;
}
if (s != NULL)
*s = str;
return TRUE;
}
static int
pair_encode(float8 x, float8 y, char *str)
{
int ndig = DBL_DIG + extra_float_digits;
if (ndig < 1)
ndig = 1;
sprintf(str, "%.*g,%.*g", ndig, x, ndig, y);
return TRUE;
}
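/*
 * path_decode() is the shared parser for point lists: "opentype" says
 * whether the '[' ... ']' open-path form is accepted, "npts" is the
 * expected number of points, "*isopen" reports which form was seen,
 * "*ss" is set to the first unconsumed character, and the points are
 * stored into "p".  (Descriptive summary of the function below.)
 */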
static int
path_decode(int opentype, int npts, char *str, int *isopen, char **ss, Point *p)
{
int depth = 0;
char *s,
*cp;
int i;
s = str;
while (isspace((unsigned char) *s))
s++;
if ((*isopen = (*s == LDELIM_EP)))
{
/* no open delimiter allowed? */
if (!opentype)
return FALSE;
depth++;
s++;
while (isspace((unsigned char) *s))
s++;
}
else if (*s == LDELIM)
{
cp = (s + 1);
while (isspace((unsigned char) *cp))
cp++;
if (*cp == LDELIM)
{
#ifdef NOT_USED
/* nested delimiters with only one point? */
if (npts <= 1)
return FALSE;
#endif
depth++;
s = cp;
}
else if (strrchr(s, LDELIM) == s)
{
depth++;
s = cp;
}
}
for (i = 0; i < npts; i++)
{
if (!pair_decode(s, &(p->x), &(p->y), &s))
return FALSE;
if (*s == DELIM)
s++;
p++;
}
while (depth > 0)
{
if ((*s == RDELIM)
|| ((*s == RDELIM_EP) && (*isopen) && (depth == 1)))
{
depth--;
s++;
while (isspace((unsigned char) *s))
s++;
}
else
return FALSE;
}
*ss = s;
return TRUE;
} /* path_decode() */
static char *
path_encode(enum path_delim path_delim, int npts, Point *pt)
{
int size = npts * (P_MAXLEN + 3) + 2;
char *result;
char *cp;
int i;
/* Check for integer overflow */
if ((size - 2) / npts != (P_MAXLEN + 3))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
result = palloc(size);
cp = result;
switch (path_delim)
{
case PATH_CLOSED:
*cp++ = LDELIM;
break;
case PATH_OPEN:
*cp++ = LDELIM_EP;
break;
case PATH_NONE:
break;
}
for (i = 0; i < npts; i++)
{
*cp++ = LDELIM;
if (!pair_encode(pt->x, pt->y, cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not format \"path\" value")));
cp += strlen(cp);
*cp++ = RDELIM;
*cp++ = DELIM;
pt++;
}
cp--;
switch (path_delim)
{
case PATH_CLOSED:
*cp++ = RDELIM;
break;
case PATH_OPEN:
*cp++ = RDELIM_EP;
break;
case PATH_NONE:
break;
}
*cp = '\0';
return result;
} /* path_encode() */
/*-------------------------------------------------------------
* pair_count - count the number of points
 * allows the following notations:
 * '((1,2),(3,4))'
 * '(1,3,2,4)'
 * requires an odd number of delimiter characters in the string
*-------------------------------------------------------------*/
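/*
 * Worked examples of the odd-delimiter rule:
 *   "(1,3,2,4)"     -> 3 commas -> (3 + 1) / 2 = 2 points
 *   "((1,2),(3,4))" -> 3 commas -> 2 points
 *   "(1,2,3)"       -> 2 commas -> -1, rejected
 */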
static int
pair_count(char *s, char delim)
{
int ndelim = 0;
while ((s = strchr(s, delim)) != NULL)
{
ndelim++;
s++;
}
return (ndelim % 2) ? ((ndelim + 1) / 2) : -1;
}
/***********************************************************************
**
** Routines for two-dimensional boxes.
**
***********************************************************************/
/*----------------------------------------------------------
* Formatting and conversion routines.
*---------------------------------------------------------*/
/* box_in - convert a string to internal form.
*
* External format: (two corners of box)
* "(f8, f8), (f8, f8)"
* also supports the older style "(f8, f8, f8, f8)"
*/
Datum
box_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
BOX *box = (BOX *) palloc(sizeof(BOX));
int isopen;
char *s;
double x,
y;
if ((!path_decode(FALSE, 2, str, &isopen, &s, &(box->high)))
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type box: \"%s\"", str)));
/* reorder corners if necessary... */
if (box->high.x < box->low.x)
{
x = box->high.x;
box->high.x = box->low.x;
box->low.x = x;
}
if (box->high.y < box->low.y)
{
y = box->high.y;
box->high.y = box->low.y;
box->low.y = y;
}
PG_RETURN_BOX_P(box);
}
/* box_out - convert a box to external form.
*/
Datum
box_out(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_CSTRING(path_encode(PATH_NONE, 2, &(box->high)));
}
/*
* box_recv - converts external binary format to box
*/
Datum
box_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
BOX *box;
double x,
y;
box = (BOX *) palloc(sizeof(BOX));
box->high.x = pq_getmsgfloat8(buf);
box->high.y = pq_getmsgfloat8(buf);
box->low.x = pq_getmsgfloat8(buf);
box->low.y = pq_getmsgfloat8(buf);
/* reorder corners if necessary... */
if (box->high.x < box->low.x)
{
x = box->high.x;
box->high.x = box->low.x;
box->low.x = x;
}
if (box->high.y < box->low.y)
{
y = box->high.y;
box->high.y = box->low.y;
box->low.y = y;
}
PG_RETURN_BOX_P(box);
}
/*
* box_send - converts box to binary format
*/
Datum
box_send(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, box->high.x);
pq_sendfloat8(&buf, box->high.y);
pq_sendfloat8(&buf, box->low.x);
pq_sendfloat8(&buf, box->low.y);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/* box_construct - fill in a new box.
*/
static BOX *
box_construct(double x1, double x2, double y1, double y2)
{
BOX *result = (BOX *) palloc(sizeof(BOX));
return box_fill(result, x1, x2, y1, y2);
}
/* box_fill - fill in a given box struct
*/
static BOX *
box_fill(BOX *result, double x1, double x2, double y1, double y2)
{
if (x1 > x2)
{
result->high.x = x1;
result->low.x = x2;
}
else
{
result->high.x = x2;
result->low.x = x1;
}
if (y1 > y2)
{
result->high.y = y1;
result->low.y = y2;
}
else
{
result->high.y = y2;
result->low.y = y1;
}
return result;
}
/* box_copy - copy a box
*/
static BOX *
box_copy(BOX *box)
{
BOX *result = (BOX *) palloc(sizeof(BOX));
memcpy((char *) result, (char *) box, sizeof(BOX));
return result;
}
/*----------------------------------------------------------
* Relational operators for BOXes.
* <, >, <=, >=, and == are based on box area.
*---------------------------------------------------------*/
/* box_same - are two boxes identical?
*/
Datum
box_same(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPeq(box1->high.x, box2->high.x) &&
FPeq(box1->low.x, box2->low.x) &&
FPeq(box1->high.y, box2->high.y) &&
FPeq(box1->low.y, box2->low.y));
}
/* box_overlap - does box1 overlap box2?
*/
Datum
box_overlap(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(box_ov(box1, box2));
}
static bool
box_ov(BOX *box1, BOX *box2)
{
return (FPle(box1->low.x, box2->high.x) &&
FPle(box2->low.x, box1->high.x) &&
FPle(box1->low.y, box2->high.y) &&
FPle(box2->low.y, box1->high.y));
}
/* box_left - is box1 strictly left of box2?
*/
Datum
box_left(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPlt(box1->high.x, box2->low.x));
}
/* box_overleft - is the right edge of box1 at or left of
* the right edge of box2?
*
* This is "less than or equal" for the end of a time range,
* when time ranges are stored as rectangles.
*/
Datum
box_overleft(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box1->high.x, box2->high.x));
}
/* box_right - is box1 strictly right of box2?
*/
Datum
box_right(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPgt(box1->low.x, box2->high.x));
}
/* box_overright - is the left edge of box1 at or right of
* the left edge of box2?
*
* This is "greater than or equal" for time ranges, when time ranges
* are stored as rectangles.
*/
Datum
box_overright(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box1->low.x, box2->low.x));
}
/* box_below - is box1 strictly below box2?
*/
Datum
box_below(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPlt(box1->high.y, box2->low.y));
}
/* box_overbelow - is the upper edge of box1 at or below
* the upper edge of box2?
*/
Datum
box_overbelow(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box1->high.y, box2->high.y));
}
/* box_above - is box1 strictly above box2?
*/
Datum
box_above(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPgt(box1->low.y, box2->high.y));
}
/* box_overabove - is the lower edge of box1 at or above
* the lower edge of box2?
*/
Datum
box_overabove(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box1->low.y, box2->low.y));
}
/* box_contained - is box1 contained by box2?
*/
Datum
box_contained(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box1->high.x, box2->high.x) &&
FPge(box1->low.x, box2->low.x) &&
FPle(box1->high.y, box2->high.y) &&
FPge(box1->low.y, box2->low.y));
}
/* box_contain - does box1 contain box2?
*/
Datum
box_contain(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box1->high.x, box2->high.x) &&
FPle(box1->low.x, box2->low.x) &&
FPge(box1->high.y, box2->high.y) &&
FPle(box1->low.y, box2->low.y));
}
/* box_positionop -
* is box1 entirely {above,below} box2?
*
* box_below_eq and box_above_eq are obsolete versions that (probably
* erroneously) accept the equal-boundaries case. Since these are not
* in sync with the box_left and box_right code, they are deprecated and
* not supported in the PG 8.1 rtree operator class extension.
*/
Datum
box_below_eq(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box1->high.y, box2->low.y));
}
Datum
box_above_eq(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box1->low.y, box2->high.y));
}
/* box_relop - is area(box1) relop area(box2), within
* our accuracy constraint?
*/
Datum
box_lt(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPlt(box_ar(box1), box_ar(box2)));
}
Datum
box_gt(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPgt(box_ar(box1), box_ar(box2)));
}
Datum
box_eq(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPeq(box_ar(box1), box_ar(box2)));
}
Datum
box_le(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPle(box_ar(box1), box_ar(box2)));
}
Datum
box_ge(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(FPge(box_ar(box1), box_ar(box2)));
}
/*----------------------------------------------------------
* "Arithmetic" operators on boxes.
*---------------------------------------------------------*/
/* box_area - returns the area of the box.
*/
Datum
box_area(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_FLOAT8(box_ar(box));
}
/* box_width - returns the width of the box
* (horizontal magnitude).
*/
Datum
box_width(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_FLOAT8(box->high.x - box->low.x);
}
/* box_height - returns the height of the box
* (vertical magnitude).
*/
Datum
box_height(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_FLOAT8(box->high.y - box->low.y);
}
/* box_distance - returns the distance between the
* center points of two boxes.
*/
Datum
box_distance(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
Point a,
b;
box_cn(&a, box1);
box_cn(&b, box2);
PG_RETURN_FLOAT8(HYPOT(a.x - b.x, a.y - b.y));
}
/* box_center - returns the center point of the box.
*/
Datum
box_center(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *result = (Point *) palloc(sizeof(Point));
box_cn(result, box);
PG_RETURN_POINT_P(result);
}
/* box_ar - returns the area of the box.
*/
static double
box_ar(BOX *box)
{
return box_wd(box) * box_ht(box);
}
/* box_cn - stores the centerpoint of the box into *center.
*/
static void
box_cn(Point *center, BOX *box)
{
center->x = (box->high.x + box->low.x) / 2.0;
center->y = (box->high.y + box->low.y) / 2.0;
}
/* box_wd - returns the width (length) of the box
* (horizontal magnitude).
*/
static double
box_wd(BOX *box)
{
return box->high.x - box->low.x;
}
/* box_ht - returns the height of the box
* (vertical magnitude).
*/
static double
box_ht(BOX *box)
{
return box->high.y - box->low.y;
}
/*----------------------------------------------------------
* Funky operations.
*---------------------------------------------------------*/
/* box_intersect -
* returns the overlapping portion of two boxes,
* or NULL if they do not intersect.
*/
Datum
box_intersect(PG_FUNCTION_ARGS)
{
BOX *box1 = PG_GETARG_BOX_P(0);
BOX *box2 = PG_GETARG_BOX_P(1);
BOX *result;
if (!box_ov(box1, box2))
PG_RETURN_NULL();
result = (BOX *) palloc(sizeof(BOX));
result->high.x = Min(box1->high.x, box2->high.x);
result->low.x = Max(box1->low.x, box2->low.x);
result->high.y = Min(box1->high.y, box2->high.y);
result->low.y = Max(box1->low.y, box2->low.y);
PG_RETURN_BOX_P(result);
}
/* box_diagonal -
* returns a line segment which happens to be the
* positive-slope diagonal of "box".
*/
Datum
box_diagonal(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
LSEG *result = (LSEG *) palloc(sizeof(LSEG));
statlseg_construct(result, &box->high, &box->low);
PG_RETURN_LSEG_P(result);
}
/***********************************************************************
**
** Routines for 2D lines.
**
***********************************************************************/
static bool
line_decode(const char *str, LINE *line)
{
char *tail;
while (isspace((unsigned char) *str))
str++;
if (*str++ != '{')
return false;
line->A = strtod(str, &tail);
if (tail <= str)
return false;
str = tail;
while (isspace((unsigned char) *str))
str++;
if (*str++ != DELIM)
return false;
line->B = strtod(str, &tail);
if (tail <= str)
return false;
str = tail;
while (isspace((unsigned char) *str))
str++;
if (*str++ != DELIM)
return false;
line->C = strtod(str, &tail);
if (tail <= str)
return false;
str = tail;
while (isspace((unsigned char) *str))
str++;
if (*str++ != '}')
return false;
while (isspace((unsigned char) *str))
str++;
if (*str)
return false;
return true;
}
Datum
line_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
LINE *line;
LSEG lseg;
int isopen;
char *s;
line = (LINE *) palloc(sizeof(LINE));
if (path_decode(TRUE, 2, str, &isopen, &s, &(lseg.p[0])) && *s == '\0')
{
if (FPeq(lseg.p[0].x, lseg.p[1].x) && FPeq(lseg.p[0].y, lseg.p[1].y))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid line specification: must be two distinct points")));
line_construct_pts(line, &lseg.p[0], &lseg.p[1]);
}
else if (line_decode(str, line))
{
if (FPzero(line->A) && FPzero(line->B))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid line specification: A and B cannot both be zero")));
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type line: \"%s\"", str)));
PG_RETURN_LINE_P(line);
}
Datum
line_out(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
int ndig = DBL_DIG + extra_float_digits;
if (ndig < 1)
ndig = 1;
PG_RETURN_CSTRING(psprintf("{%.*g,%.*g,%.*g}", ndig, line->A, ndig, line->B, ndig, line->C));
}
/*
* line_recv - converts external binary format to line
*/
Datum
line_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
LINE *line;
line = (LINE *) palloc(sizeof(LINE));
line->A = pq_getmsgfloat8(buf);
line->B = pq_getmsgfloat8(buf);
line->C = pq_getmsgfloat8(buf);
PG_RETURN_LINE_P(line);
}
/*
* line_send - converts line to binary format
*/
Datum
line_send(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, line->A);
pq_sendfloat8(&buf, line->B);
pq_sendfloat8(&buf, line->C);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*----------------------------------------------------------
* Conversion routines from one line formula to internal.
* Internal form: Ax+By+C=0
*---------------------------------------------------------*/
/* line_construct_pm()
* point-slope
*/
static LINE *
line_construct_pm(Point *pt, double m)
{
LINE *result = (LINE *) palloc(sizeof(LINE));
if (m == DBL_MAX)
{
/* vertical - use "x = C" */
result->A = -1;
result->B = 0;
result->C = pt->x;
}
else
{
/* use "mx - y + yinter = 0" */
result->A = m;
result->B = -1.0;
result->C = pt->y - m * pt->x;
}
return result;
}
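/*
 * A sketch of the non-vertical branch above: for pt = (1,2) and m = 3 the
 * result is A = 3, B = -1, C = 2 - 3*1 = -1, i.e. the line 3x - y - 1 = 0,
 * which passes through (1,2) with slope 3, as expected.
 */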
/*
* Fill already-allocated LINE struct from two points on the line
*/
static void
line_construct_pts(LINE *line, Point *pt1, Point *pt2)
{
if (FPeq(pt1->x, pt2->x))
{ /* vertical */
/* use "x = C" */
line->A = -1;
line->B = 0;
line->C = pt1->x;
#ifdef GEODEBUG
printf("line_construct_pts- line is vertical\n");
#endif
}
else if (FPeq(pt1->y, pt2->y))
{ /* horizontal */
/* use "y = C" */
line->A = 0;
line->B = -1;
line->C = pt1->y;
#ifdef GEODEBUG
printf("line_construct_pts- line is horizontal\n");
#endif
}
else
{
/* use "mx - y + yinter = 0" */
line->A = (pt2->y - pt1->y) / (pt2->x - pt1->x);
line->B = -1.0;
line->C = pt1->y - line->A * pt1->x;
		/*
		 * On some platforms the preceding expression tends to produce -0.
		 * Since -0.0 == 0.0 compares true, the assignment below quietly
		 * rewrites a minus zero as plus zero.
		 */
if (line->C == 0.0)
line->C = 0.0;
#ifdef GEODEBUG
printf("line_construct_pts- line is neither vertical nor horizontal (diffs x=%.*g, y=%.*g\n",
DBL_DIG, (pt2->x - pt1->x), DBL_DIG, (pt2->y - pt1->y));
#endif
}
}
/* line_construct_pp()
* two points
*/
Datum
line_construct_pp(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
LINE *result = (LINE *) palloc(sizeof(LINE));
line_construct_pts(result, pt1, pt2);
PG_RETURN_LINE_P(result);
}
/*----------------------------------------------------------
* Relative position routines.
*---------------------------------------------------------*/
Datum
line_intersect(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(!DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
LinePGetDatum(l2))));
}
Datum
line_parallel(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
if (FPzero(l1->B))
PG_RETURN_BOOL(FPzero(l2->B));
PG_RETURN_BOOL(FPeq(l2->A, l1->A * (l2->B / l1->B)));
}
Datum
line_perp(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
if (FPzero(l1->A))
PG_RETURN_BOOL(FPzero(l2->B));
else if (FPzero(l1->B))
PG_RETURN_BOOL(FPzero(l2->A));
PG_RETURN_BOOL(FPeq(((l1->A * l2->B) / (l1->B * l2->A)), -1.0));
}
Datum
line_vertical(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
PG_RETURN_BOOL(FPzero(line->B));
}
Datum
line_horizontal(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
PG_RETURN_BOOL(FPzero(line->A));
}
Datum
line_eq(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
double k;
if (!FPzero(l2->A))
k = l1->A / l2->A;
else if (!FPzero(l2->B))
k = l1->B / l2->B;
else if (!FPzero(l2->C))
k = l1->C / l2->C;
else
k = 1.0;
PG_RETURN_BOOL(FPeq(l1->A, k * l2->A) &&
FPeq(l1->B, k * l2->B) &&
FPeq(l1->C, k * l2->C));
}
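/*
 * A sketch of the proportionality test above: {1,2,3} and {2,4,6} describe
 * the same line; k is chosen as 1/2 from the A coefficients, and then
 * 1 = k*2, 2 = k*4 and 3 = k*6 all hold, so line_eq returns true.
 */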
/*----------------------------------------------------------
* Line arithmetic routines.
*---------------------------------------------------------*/
/* line_distance()
* Distance between two lines.
*/
Datum
line_distance(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
float8 result;
Point *tmp;
if (!DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
LinePGetDatum(l2))))
PG_RETURN_FLOAT8(0.0);
if (FPzero(l1->B)) /* vertical? */
PG_RETURN_FLOAT8(fabs(l1->C - l2->C));
tmp = point_construct(0.0, l1->C);
result = dist_pl_internal(tmp, l2);
PG_RETURN_FLOAT8(result);
}
/* line_interpt()
* Point where two lines l1, l2 intersect (if any)
*/
Datum
line_interpt(PG_FUNCTION_ARGS)
{
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
Point *result;
result = line_interpt_internal(l1, l2);
if (result == NULL)
PG_RETURN_NULL();
PG_RETURN_POINT_P(result);
}
/*
* Internal version of line_interpt
*
* returns a NULL pointer if no intersection point
*/
static Point *
line_interpt_internal(LINE *l1, LINE *l2)
{
Point *result;
double x,
y;
/*
* NOTE: if the lines are identical then we will find they are parallel
* and report "no intersection". This is a little weird, but since
* there's no *unique* intersection, maybe it's appropriate behavior.
*/
if (DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
LinePGetDatum(l2))))
return NULL;
if (FPzero(l1->B)) /* l1 vertical? */
{
x = l1->C;
y = (l2->A * x + l2->C);
}
else if (FPzero(l2->B)) /* l2 vertical? */
{
x = l2->C;
y = (l1->A * x + l1->C);
}
else
{
x = (l1->C - l2->C) / (l2->A - l1->A);
y = (l1->A * x + l1->C);
}
result = point_construct(x, y);
#ifdef GEODEBUG
printf("line_interpt- lines are A=%.*g, B=%.*g, C=%.*g, A=%.*g, B=%.*g, C=%.*g\n",
DBL_DIG, l1->A, DBL_DIG, l1->B, DBL_DIG, l1->C, DBL_DIG, l2->A, DBL_DIG, l2->B, DBL_DIG, l2->C);
printf("line_interpt- lines intersect at (%.*g,%.*g)\n", DBL_DIG, x, DBL_DIG, y);
#endif
return result;
}
/***********************************************************************
**
** Routines for 2D paths (sequences of line segments, also
** called `polylines').
**
** This is not a general package for geometric paths,
** which of course include polygons; the emphasis here
** is on (for example) usefulness in wire layout.
**
***********************************************************************/
/*----------------------------------------------------------
* String to path / path to string conversion.
* External format:
* "((xcoord, ycoord),... )"
* "[(xcoord, ycoord),... ]"
* "(xcoord, ycoord),... "
* "[xcoord, ycoord,... ]"
* Also support older format:
* "(closed, npts, xcoord, ycoord,... )"
*---------------------------------------------------------*/
Datum
path_area(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
double area = 0.0;
int i,
j;
if (!path->closed)
PG_RETURN_NULL();
for (i = 0; i < path->npts; i++)
{
j = (i + 1) % path->npts;
area += path->p[i].x * path->p[j].y;
area -= path->p[i].y * path->p[j].x;
}
area *= 0.5;
PG_RETURN_FLOAT8(area < 0.0 ? -area : area);
}
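/*
 * A worked instance of the shoelace sum above: for the closed path
 * ((0,0),(4,0),(4,3),(0,3)) the loop accumulates
 * (0*0 - 0*4) + (4*3 - 0*4) + (4*3 - 3*0) + (0*0 - 3*0) = 24,
 * and halving gives the expected rectangle area of 12.
 */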
Datum
path_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
PATH *path;
int isopen;
char *s;
int npts;
int size;
int base_size;
int depth = 0;
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type path: \"%s\"", str)));
s = str;
while (isspace((unsigned char) *s))
s++;
/* skip single leading paren */
if ((*s == LDELIM) && (strrchr(s, LDELIM) == s))
{
s++;
depth++;
}
base_size = sizeof(path->p[0]) * npts;
size = offsetof(PATH, p[0]) + base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(path->p[0]) || size <= base_size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
path = (PATH *) palloc(size);
SET_VARSIZE(path, size);
path->npts = npts;
if ((!path_decode(TRUE, npts, s, &isopen, &s, &(path->p[0])))
&& (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type path: \"%s\"", str)));
path->closed = (!isopen);
/* prevent instability in unused pad bytes */
path->dummy = 0;
PG_RETURN_PATH_P(path);
}
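/*
 * A sketch of the overflow check above, assuming a 32-bit int and
 * sizeof(path->p[0]) == 16: for npts = 2^28 the product 16 * 2^28 wraps
 * to 0, and 0 / npts == 0 != 16, so the wrap is detected; the second
 * test catches the case where adding the header offset itself wraps.
 */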
Datum
path_out(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
PG_RETURN_CSTRING(path_encode(path->closed ? PATH_CLOSED : PATH_OPEN, path->npts, path->p));
}
/*
* path_recv - converts external binary format to path
*
* External representation is closed flag (a boolean byte), int32 number
* of points, and the points.
*/
Datum
path_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
PATH *path;
int closed;
int32 npts;
int32 i;
int size;
closed = pq_getmsgbyte(buf);
npts = pq_getmsgint(buf, sizeof(int32));
if (npts <= 0 || npts >= (int32) ((INT_MAX - offsetof(PATH, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("invalid number of points in external \"path\" value")));
	size = offsetof(PATH, p[0]) + sizeof(path->p[0]) * npts;
path = (PATH *) palloc(size);
SET_VARSIZE(path, size);
path->npts = npts;
path->closed = (closed ? 1 : 0);
/* prevent instability in unused pad bytes */
path->dummy = 0;
for (i = 0; i < npts; i++)
{
path->p[i].x = pq_getmsgfloat8(buf);
path->p[i].y = pq_getmsgfloat8(buf);
}
PG_RETURN_PATH_P(path);
}
/*
* path_send - converts path to binary format
*/
Datum
path_send(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
StringInfoData buf;
int32 i;
pq_begintypsend(&buf);
pq_sendbyte(&buf, path->closed ? 1 : 0);
pq_sendint(&buf, path->npts, sizeof(int32));
for (i = 0; i < path->npts; i++)
{
pq_sendfloat8(&buf, path->p[i].x);
pq_sendfloat8(&buf, path->p[i].y);
}
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*----------------------------------------------------------
* Relational operators.
* These are based on the path cardinality,
* as stupid as that sounds.
*
* Better relops and access methods coming soon.
*---------------------------------------------------------*/
Datum
path_n_lt(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts < p2->npts);
}
Datum
path_n_gt(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts > p2->npts);
}
Datum
path_n_eq(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts == p2->npts);
}
Datum
path_n_le(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts <= p2->npts);
}
Datum
path_n_ge(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PG_RETURN_BOOL(p1->npts >= p2->npts);
}
/*----------------------------------------------------------
* Conversion operators.
*---------------------------------------------------------*/
Datum
path_isclosed(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
PG_RETURN_BOOL(path->closed);
}
Datum
path_isopen(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
PG_RETURN_BOOL(!path->closed);
}
Datum
path_npoints(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
PG_RETURN_INT32(path->npts);
}
Datum
path_close(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
path->closed = TRUE;
PG_RETURN_PATH_P(path);
}
Datum
path_open(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
path->closed = FALSE;
PG_RETURN_PATH_P(path);
}
/* path_inter -
* Does p1 intersect p2 at any point?
* Use bounding boxes for a quick (O(n)) check, then do a
* O(n^2) iterative edge check.
*/
Datum
path_inter(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
BOX b1,
b2;
int i,
j;
LSEG seg1,
seg2;
if (p1->npts <= 0 || p2->npts <= 0)
PG_RETURN_BOOL(false);
b1.high.x = b1.low.x = p1->p[0].x;
b1.high.y = b1.low.y = p1->p[0].y;
for (i = 1; i < p1->npts; i++)
{
b1.high.x = Max(p1->p[i].x, b1.high.x);
b1.high.y = Max(p1->p[i].y, b1.high.y);
b1.low.x = Min(p1->p[i].x, b1.low.x);
b1.low.y = Min(p1->p[i].y, b1.low.y);
}
b2.high.x = b2.low.x = p2->p[0].x;
b2.high.y = b2.low.y = p2->p[0].y;
for (i = 1; i < p2->npts; i++)
{
b2.high.x = Max(p2->p[i].x, b2.high.x);
b2.high.y = Max(p2->p[i].y, b2.high.y);
b2.low.x = Min(p2->p[i].x, b2.low.x);
b2.low.y = Min(p2->p[i].y, b2.low.y);
}
if (!box_ov(&b1, &b2))
PG_RETURN_BOOL(false);
/* pairwise check lseg intersections */
for (i = 0; i < p1->npts; i++)
{
int iprev;
if (i > 0)
iprev = i - 1;
else
{
if (!p1->closed)
continue;
iprev = p1->npts - 1; /* include the closure segment */
}
for (j = 0; j < p2->npts; j++)
{
int jprev;
if (j > 0)
jprev = j - 1;
else
{
if (!p2->closed)
continue;
jprev = p2->npts - 1; /* include the closure segment */
}
statlseg_construct(&seg1, &p1->p[iprev], &p1->p[i]);
statlseg_construct(&seg2, &p2->p[jprev], &p2->p[j]);
if (lseg_intersect_internal(&seg1, &seg2))
PG_RETURN_BOOL(true);
}
}
/* if we dropped through, no two segs intersected */
PG_RETURN_BOOL(false);
}
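/*
 * A sketch of the segment enumeration above: for a closed path with
 * npts = 4 the (iprev, i) pairs visited are (3,0), (0,1), (1,2), (2,3),
 * so the closure segment p[3]-p[0] is checked first; for an open path
 * the i == 0 iteration is skipped, leaving only (0,1), (1,2), (2,3).
 */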
/* path_distance()
* This essentially does a cartesian product of the lsegs in the
* two paths, and finds the min distance between any two lsegs
*/
Datum
path_distance(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
float8 min = 0.0; /* initialize to keep compiler quiet */
bool have_min = false;
float8 tmp;
int i,
j;
LSEG seg1,
seg2;
for (i = 0; i < p1->npts; i++)
{
int iprev;
if (i > 0)
iprev = i - 1;
else
{
if (!p1->closed)
continue;
iprev = p1->npts - 1; /* include the closure segment */
}
for (j = 0; j < p2->npts; j++)
{
int jprev;
if (j > 0)
jprev = j - 1;
else
{
if (!p2->closed)
continue;
jprev = p2->npts - 1; /* include the closure segment */
}
statlseg_construct(&seg1, &p1->p[iprev], &p1->p[i]);
statlseg_construct(&seg2, &p2->p[jprev], &p2->p[j]);
tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance,
LsegPGetDatum(&seg1),
LsegPGetDatum(&seg2)));
if (!have_min || tmp < min)
{
min = tmp;
have_min = true;
}
}
}
if (!have_min)
PG_RETURN_NULL();
PG_RETURN_FLOAT8(min);
}
/*----------------------------------------------------------
* "Arithmetic" operations.
*---------------------------------------------------------*/
Datum
path_length(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
float8 result = 0.0;
int i;
for (i = 0; i < path->npts; i++)
{
int iprev;
if (i > 0)
iprev = i - 1;
else
{
if (!path->closed)
continue;
iprev = path->npts - 1; /* include the closure segment */
}
result += point_dt(&path->p[iprev], &path->p[i]);
}
PG_RETURN_FLOAT8(result);
}
/***********************************************************************
**
** Routines for 2D points.
**
***********************************************************************/
/*----------------------------------------------------------
* String to point, point to string conversion.
* External format:
* "(x,y)"
* "x,y"
*---------------------------------------------------------*/
Datum
point_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
Point *point;
double x,
y;
char *s;
if (!pair_decode(str, &x, &y, &s) || (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type point: \"%s\"", str)));
point = (Point *) palloc(sizeof(Point));
point->x = x;
point->y = y;
PG_RETURN_POINT_P(point);
}
Datum
point_out(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
PG_RETURN_CSTRING(path_encode(PATH_NONE, 1, pt));
}
/*
* point_recv - converts external binary format to point
*/
Datum
point_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Point *point;
point = (Point *) palloc(sizeof(Point));
point->x = pq_getmsgfloat8(buf);
point->y = pq_getmsgfloat8(buf);
PG_RETURN_POINT_P(point);
}
/*
* point_send - converts point to binary format
*/
Datum
point_send(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, pt->x);
pq_sendfloat8(&buf, pt->y);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
static Point *
point_construct(double x, double y)
{
Point *result = (Point *) palloc(sizeof(Point));
result->x = x;
result->y = y;
return result;
}
static Point *
point_copy(Point *pt)
{
Point *result;
if (!PointerIsValid(pt))
return NULL;
result = (Point *) palloc(sizeof(Point));
result->x = pt->x;
result->y = pt->y;
return result;
}
/*----------------------------------------------------------
* Relational operators for Points.
* Since we do have a sense of coordinates being
* "equal" to a given accuracy (point_vert, point_horiz),
* the other ops must preserve that sense. This means
* that results may, strictly speaking, be a lie (unless
* EPSILON = 0.0).
*---------------------------------------------------------*/
Datum
point_left(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPlt(pt1->x, pt2->x));
}
Datum
point_right(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPgt(pt1->x, pt2->x));
}
Datum
point_above(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPgt(pt1->y, pt2->y));
}
Datum
point_below(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPlt(pt1->y, pt2->y));
}
Datum
point_vert(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPeq(pt1->x, pt2->x));
}
Datum
point_horiz(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPeq(pt1->y, pt2->y));
}
Datum
point_eq(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPeq(pt1->x, pt2->x) && FPeq(pt1->y, pt2->y));
}
Datum
point_ne(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(FPne(pt1->x, pt2->x) || FPne(pt1->y, pt2->y));
}
/*----------------------------------------------------------
* "Arithmetic" operators on points.
*---------------------------------------------------------*/
Datum
point_distance(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_FLOAT8(HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
}
double
point_dt(Point *pt1, Point *pt2)
{
#ifdef GEODEBUG
printf("point_dt- segment (%f,%f),(%f,%f) length is %f\n",
pt1->x, pt1->y, pt2->x, pt2->y, HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
#endif
return HYPOT(pt1->x - pt2->x, pt1->y - pt2->y);
}
Datum
point_slope(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
PG_RETURN_FLOAT8(point_sl(pt1, pt2));
}
double
point_sl(Point *pt1, Point *pt2)
{
return (FPeq(pt1->x, pt2->x)
? (double) DBL_MAX
: (pt1->y - pt2->y) / (pt1->x - pt2->x));
}
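/*
 * Note that a vertical point pair yields DBL_MAX as a slope sentinel;
 * callers such as line_construct_pm() and lseg_perp() test for exactly
 * that value rather than for an infinity.
 */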
/***********************************************************************
**
** Routines for 2D line segments.
**
***********************************************************************/
/*----------------------------------------------------------
* String to lseg, lseg to string conversion.
* External forms: "[(x1, y1), (x2, y2)]"
* "(x1, y1), (x2, y2)"
* "x1, y1, x2, y2"
* closed form ok "((x1, y1), (x2, y2))"
* (old form) "(x1, y1, x2, y2)"
*---------------------------------------------------------*/
Datum
lseg_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
LSEG *lseg;
int isopen;
char *s;
lseg = (LSEG *) palloc(sizeof(LSEG));
if ((!path_decode(TRUE, 2, str, &isopen, &s, &(lseg->p[0])))
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type lseg: \"%s\"", str)));
#ifdef NOT_USED
lseg->m = point_sl(&lseg->p[0], &lseg->p[1]);
#endif
PG_RETURN_LSEG_P(lseg);
}
Datum
lseg_out(PG_FUNCTION_ARGS)
{
LSEG *ls = PG_GETARG_LSEG_P(0);
PG_RETURN_CSTRING(path_encode(PATH_OPEN, 2, (Point *) &(ls->p[0])));
}
/*
* lseg_recv - converts external binary format to lseg
*/
Datum
lseg_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
LSEG *lseg;
lseg = (LSEG *) palloc(sizeof(LSEG));
lseg->p[0].x = pq_getmsgfloat8(buf);
lseg->p[0].y = pq_getmsgfloat8(buf);
lseg->p[1].x = pq_getmsgfloat8(buf);
lseg->p[1].y = pq_getmsgfloat8(buf);
#ifdef NOT_USED
lseg->m = point_sl(&lseg->p[0], &lseg->p[1]);
#endif
PG_RETURN_LSEG_P(lseg);
}
/*
* lseg_send - converts lseg to binary format
*/
Datum
lseg_send(PG_FUNCTION_ARGS)
{
LSEG *ls = PG_GETARG_LSEG_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, ls->p[0].x);
pq_sendfloat8(&buf, ls->p[0].y);
pq_sendfloat8(&buf, ls->p[1].x);
pq_sendfloat8(&buf, ls->p[1].y);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/* lseg_construct -
* form a LSEG from two Points.
*/
Datum
lseg_construct(PG_FUNCTION_ARGS)
{
Point *pt1 = PG_GETARG_POINT_P(0);
Point *pt2 = PG_GETARG_POINT_P(1);
LSEG *result = (LSEG *) palloc(sizeof(LSEG));
result->p[0].x = pt1->x;
result->p[0].y = pt1->y;
result->p[1].x = pt2->x;
result->p[1].y = pt2->y;
#ifdef NOT_USED
result->m = point_sl(pt1, pt2);
#endif
PG_RETURN_LSEG_P(result);
}
/* like lseg_construct, but assume space already allocated */
static void
statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2)
{
lseg->p[0].x = pt1->x;
lseg->p[0].y = pt1->y;
lseg->p[1].x = pt2->x;
lseg->p[1].y = pt2->y;
#ifdef NOT_USED
lseg->m = point_sl(pt1, pt2);
#endif
}
Datum
lseg_length(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
PG_RETURN_FLOAT8(point_dt(&lseg->p[0], &lseg->p[1]));
}
/*----------------------------------------------------------
* Relative position routines.
*---------------------------------------------------------*/
/*
** find intersection of the two lines, and see if it falls on
** both segments.
*/
Datum
lseg_intersect(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(lseg_intersect_internal(l1, l2));
}
static bool
lseg_intersect_internal(LSEG *l1, LSEG *l2)
{
LINE ln;
Point *interpt;
bool retval;
line_construct_pts(&ln, &l2->p[0], &l2->p[1]);
interpt = interpt_sl(l1, &ln);
if (interpt != NULL && on_ps_internal(interpt, l2))
retval = true; /* interpt on l1 and l2 */
else
retval = false;
return retval;
}
Datum
lseg_parallel(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
#ifdef NOT_USED
PG_RETURN_BOOL(FPeq(l1->m, l2->m));
#endif
PG_RETURN_BOOL(FPeq(point_sl(&l1->p[0], &l1->p[1]),
point_sl(&l2->p[0], &l2->p[1])));
}
/* lseg_perp()
* Determine if two line segments are perpendicular.
*
* This code did not get the correct answer for
* '((0,0),(0,1))'::lseg ?-| '((0,0),(1,0))'::lseg
* So, modified it to check explicitly for slope of vertical line
* returned by point_sl() and the results seem better.
* - thomas 1998-01-31
*/
Datum
lseg_perp(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
double m1,
m2;
m1 = point_sl(&(l1->p[0]), &(l1->p[1]));
m2 = point_sl(&(l2->p[0]), &(l2->p[1]));
#ifdef GEODEBUG
printf("lseg_perp- slopes are %g and %g\n", m1, m2);
#endif
if (FPzero(m1))
PG_RETURN_BOOL(FPeq(m2, DBL_MAX));
else if (FPzero(m2))
PG_RETURN_BOOL(FPeq(m1, DBL_MAX));
PG_RETURN_BOOL(FPeq(m1 / m2, -1.0));
}
Datum
lseg_vertical(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
PG_RETURN_BOOL(FPeq(lseg->p[0].x, lseg->p[1].x));
}
Datum
lseg_horizontal(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
PG_RETURN_BOOL(FPeq(lseg->p[0].y, lseg->p[1].y));
}
Datum
lseg_eq(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPeq(l1->p[0].x, l2->p[0].x) &&
FPeq(l1->p[0].y, l2->p[0].y) &&
FPeq(l1->p[1].x, l2->p[1].x) &&
FPeq(l1->p[1].y, l2->p[1].y));
}
Datum
lseg_ne(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(!FPeq(l1->p[0].x, l2->p[0].x) ||
!FPeq(l1->p[0].y, l2->p[0].y) ||
!FPeq(l1->p[1].x, l2->p[1].x) ||
!FPeq(l1->p[1].y, l2->p[1].y));
}
Datum
lseg_lt(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPlt(point_dt(&l1->p[0], &l1->p[1]),
point_dt(&l2->p[0], &l2->p[1])));
}
Datum
lseg_le(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPle(point_dt(&l1->p[0], &l1->p[1]),
point_dt(&l2->p[0], &l2->p[1])));
}
Datum
lseg_gt(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPgt(point_dt(&l1->p[0], &l1->p[1]),
point_dt(&l2->p[0], &l2->p[1])));
}
Datum
lseg_ge(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(FPge(point_dt(&l1->p[0], &l1->p[1]),
point_dt(&l2->p[0], &l2->p[1])));
}
/*----------------------------------------------------------
* Line arithmetic routines.
*---------------------------------------------------------*/
/* lseg_distance -
* If two segments don't intersect, then the closest
* point will be from one of the endpoints to the other
* segment.
*/
Datum
lseg_distance(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
PG_RETURN_FLOAT8(lseg_dt(l1, l2));
}
/* lseg_dt()
* Distance between two line segments.
* Must check both sets of endpoints to ensure minimum distance is found.
* - thomas 1998-02-01
*/
static double
lseg_dt(LSEG *l1, LSEG *l2)
{
double result,
d;
if (lseg_intersect_internal(l1, l2))
return 0.0;
d = dist_ps_internal(&l1->p[0], l2);
result = d;
d = dist_ps_internal(&l1->p[1], l2);
result = Min(result, d);
d = dist_ps_internal(&l2->p[0], l1);
result = Min(result, d);
d = dist_ps_internal(&l2->p[1], l1);
result = Min(result, d);
return result;
}
Datum
lseg_center(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = (lseg->p[0].x + lseg->p[1].x) / 2.0;
result->y = (lseg->p[0].y + lseg->p[1].y) / 2.0;
PG_RETURN_POINT_P(result);
}
static Point *
lseg_interpt_internal(LSEG *l1, LSEG *l2)
{
Point *result;
LINE tmp1,
tmp2;
/*
* Find the intersection of the appropriate lines, if any.
*/
line_construct_pts(&tmp1, &l1->p[0], &l1->p[1]);
line_construct_pts(&tmp2, &l2->p[0], &l2->p[1]);
result = line_interpt_internal(&tmp1, &tmp2);
if (!PointerIsValid(result))
return NULL;
/*
* If the line intersection point isn't within l1 (or equivalently l2),
* there is no valid segment intersection point at all.
*/
if (!on_ps_internal(result, l1) ||
!on_ps_internal(result, l2))
{
pfree(result);
return NULL;
}
/*
* If there is an intersection, then check explicitly for matching
* endpoints since there may be rounding effects with annoying lsb
* residue. - tgl 1997-07-09
*/
if ((FPeq(l1->p[0].x, l2->p[0].x) && FPeq(l1->p[0].y, l2->p[0].y)) ||
(FPeq(l1->p[0].x, l2->p[1].x) && FPeq(l1->p[0].y, l2->p[1].y)))
{
result->x = l1->p[0].x;
result->y = l1->p[0].y;
}
else if ((FPeq(l1->p[1].x, l2->p[0].x) && FPeq(l1->p[1].y, l2->p[0].y)) ||
(FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
{
result->x = l1->p[1].x;
result->y = l1->p[1].y;
}
return result;
}
/* lseg_interpt -
* Find the intersection point of two segments (if any).
*/
Datum
lseg_interpt(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
Point *result;
result = lseg_interpt_internal(l1, l2);
if (!PointerIsValid(result))
PG_RETURN_NULL();
PG_RETURN_POINT_P(result);
}
/***********************************************************************
**
** Routines for position comparisons of differently-typed
** 2D objects.
**
***********************************************************************/
/*---------------------------------------------------------------------
* dist_
* Minimum distance from one object to another.
*-------------------------------------------------------------------*/
Datum
dist_pl(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_FLOAT8(dist_pl_internal(pt, line));
}
static double
dist_pl_internal(Point *pt, LINE *line)
{
return fabs((line->A * pt->x + line->B * pt->y + line->C) /
HYPOT(line->A, line->B));
}
Datum
dist_ps(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LSEG *lseg = PG_GETARG_LSEG_P(1);
PG_RETURN_FLOAT8(dist_ps_internal(pt, lseg));
}
static double
dist_ps_internal(Point *pt, LSEG *lseg)
{
double m; /* slope of perp. */
LINE *ln;
double result,
tmpdist;
Point *ip;
/*
* Construct a line perpendicular to the input segment and through the
* input point
*/
	if (lseg->p[1].x == lseg->p[0].x)
		m = 0;					/* segment is vertical, so perpendicular is horizontal */
else if (lseg->p[1].y == lseg->p[0].y)
m = (double) DBL_MAX; /* slope is infinite */
else
m = (lseg->p[0].x - lseg->p[1].x) / (lseg->p[1].y - lseg->p[0].y);
ln = line_construct_pm(pt, m);
#ifdef GEODEBUG
printf("dist_ps- line is A=%g B=%g C=%g from (point) slope (%f,%f) %g\n",
ln->A, ln->B, ln->C, pt->x, pt->y, m);
#endif
/*
* Calculate distance to the line segment or to the nearest endpoint of
* the segment.
*/
/* intersection is on the line segment? */
if ((ip = interpt_sl(lseg, ln)) != NULL)
{
/* yes, so use distance to the intersection point */
result = point_dt(pt, ip);
#ifdef GEODEBUG
printf("dist_ps- distance is %f to intersection point is (%f,%f)\n",
result, ip->x, ip->y);
#endif
}
else
{
/* no, so use distance to the nearer endpoint */
result = point_dt(pt, &lseg->p[0]);
tmpdist = point_dt(pt, &lseg->p[1]);
if (tmpdist < result)
result = tmpdist;
}
return result;
}
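/*
 * A sketch of the perpendicular-slope computation above: for the segment
 * (0,0)-(4,4), m = (0 - 4) / (4 - 0) = -1, the negative reciprocal of the
 * segment's own slope 1; a vertical segment gets a horizontal
 * perpendicular (m = 0) and a horizontal one gets the DBL_MAX sentinel.
 */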
/*
** Distance from a point to a path
*/
Datum
dist_ppath(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
PATH *path = PG_GETARG_PATH_P(1);
float8 result = 0.0; /* keep compiler quiet */
bool have_min = false;
float8 tmp;
int i;
LSEG lseg;
switch (path->npts)
{
case 0:
/* no points in path? then result is undefined... */
PG_RETURN_NULL();
case 1:
/* one point in path? then get distance between two points... */
result = point_dt(pt, &path->p[0]);
break;
default:
/* make sure the path makes sense... */
Assert(path->npts > 1);
/*
* the distance from a point to a path is the smallest distance
* from the point to any of its constituent segments.
*/
for (i = 0; i < path->npts; i++)
{
int iprev;
if (i > 0)
iprev = i - 1;
else
{
if (!path->closed)
continue;
iprev = path->npts - 1; /* include the closure segment */
}
statlseg_construct(&lseg, &path->p[iprev], &path->p[i]);
tmp = dist_ps_internal(pt, &lseg);
if (!have_min || tmp < result)
{
result = tmp;
have_min = true;
}
}
break;
}
PG_RETURN_FLOAT8(result);
}
Datum
dist_pb(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
BOX *box = PG_GETARG_BOX_P(1);
float8 result;
Point *near;
near = DatumGetPointP(DirectFunctionCall2(close_pb,
PointPGetDatum(pt),
BoxPGetDatum(box)));
result = point_dt(near, pt);
PG_RETURN_FLOAT8(result);
}
Datum
dist_sl(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
LINE *line = PG_GETARG_LINE_P(1);
float8 result,
d2;
if (has_interpt_sl(lseg, line))
result = 0.0;
else
{
result = dist_pl_internal(&lseg->p[0], line);
d2 = dist_pl_internal(&lseg->p[1], line);
/* XXX shouldn't we take the min not max? */
if (d2 > result)
result = d2;
}
PG_RETURN_FLOAT8(result);
}
Datum
dist_sb(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
BOX *box = PG_GETARG_BOX_P(1);
Point *tmp;
Datum result;
tmp = DatumGetPointP(DirectFunctionCall2(close_sb,
LsegPGetDatum(lseg),
BoxPGetDatum(box)));
result = DirectFunctionCall2(dist_pb,
PointPGetDatum(tmp),
BoxPGetDatum(box));
PG_RETURN_DATUM(result);
}
Datum
dist_lb(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
#endif
/* need to think about this one for a while */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"dist_lb\" not implemented")));
PG_RETURN_NULL();
}
Datum
dist_cpoly(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
POLYGON *poly = PG_GETARG_POLYGON_P(1);
float8 result;
float8 d;
int i;
LSEG seg;
if (point_inside(&(circle->center), poly->npts, poly->p) != 0)
{
#ifdef GEODEBUG
printf("dist_cpoly- center inside of polygon\n");
#endif
PG_RETURN_FLOAT8(0.0);
}
/* initialize distance with segment between first and last points */
seg.p[0].x = poly->p[0].x;
seg.p[0].y = poly->p[0].y;
seg.p[1].x = poly->p[poly->npts - 1].x;
seg.p[1].y = poly->p[poly->npts - 1].y;
result = dist_ps_internal(&circle->center, &seg);
#ifdef GEODEBUG
printf("dist_cpoly- segment 0/n distance is %f\n", result);
#endif
/* check distances for other segments */
for (i = 0; (i < poly->npts - 1); i++)
{
seg.p[0].x = poly->p[i].x;
seg.p[0].y = poly->p[i].y;
seg.p[1].x = poly->p[i + 1].x;
seg.p[1].y = poly->p[i + 1].y;
d = dist_ps_internal(&circle->center, &seg);
#ifdef GEODEBUG
printf("dist_cpoly- segment %d distance is %f\n", (i + 1), d);
#endif
if (d < result)
result = d;
}
result -= circle->radius;
if (result < 0)
result = 0;
PG_RETURN_FLOAT8(result);
}
/*---------------------------------------------------------------------
* interpt_
* Intersection point of objects.
* We choose to ignore the "point" of intersection between
* lines and boxes, since there are typically two.
*-------------------------------------------------------------------*/
/* Get intersection point of lseg and line; returns NULL if no intersection */
static Point *
interpt_sl(LSEG *lseg, LINE *line)
{
LINE tmp;
Point *p;
line_construct_pts(&tmp, &lseg->p[0], &lseg->p[1]);
p = line_interpt_internal(&tmp, line);
#ifdef GEODEBUG
printf("interpt_sl- segment is (%.*g %.*g) (%.*g %.*g)\n",
DBL_DIG, lseg->p[0].x, DBL_DIG, lseg->p[0].y, DBL_DIG, lseg->p[1].x, DBL_DIG, lseg->p[1].y);
printf("interpt_sl- segment becomes line A=%.*g B=%.*g C=%.*g\n",
DBL_DIG, tmp.A, DBL_DIG, tmp.B, DBL_DIG, tmp.C);
#endif
if (PointerIsValid(p))
{
#ifdef GEODEBUG
printf("interpt_sl- intersection point is (%.*g %.*g)\n", DBL_DIG, p->x, DBL_DIG, p->y);
#endif
if (on_ps_internal(p, lseg))
{
#ifdef GEODEBUG
printf("interpt_sl- intersection point is on segment\n");
#endif
}
else
p = NULL;
}
return p;
}
/* variant: just indicate if intersection point exists */
static bool
has_interpt_sl(LSEG *lseg, LINE *line)
{
Point *tmp;
tmp = interpt_sl(lseg, line);
if (tmp)
return true;
return false;
}
/*---------------------------------------------------------------------
* close_
* Point of closest proximity between objects.
*-------------------------------------------------------------------*/
/* close_pl -
* The intersection point of a perpendicular of the line
* through the point.
*/
Datum
close_pl(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LINE *line = PG_GETARG_LINE_P(1);
Point *result;
LINE *tmp;
double invm;
result = (Point *) palloc(sizeof(Point));
if (FPzero(line->B)) /* vertical? */
{
result->x = line->C;
result->y = pt->y;
PG_RETURN_POINT_P(result);
}
if (FPzero(line->A)) /* horizontal? */
{
result->x = pt->x;
result->y = line->C;
PG_RETURN_POINT_P(result);
}
/* drop a perpendicular and find the intersection point */
/* invert and flip the sign on the slope to get a perpendicular */
invm = line->B / line->A;
tmp = line_construct_pm(pt, invm);
result = line_interpt_internal(tmp, line);
Assert(result != NULL);
PG_RETURN_POINT_P(result);
}
/* close_ps()
* Closest point on line segment to specified point.
* Take the closest endpoint if the point is left, right,
* above, or below the segment, otherwise find the intersection
* point of the segment and its perpendicular through the point.
*
* Some tricky code here, relying on boolean expressions
* evaluating to only zero or one to use as an array index.
* bug fixes by gthaker@atl.lmco.com; May 1, 1998
*/
Datum
close_ps(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LSEG *lseg = PG_GETARG_LSEG_P(1);
Point *result = NULL;
LINE *tmp;
double invm;
int xh,
yh;
#ifdef GEODEBUG
printf("close_sp:pt->x %f pt->y %f\nlseg(0).x %f lseg(0).y %f lseg(1).x %f lseg(1).y %f\n",
pt->x, pt->y, lseg->p[0].x, lseg->p[0].y,
lseg->p[1].x, lseg->p[1].y);
#endif
	/* xh (or yh) is the index of the upper x (or y) end point of lseg */
	/* !xh (or !yh) is the index of the lower x (or y) end point of lseg */
xh = lseg->p[0].x < lseg->p[1].x;
yh = lseg->p[0].y < lseg->p[1].y;
if (FPeq(lseg->p[0].x, lseg->p[1].x)) /* vertical? */
{
#ifdef GEODEBUG
printf("close_ps- segment is vertical\n");
#endif
/* first check if point is below or above the entire lseg. */
if (pt->y < lseg->p[!yh].y)
result = point_copy(&lseg->p[!yh]); /* below the lseg */
else if (pt->y > lseg->p[yh].y)
result = point_copy(&lseg->p[yh]); /* above the lseg */
if (result != NULL)
PG_RETURN_POINT_P(result);
	/* point lies alongside (to the left or right of) the vertical lseg */
result = (Point *) palloc(sizeof(Point));
result->x = lseg->p[0].x;
result->y = pt->y;
PG_RETURN_POINT_P(result);
}
else if (FPeq(lseg->p[0].y, lseg->p[1].y)) /* horizontal? */
{
#ifdef GEODEBUG
printf("close_ps- segment is horizontal\n");
#endif
/* first check if point is left or right of the entire lseg. */
if (pt->x < lseg->p[!xh].x)
result = point_copy(&lseg->p[!xh]); /* left of the lseg */
else if (pt->x > lseg->p[xh].x)
result = point_copy(&lseg->p[xh]); /* right of the lseg */
if (result != NULL)
PG_RETURN_POINT_P(result);
	/* point lies alongside (above or below) the horizontal lseg */
result = (Point *) palloc(sizeof(Point));
result->x = pt->x;
result->y = lseg->p[0].y;
PG_RETURN_POINT_P(result);
}
/*
 * vert. and horiz. cases are done; now check whether the closest point is
 * one of the end points or someplace on the lseg.
*/
invm = -1.0 / point_sl(&(lseg->p[0]), &(lseg->p[1]));
tmp = line_construct_pm(&lseg->p[!yh], invm); /* lower edge of the
* "band" */
if (pt->y < (tmp->A * pt->x + tmp->C))
{ /* we are below the lower edge */
result = point_copy(&lseg->p[!yh]); /* below the lseg, take lower
* end pt */
#ifdef GEODEBUG
printf("close_ps below: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
#endif
PG_RETURN_POINT_P(result);
}
tmp = line_construct_pm(&lseg->p[yh], invm); /* upper edge of the
* "band" */
if (pt->y > (tmp->A * pt->x + tmp->C))
	{							/* we are above the upper edge */
result = point_copy(&lseg->p[yh]); /* above the lseg, take higher
* end pt */
#ifdef GEODEBUG
printf("close_ps above: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
#endif
PG_RETURN_POINT_P(result);
}
/*
* at this point the "normal" from point will hit lseg. The closet point
* will be somewhere on the lseg
*/
tmp = line_construct_pm(pt, invm);
#ifdef GEODEBUG
printf("close_ps- tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
#endif
result = interpt_sl(lseg, tmp);
Assert(result != NULL);
#ifdef GEODEBUG
printf("close_ps- result.x %f result.y %f\n", result->x, result->y);
#endif
PG_RETURN_POINT_P(result);
}
/* close_lseg()
* Closest point to l1 on l2.
*/
Datum
close_lseg(PG_FUNCTION_ARGS)
{
LSEG *l1 = PG_GETARG_LSEG_P(0);
LSEG *l2 = PG_GETARG_LSEG_P(1);
Point *result = NULL;
Point point;
double dist;
double d;
d = dist_ps_internal(&l1->p[0], l2);
dist = d;
memcpy(&point, &l1->p[0], sizeof(Point));
if ((d = dist_ps_internal(&l1->p[1], l2)) < dist)
{
dist = d;
memcpy(&point, &l1->p[1], sizeof(Point));
}
if (dist_ps_internal(&l2->p[0], l1) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
PointPGetDatum(&l2->p[0]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
if (dist_ps_internal(&l2->p[1], l1) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
PointPGetDatum(&l2->p[1]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
if (result == NULL)
result = point_copy(&point);
PG_RETURN_POINT_P(result);
}
/* close_pb()
* Closest point on or in box to specified point.
*/
Datum
close_pb(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
BOX *box = PG_GETARG_BOX_P(1);
LSEG lseg,
seg;
Point point;
double dist,
d;
if (DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(pt),
BoxPGetDatum(box))))
PG_RETURN_POINT_P(pt);
/* pairwise check lseg distances */
point.x = box->low.x;
point.y = box->high.y;
statlseg_construct(&lseg, &box->low, &point);
dist = dist_ps_internal(pt, &lseg);
statlseg_construct(&seg, &box->high, &point);
if ((d = dist_ps_internal(pt, &seg)) < dist)
{
dist = d;
memcpy(&lseg, &seg, sizeof(lseg));
}
point.x = box->high.x;
point.y = box->low.y;
statlseg_construct(&seg, &box->low, &point);
if ((d = dist_ps_internal(pt, &seg)) < dist)
{
dist = d;
memcpy(&lseg, &seg, sizeof(lseg));
}
statlseg_construct(&seg, &box->high, &point);
if ((d = dist_ps_internal(pt, &seg)) < dist)
{
dist = d;
memcpy(&lseg, &seg, sizeof(lseg));
}
PG_RETURN_DATUM(DirectFunctionCall2(close_ps,
PointPGetDatum(pt),
LsegPGetDatum(&lseg)));
}
/* close_sl()
* Closest point on line to line segment.
*
* XXX THIS CODE IS WRONG
* The code is actually calculating the point on the line segment
* which is backwards from the routine naming convention.
* Copied code to new routine close_ls() but haven't fixed this one yet.
* - thomas 1998-01-31
*/
Datum
close_sl(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
LSEG *lseg = PG_GETARG_LSEG_P(0);
LINE *line = PG_GETARG_LINE_P(1);
Point *result;
float8 d1,
d2;
result = interpt_sl(lseg, line);
if (result)
PG_RETURN_POINT_P(result);
d1 = dist_pl_internal(&lseg->p[0], line);
d2 = dist_pl_internal(&lseg->p[1], line);
if (d1 < d2)
result = point_copy(&lseg->p[0]);
else
result = point_copy(&lseg->p[1]);
PG_RETURN_POINT_P(result);
#endif
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"close_sl\" not implemented")));
PG_RETURN_NULL();
}
/* close_ls()
* Closest point on line segment to line.
*/
Datum
close_ls(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
LSEG *lseg = PG_GETARG_LSEG_P(1);
Point *result;
float8 d1,
d2;
result = interpt_sl(lseg, line);
if (result)
PG_RETURN_POINT_P(result);
d1 = dist_pl_internal(&lseg->p[0], line);
d2 = dist_pl_internal(&lseg->p[1], line);
if (d1 < d2)
result = point_copy(&lseg->p[0]);
else
result = point_copy(&lseg->p[1]);
PG_RETURN_POINT_P(result);
}
/* close_sb()
* Closest point on or in box to line segment.
*/
Datum
close_sb(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
BOX *box = PG_GETARG_BOX_P(1);
Point point;
LSEG bseg,
seg;
double dist,
d;
/* segment intersects box? then just return closest point to center */
if (DatumGetBool(DirectFunctionCall2(inter_sb,
LsegPGetDatum(lseg),
BoxPGetDatum(box))))
{
box_cn(&point, box);
PG_RETURN_DATUM(DirectFunctionCall2(close_ps,
PointPGetDatum(&point),
LsegPGetDatum(lseg)));
}
/* pairwise check lseg distances */
point.x = box->low.x;
point.y = box->high.y;
statlseg_construct(&bseg, &box->low, &point);
dist = lseg_dt(lseg, &bseg);
statlseg_construct(&seg, &box->high, &point);
if ((d = lseg_dt(lseg, &seg)) < dist)
{
dist = d;
memcpy(&bseg, &seg, sizeof(bseg));
}
point.x = box->high.x;
point.y = box->low.y;
statlseg_construct(&seg, &box->low, &point);
if ((d = lseg_dt(lseg, &seg)) < dist)
{
dist = d;
memcpy(&bseg, &seg, sizeof(bseg));
}
statlseg_construct(&seg, &box->high, &point);
if ((d = lseg_dt(lseg, &seg)) < dist)
{
dist = d;
memcpy(&bseg, &seg, sizeof(bseg));
}
/* OK, we now have the closest line segment on the box boundary */
PG_RETURN_DATUM(DirectFunctionCall2(close_lseg,
LsegPGetDatum(lseg),
LsegPGetDatum(&bseg)));
}
Datum
close_lb(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
#endif
/* think about this one for a while */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"close_lb\" not implemented")));
PG_RETURN_NULL();
}
/*---------------------------------------------------------------------
* on_
* Whether one object lies completely within another.
*-------------------------------------------------------------------*/
/* on_pl -
* Does the point satisfy the equation?
*/
Datum
on_pl(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(FPzero(line->A * pt->x + line->B * pt->y + line->C));
}
/* on_ps -
 * Determine collinearity by checking whether the triangle inequality degenerates to equality.
* This algorithm seems to behave nicely even with lsb residues - tgl 1997-07-09
*/
Datum
on_ps(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
LSEG *lseg = PG_GETARG_LSEG_P(1);
PG_RETURN_BOOL(on_ps_internal(pt, lseg));
}
static bool
on_ps_internal(Point *pt, LSEG *lseg)
{
return FPeq(point_dt(pt, &lseg->p[0]) + point_dt(pt, &lseg->p[1]),
point_dt(&lseg->p[0], &lseg->p[1]));
}
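/*
 * A sketch of the test above: for pt = (1,1) on the segment (0,0)-(2,2)
 * the partial distances sum to sqrt(2) + sqrt(2) = 2*sqrt(2), exactly the
 * segment length, so FPeq() accepts; for pt = (1,0) the sum is
 * 1 + sqrt(5), strictly greater, so the point is rejected.
 */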
Datum
on_pb(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
BOX *box = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(pt->x <= box->high.x && pt->x >= box->low.x &&
pt->y <= box->high.y && pt->y >= box->low.y);
}
Datum
box_contain_pt(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *pt = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(pt->x <= box->high.x && pt->x >= box->low.x &&
pt->y <= box->high.y && pt->y >= box->low.y);
}
/* on_ppath -
* Whether a point lies within (on) a polyline.
* If open, we have to (groan) check each segment.
* (uses same algorithm as for point intersecting segment - tgl 1997-07-09)
* If closed, we use the old O(n) ray method for point-in-polygon.
* The ray is horizontal, from pt out to the right.
* Each segment that crosses the ray counts as an
* intersection; note that an endpoint or edge may touch
* but not cross.
* (we can do p-in-p in lg(n), but it takes preprocessing)
*/
Datum
on_ppath(PG_FUNCTION_ARGS)
{
Point *pt = PG_GETARG_POINT_P(0);
PATH *path = PG_GETARG_PATH_P(1);
int i,
n;
double a,
b;
/*-- OPEN --*/
if (!path->closed)
{
n = path->npts - 1;
a = point_dt(pt, &path->p[0]);
for (i = 0; i < n; i++)
{
b = point_dt(pt, &path->p[i + 1]);
if (FPeq(a + b,
point_dt(&path->p[i], &path->p[i + 1])))
PG_RETURN_BOOL(true);
a = b;
}
PG_RETURN_BOOL(false);
}
/*-- CLOSED --*/
PG_RETURN_BOOL(point_inside(pt, path->npts, path->p) != 0);
}
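/*
 * A sketch of the closed-path ray method: for the square path
 * ((0,0),(4,0),(4,4),(0,4)), a rightward ray from (2,2) crosses exactly
 * one edge (the one on x = 4), an odd count, so the point is inside;
 * a ray from (5,2) crosses no edge, an even count, so it is outside.
 */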
Datum
on_sl(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pl,
PointPGetDatum(&lseg->p[0]),
LinePGetDatum(line))) &&
DatumGetBool(DirectFunctionCall2(on_pl,
PointPGetDatum(&lseg->p[1]),
LinePGetDatum(line))));
}
Datum
on_sb(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
BOX *box = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(&lseg->p[0]),
BoxPGetDatum(box))) &&
DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(&lseg->p[1]),
BoxPGetDatum(box))));
}
/*---------------------------------------------------------------------
* inter_
* Whether one object intersects another.
*-------------------------------------------------------------------*/
Datum
inter_sl(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(has_interpt_sl(lseg, line));
}
/* inter_sb()
* Do line segment and box intersect?
*
* Segment completely inside box counts as intersection.
* If you want only segments crossing box boundaries,
* try converting box to path first.
*
* Optimize for non-intersection by checking for box intersection first.
* - thomas 1998-01-30
*/
Datum
inter_sb(PG_FUNCTION_ARGS)
{
LSEG *lseg = PG_GETARG_LSEG_P(0);
BOX *box = PG_GETARG_BOX_P(1);
BOX lbox;
LSEG bseg;
Point point;
lbox.low.x = Min(lseg->p[0].x, lseg->p[1].x);
lbox.low.y = Min(lseg->p[0].y, lseg->p[1].y);
lbox.high.x = Max(lseg->p[0].x, lseg->p[1].x);
lbox.high.y = Max(lseg->p[0].y, lseg->p[1].y);
/* nothing close to overlap? then not going to intersect */
if (!box_ov(&lbox, box))
PG_RETURN_BOOL(false);
/* an endpoint of segment is inside box? then clearly intersects */
if (DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(&lseg->p[0]),
BoxPGetDatum(box))) ||
DatumGetBool(DirectFunctionCall2(on_pb,
PointPGetDatum(&lseg->p[1]),
BoxPGetDatum(box))))
PG_RETURN_BOOL(true);
/* pairwise check lseg intersections */
point.x = box->low.x;
point.y = box->high.y;
statlseg_construct(&bseg, &box->low, &point);
if (lseg_intersect_internal(&bseg, lseg))
PG_RETURN_BOOL(true);
statlseg_construct(&bseg, &box->high, &point);
if (lseg_intersect_internal(&bseg, lseg))
PG_RETURN_BOOL(true);
point.x = box->high.x;
point.y = box->low.y;
statlseg_construct(&bseg, &box->low, &point);
if (lseg_intersect_internal(&bseg, lseg))
PG_RETURN_BOOL(true);
statlseg_construct(&bseg, &box->high, &point);
if (lseg_intersect_internal(&bseg, lseg))
PG_RETURN_BOOL(true);
/* if we dropped through, no two segs intersected */
PG_RETURN_BOOL(false);
}
/* inter_lb()
* Do line and box intersect?
*/
Datum
inter_lb(PG_FUNCTION_ARGS)
{
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
LSEG bseg;
Point p1,
p2;
/* pairwise check lseg intersections */
p1.x = box->low.x;
p1.y = box->low.y;
p2.x = box->low.x;
p2.y = box->high.y;
statlseg_construct(&bseg, &p1, &p2);
if (has_interpt_sl(&bseg, line))
PG_RETURN_BOOL(true);
p1.x = box->high.x;
p1.y = box->high.y;
statlseg_construct(&bseg, &p1, &p2);
if (has_interpt_sl(&bseg, line))
PG_RETURN_BOOL(true);
p2.x = box->high.x;
p2.y = box->low.y;
statlseg_construct(&bseg, &p1, &p2);
if (has_interpt_sl(&bseg, line))
PG_RETURN_BOOL(true);
p1.x = box->low.x;
p1.y = box->low.y;
statlseg_construct(&bseg, &p1, &p2);
if (has_interpt_sl(&bseg, line))
PG_RETURN_BOOL(true);
/* if we dropped through, no intersection */
PG_RETURN_BOOL(false);
}
/*------------------------------------------------------------------
* The following routines define a data type and operator class for
* POLYGONS .... Part of which (the polygon's bounding box) is built on
* top of the BOX data type.
*
* make_bound_box - create the bounding box for the input polygon
*------------------------------------------------------------------*/
/*---------------------------------------------------------------------
* Make the smallest bounding box for the given polygon.
*---------------------------------------------------------------------*/
static void
make_bound_box(POLYGON *poly)
{
int i;
double x1,
y1,
x2,
y2;
if (poly->npts > 0)
{
x2 = x1 = poly->p[0].x;
y2 = y1 = poly->p[0].y;
for (i = 1; i < poly->npts; i++)
{
if (poly->p[i].x < x1)
x1 = poly->p[i].x;
if (poly->p[i].x > x2)
x2 = poly->p[i].x;
if (poly->p[i].y < y1)
y1 = poly->p[i].y;
if (poly->p[i].y > y2)
y2 = poly->p[i].y;
}
box_fill(&(poly->boundbox), x1, x2, y1, y2);
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot create bounding box for empty polygon")));
}
/*------------------------------------------------------------------
* poly_in - read in the polygon from a string specification
*
* External format:
* "((x0,y0),...,(xn,yn))"
* "x0,y0,...,xn,yn"
* also supports the older style "(x1,...,xn,y1,...yn)"
*------------------------------------------------------------------*/
Datum
poly_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
POLYGON *poly;
int npts;
int size;
int base_size;
int isopen;
char *s;
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type polygon: \"%s\"", str)));
base_size = sizeof(poly->p[0]) * npts;
size = offsetof(POLYGON, p[0]) + base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(poly->p[0]) || size <= base_size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
poly = (POLYGON *) palloc0(size); /* zero any holes */
SET_VARSIZE(poly, size);
poly->npts = npts;
if ((!path_decode(FALSE, npts, str, &isopen, &s, &(poly->p[0])))
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type polygon: \"%s\"", str)));
make_bound_box(poly);
PG_RETURN_POLYGON_P(poly);
}
/*---------------------------------------------------------------
* poly_out - convert internal POLYGON representation to the
* character string format "((f8,f8),...,(f8,f8))"
*---------------------------------------------------------------*/
Datum
poly_out(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
PG_RETURN_CSTRING(path_encode(PATH_CLOSED, poly->npts, poly->p));
}
/*
* poly_recv - converts external binary format to polygon
*
* External representation is int32 number of points, and the points.
* We recompute the bounding box on read, instead of trusting it to
* be valid. (Checking it would take just as long, so may as well
* omit it from external representation.)
*/
Datum
poly_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
POLYGON *poly;
int32 npts;
int32 i;
int size;
npts = pq_getmsgint(buf, sizeof(int32));
if (npts <= 0 || npts >= (int32) ((INT_MAX - offsetof(POLYGON, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("invalid number of points in external \"polygon\" value")));
	size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
SET_VARSIZE(poly, size);
poly->npts = npts;
for (i = 0; i < npts; i++)
{
poly->p[i].x = pq_getmsgfloat8(buf);
poly->p[i].y = pq_getmsgfloat8(buf);
}
make_bound_box(poly);
PG_RETURN_POLYGON_P(poly);
}
/*
* poly_send - converts polygon to binary format
*/
Datum
poly_send(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
StringInfoData buf;
int32 i;
pq_begintypsend(&buf);
pq_sendint(&buf, poly->npts, sizeof(int32));
for (i = 0; i < poly->npts; i++)
{
pq_sendfloat8(&buf, poly->p[i].x);
pq_sendfloat8(&buf, poly->p[i].y);
}
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*-------------------------------------------------------
* Is polygon A strictly left of polygon B? i.e. is
* the right most point of A left of the left most point
* of B?
*-------------------------------------------------------*/
Datum
poly_left(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.x < polyb->boundbox.low.x;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A overlapping or left of polygon B? i.e. is
* the right most point of A at or left of the right most point
* of B?
*-------------------------------------------------------*/
Datum
poly_overleft(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.x <= polyb->boundbox.high.x;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A strictly right of polygon B? i.e. is
* the left most point of A right of the right most point
* of B?
*-------------------------------------------------------*/
Datum
poly_right(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.x > polyb->boundbox.high.x;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A overlapping or right of polygon B? i.e. is
* the left most point of A at or right of the left most point
* of B?
*-------------------------------------------------------*/
Datum
poly_overright(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.x >= polyb->boundbox.low.x;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A strictly below polygon B? i.e. is
* the upper most point of A below the lower most point
* of B?
*-------------------------------------------------------*/
Datum
poly_below(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.y < polyb->boundbox.low.y;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A overlapping or below polygon B? i.e. is
* the upper most point of A at or below the upper most point
* of B?
*-------------------------------------------------------*/
Datum
poly_overbelow(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.y <= polyb->boundbox.high.y;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A strictly above polygon B? i.e. is
* the lower most point of A above the upper most point
* of B?
*-------------------------------------------------------*/
Datum
poly_above(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.y > polyb->boundbox.high.y;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A overlapping or above polygon B? i.e. is
* the lower most point of A at or above the lower most point
* of B?
*-------------------------------------------------------*/
Datum
poly_overabove(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.y >= polyb->boundbox.low.y;
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-------------------------------------------------------
* Is polygon A the same as polygon B? i.e. are all the
* points the same?
* Check all points for matches in both forward and reverse
* direction since polygons are non-directional and are
* closed shapes.
*-------------------------------------------------------*/
Datum
poly_same(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
if (polya->npts != polyb->npts)
result = false;
else
result = plist_same(polya->npts, polya->p, polyb->p);
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-----------------------------------------------------------------
* Determine if polygon A overlaps polygon B
*-----------------------------------------------------------------*/
Datum
poly_overlap(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
/* Quick check by bounding box */
result = (polya->npts > 0 && polyb->npts > 0 &&
box_ov(&polya->boundbox, &polyb->boundbox)) ? true : false;
/*
* Brute-force algorithm: look for intersecting edges; if any exist the
* polygons overlap, otherwise check whether one polygon lies inside the
* other by testing a single point of each.
*/
if (result)
{
int ia,
ib;
LSEG sa,
sb;
/* Init first of polya's edge with last point */
sa.p[0] = polya->p[polya->npts - 1];
result = false;
for (ia = 0; ia < polya->npts && result == false; ia++)
{
/* Second point of polya's edge is a current one */
sa.p[1] = polya->p[ia];
/* Init first of polyb's edge with last point */
sb.p[0] = polyb->p[polyb->npts - 1];
for (ib = 0; ib < polyb->npts && result == false; ib++)
{
sb.p[1] = polyb->p[ib];
result = lseg_intersect_internal(&sa, &sb);
sb.p[0] = sb.p[1];
}
/*
* move current endpoint to the first point of next edge
*/
sa.p[0] = sa.p[1];
}
if (result == false)
{
result = (point_inside(polya->p, polyb->npts, polyb->p)
||
point_inside(polyb->p, polya->npts, polya->p));
}
}
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*
* Tests special kind of segment for in/out of polygon.
* Special kind means:
* - point a should be on segment s
* - segment (a,b) should not be contained by s
* Returns true if:
* - segment (a,b) is collinear to s and (a,b) is in polygon
* - segment (a,b) is not collinear to s. Note: that doesn't
* mean that the segment is in the polygon!
*/
static bool
touched_lseg_inside_poly(Point *a, Point *b, LSEG *s, POLYGON *poly, int start)
{
/* point a is on s, b is not */
LSEG t;
t.p[0] = *a;
t.p[1] = *b;
#define POINTEQ(pt1, pt2) (FPeq((pt1)->x, (pt2)->x) && FPeq((pt1)->y, (pt2)->y))
if (POINTEQ(a, s->p))
{
if (on_ps_internal(s->p + 1, &t))
return lseg_inside_poly(b, s->p + 1, poly, start);
}
else if (POINTEQ(a, s->p + 1))
{
if (on_ps_internal(s->p, &t))
return lseg_inside_poly(b, s->p, poly, start);
}
else if (on_ps_internal(s->p, &t))
{
return lseg_inside_poly(b, s->p, poly, start);
}
else if (on_ps_internal(s->p + 1, &t))
{
return lseg_inside_poly(b, s->p + 1, poly, start);
}
return true; /* may not be true, but that will be checked later */
}
/*
* Returns true if segment (a,b) is in polygon, option
* start is used for optimization - function checks
* polygon's edges started from start
*/
static bool
lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start)
{
LSEG s,
t;
int i;
bool res = true,
intersection = false;
t.p[0] = *a;
t.p[1] = *b;
s.p[0] = poly->p[(start == 0) ? (poly->npts - 1) : (start - 1)];
for (i = start; i < poly->npts && res; i++)
{
Point *interpt;
s.p[1] = poly->p[i];
if (on_ps_internal(t.p, &s))
{
if (on_ps_internal(t.p + 1, &s))
return true; /* t is contained by s */
/* Y-cross */
res = touched_lseg_inside_poly(t.p, t.p + 1, &s, poly, i + 1);
}
else if (on_ps_internal(t.p + 1, &s))
{
/* Y-cross */
res = touched_lseg_inside_poly(t.p + 1, t.p, &s, poly, i + 1);
}
else if ((interpt = lseg_interpt_internal(&t, &s)) != NULL)
{
/*
* segments are X-crossing, go to check each subsegment
*/
intersection = true;
res = lseg_inside_poly(t.p, interpt, poly, i + 1);
if (res)
res = lseg_inside_poly(t.p + 1, interpt, poly, i + 1);
pfree(interpt);
}
s.p[0] = s.p[1];
}
if (res && !intersection)
{
Point p;
/*
* If no X-intersection was found, check the central point of the tested
* segment; otherwise all subsegments have already been checked.
*/
p.x = (t.p[0].x + t.p[1].x) / 2.0;
p.y = (t.p[0].y + t.p[1].y) / 2.0;
res = point_inside(&p, poly->npts, poly->p);
}
return res;
}
/*-----------------------------------------------------------------
* Determine if polygon A contains polygon B.
*-----------------------------------------------------------------*/
Datum
poly_contain(PG_FUNCTION_ARGS)
{
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
/*
* Quick check to see if bounding box is contained.
*/
if (polya->npts > 0 && polyb->npts > 0 &&
DatumGetBool(DirectFunctionCall2(box_contain,
BoxPGetDatum(&polya->boundbox),
BoxPGetDatum(&polyb->boundbox))))
{
int i;
LSEG s;
s.p[0] = polyb->p[polyb->npts - 1];
result = true;
for (i = 0; i < polyb->npts && result; i++)
{
s.p[1] = polyb->p[i];
result = lseg_inside_poly(s.p, s.p + 1, polya, 0);
s.p[0] = s.p[1];
}
}
else
{
result = false;
}
/*
* Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
PG_RETURN_BOOL(result);
}
/*-----------------------------------------------------------------
* Determine if polygon A is contained by polygon B
*-----------------------------------------------------------------*/
Datum
poly_contained(PG_FUNCTION_ARGS)
{
Datum polya = PG_GETARG_DATUM(0);
Datum polyb = PG_GETARG_DATUM(1);
/* Just switch the arguments and pass it off to poly_contain */
PG_RETURN_DATUM(DirectFunctionCall2(poly_contain, polyb, polya));
}
Datum
poly_contain_pt(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
Point *p = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(point_inside(p, poly->npts, poly->p) != 0);
}
Datum
pt_contained_poly(PG_FUNCTION_ARGS)
{
Point *p = PG_GETARG_POINT_P(0);
POLYGON *poly = PG_GETARG_POLYGON_P(1);
PG_RETURN_BOOL(point_inside(p, poly->npts, poly->p) != 0);
}
Datum
poly_distance(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
POLYGON *polya = PG_GETARG_POLYGON_P(0);
POLYGON *polyb = PG_GETARG_POLYGON_P(1);
#endif
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"poly_distance\" not implemented")));
PG_RETURN_NULL();
}
/***********************************************************************
**
** Routines for 2D points.
**
***********************************************************************/
Datum
construct_point(PG_FUNCTION_ARGS)
{
float8 x = PG_GETARG_FLOAT8(0);
float8 y = PG_GETARG_FLOAT8(1);
PG_RETURN_POINT_P(point_construct(x, y));
}
Datum
point_add(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = (p1->x + p2->x);
result->y = (p1->y + p2->y);
PG_RETURN_POINT_P(result);
}
Datum
point_sub(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = (p1->x - p2->x);
result->y = (p1->y - p2->y);
PG_RETURN_POINT_P(result);
}
Datum
point_mul(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = (p1->x * p2->x) - (p1->y * p2->y);
result->y = (p1->x * p2->y) + (p1->y * p2->x);
PG_RETURN_POINT_P(result);
}
Datum
point_div(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
Point *result;
double div;
result = (Point *) palloc(sizeof(Point));
div = (p2->x * p2->x) + (p2->y * p2->y);
if (div == 0.0)
ereport(ERROR,
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
result->x = ((p1->x * p2->x) + (p1->y * p2->y)) / div;
result->y = ((p2->x * p1->y) - (p2->y * p1->x)) / div;
PG_RETURN_POINT_P(result);
}
/***********************************************************************
**
** Routines for 2D boxes.
**
***********************************************************************/
Datum
points_box(PG_FUNCTION_ARGS)
{
Point *p1 = PG_GETARG_POINT_P(0);
Point *p2 = PG_GETARG_POINT_P(1);
PG_RETURN_BOX_P(box_construct(p1->x, p2->x, p1->y, p2->y));
}
Datum
box_add(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *p = PG_GETARG_POINT_P(1);
PG_RETURN_BOX_P(box_construct((box->high.x + p->x),
(box->low.x + p->x),
(box->high.y + p->y),
(box->low.y + p->y)));
}
Datum
box_sub(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *p = PG_GETARG_POINT_P(1);
PG_RETURN_BOX_P(box_construct((box->high.x - p->x),
(box->low.x - p->x),
(box->high.y - p->y),
(box->low.y - p->y)));
}
Datum
box_mul(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *p = PG_GETARG_POINT_P(1);
BOX *result;
Point *high,
*low;
high = DatumGetPointP(DirectFunctionCall2(point_mul,
PointPGetDatum(&box->high),
PointPGetDatum(p)));
low = DatumGetPointP(DirectFunctionCall2(point_mul,
PointPGetDatum(&box->low),
PointPGetDatum(p)));
result = box_construct(high->x, low->x, high->y, low->y);
PG_RETURN_BOX_P(result);
}
Datum
box_div(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
Point *p = PG_GETARG_POINT_P(1);
BOX *result;
Point *high,
*low;
high = DatumGetPointP(DirectFunctionCall2(point_div,
PointPGetDatum(&box->high),
PointPGetDatum(p)));
low = DatumGetPointP(DirectFunctionCall2(point_div,
PointPGetDatum(&box->low),
PointPGetDatum(p)));
result = box_construct(high->x, low->x, high->y, low->y);
PG_RETURN_BOX_P(result);
}
/***********************************************************************
**
** Routines for 2D paths.
**
***********************************************************************/
/* path_add()
* Concatenate two paths (only if they are both open).
*/
Datum
path_add(PG_FUNCTION_ARGS)
{
PATH *p1 = PG_GETARG_PATH_P(0);
PATH *p2 = PG_GETARG_PATH_P(1);
PATH *result;
int size,
base_size;
int i;
if (p1->closed || p2->closed)
PG_RETURN_NULL();
base_size = sizeof(p1->p[0]) * (p1->npts + p2->npts);
size = offsetof(PATH, p[0]) + base_size;
/* Check for integer overflow */
if (base_size / sizeof(p1->p[0]) != (p1->npts + p2->npts) ||
size <= base_size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
result = (PATH *) palloc(size);
SET_VARSIZE(result, size);
result->npts = (p1->npts + p2->npts);
result->closed = p1->closed;
/* prevent instability in unused pad bytes */
result->dummy = 0;
for (i = 0; i < p1->npts; i++)
{
result->p[i].x = p1->p[i].x;
result->p[i].y = p1->p[i].y;
}
for (i = 0; i < p2->npts; i++)
{
result->p[i + p1->npts].x = p2->p[i].x;
result->p[i + p1->npts].y = p2->p[i].y;
}
PG_RETURN_PATH_P(result);
}
/* path_add_pt()
* Translation operators.
*/
Datum
path_add_pt(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
Point *point = PG_GETARG_POINT_P(1);
int i;
for (i = 0; i < path->npts; i++)
{
path->p[i].x += point->x;
path->p[i].y += point->y;
}
PG_RETURN_PATH_P(path);
}
Datum
path_sub_pt(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
Point *point = PG_GETARG_POINT_P(1);
int i;
for (i = 0; i < path->npts; i++)
{
path->p[i].x -= point->x;
path->p[i].y -= point->y;
}
PG_RETURN_PATH_P(path);
}
/* path_mul_pt()
* Rotation and scaling operators.
*/
Datum
path_mul_pt(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
Point *point = PG_GETARG_POINT_P(1);
Point *p;
int i;
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_mul,
PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
}
PG_RETURN_PATH_P(path);
}
Datum
path_div_pt(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P_COPY(0);
Point *point = PG_GETARG_POINT_P(1);
Point *p;
int i;
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_div,
PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
}
PG_RETURN_PATH_P(path);
}
Datum
path_center(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
PATH *path = PG_GETARG_PATH_P(0);
#endif
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"path_center\" not implemented")));
PG_RETURN_NULL();
}
Datum
path_poly(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
POLYGON *poly;
int size;
int i;
/* This is not very consistent --- other similar cases return NULL ... */
if (!path->closed)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("open path cannot be converted to polygon")));
/*
* Never overflows: the old size fit in MaxAllocSize, and the new size is
* just a small constant larger.
*/
size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * path->npts;
poly = (POLYGON *) palloc(size);
SET_VARSIZE(poly, size);
poly->npts = path->npts;
for (i = 0; i < path->npts; i++)
{
poly->p[i].x = path->p[i].x;
poly->p[i].y = path->p[i].y;
}
make_bound_box(poly);
PG_RETURN_POLYGON_P(poly);
}
/***********************************************************************
**
** Routines for 2D polygons.
**
***********************************************************************/
Datum
poly_npoints(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
PG_RETURN_INT32(poly->npts);
}
Datum
poly_center(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
Datum result;
CIRCLE *circle;
circle = DatumGetCircleP(DirectFunctionCall1(poly_circle,
PolygonPGetDatum(poly)));
result = DirectFunctionCall1(circle_center,
CirclePGetDatum(circle));
PG_RETURN_DATUM(result);
}
Datum
poly_box(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
BOX *box;
if (poly->npts < 1)
PG_RETURN_NULL();
box = box_copy(&poly->boundbox);
PG_RETURN_BOX_P(box);
}
/* box_poly()
* Convert a box to a polygon.
*/
Datum
box_poly(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
POLYGON *poly;
int size;
/* map four corners of the box to a polygon */
size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * 4;
poly = (POLYGON *) palloc(size);
SET_VARSIZE(poly, size);
poly->npts = 4;
poly->p[0].x = box->low.x;
poly->p[0].y = box->low.y;
poly->p[1].x = box->low.x;
poly->p[1].y = box->high.y;
poly->p[2].x = box->high.x;
poly->p[2].y = box->high.y;
poly->p[3].x = box->high.x;
poly->p[3].y = box->low.y;
box_fill(&poly->boundbox, box->high.x, box->low.x,
box->high.y, box->low.y);
PG_RETURN_POLYGON_P(poly);
}
Datum
poly_path(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
PATH *path;
int size;
int i;
/*
* Never overflows: the old size fit in MaxAllocSize, and the new size is
* smaller by a small constant.
*/
size = offsetof(PATH, p[0]) + sizeof(path->p[0]) * poly->npts;
path = (PATH *) palloc(size);
SET_VARSIZE(path, size);
path->npts = poly->npts;
path->closed = TRUE;
/* prevent instability in unused pad bytes */
path->dummy = 0;
for (i = 0; i < poly->npts; i++)
{
path->p[i].x = poly->p[i].x;
path->p[i].y = poly->p[i].y;
}
PG_RETURN_PATH_P(path);
}
/***********************************************************************
**
** Routines for circles.
**
***********************************************************************/
/*----------------------------------------------------------
* Formatting and conversion routines.
*---------------------------------------------------------*/
/* circle_in - convert a string to internal form.
*
* External format: (center and radius of circle)
* "((f8,f8)<f8>)"
* also supports quick entry style "(f8,f8,f8)"
*/
Datum
circle_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
CIRCLE *circle;
char *s,
*cp;
int depth = 0;
circle = (CIRCLE *) palloc(sizeof(CIRCLE));
s = str;
while (isspace((unsigned char) *s))
s++;
if ((*s == LDELIM_C) || (*s == LDELIM))
{
depth++;
cp = (s + 1);
while (isspace((unsigned char) *cp))
cp++;
if (*cp == LDELIM)
s = cp;
}
if (!pair_decode(s, &circle->center.x, &circle->center.y, &s))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type circle: \"%s\"", str)));
if (*s == DELIM)
s++;
while (isspace((unsigned char) *s))
s++;
if ((!single_decode(s, &circle->radius, &s)) || (circle->radius < 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type circle: \"%s\"", str)));
while (depth > 0)
{
if ((*s == RDELIM)
|| ((*s == RDELIM_C) && (depth == 1)))
{
depth--;
s++;
while (isspace((unsigned char) *s))
s++;
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type circle: \"%s\"", str)));
}
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type circle: \"%s\"", str)));
PG_RETURN_CIRCLE_P(circle);
}
/* circle_out - convert a circle to external form.
*/
Datum
circle_out(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
char *result;
char *cp;
result = palloc(2 * P_MAXLEN + 6);
cp = result;
*cp++ = LDELIM_C;
*cp++ = LDELIM;
if (!pair_encode(circle->center.x, circle->center.y, cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not format \"circle\" value")));
cp += strlen(cp);
*cp++ = RDELIM;
*cp++ = DELIM;
if (!single_encode(circle->radius, cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("could not format \"circle\" value")));
cp += strlen(cp);
*cp++ = RDELIM_C;
*cp = '\0';
PG_RETURN_CSTRING(result);
}
/*
* circle_recv - converts external binary format to circle
*/
Datum
circle_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
CIRCLE *circle;
circle = (CIRCLE *) palloc(sizeof(CIRCLE));
circle->center.x = pq_getmsgfloat8(buf);
circle->center.y = pq_getmsgfloat8(buf);
circle->radius = pq_getmsgfloat8(buf);
if (circle->radius < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("invalid radius in external \"circle\" value")));
PG_RETURN_CIRCLE_P(circle);
}
/*
* circle_send - converts circle to binary format
*/
Datum
circle_send(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendfloat8(&buf, circle->center.x);
pq_sendfloat8(&buf, circle->center.y);
pq_sendfloat8(&buf, circle->radius);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*----------------------------------------------------------
* Relational operators for CIRCLEs.
* <, >, <=, >=, and == are based on circle area.
*---------------------------------------------------------*/
/* circles identical?
*/
Datum
circle_same(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPeq(circle1->radius, circle2->radius) &&
FPeq(circle1->center.x, circle2->center.x) &&
FPeq(circle1->center.y, circle2->center.y));
}
/* circle_overlap - does circle1 overlap circle2?
*/
Datum
circle_overlap(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle(point_dt(&circle1->center, &circle2->center),
circle1->radius + circle2->radius));
}
/* circle_overleft - is the right edge of circle1 at or left of
* the right edge of circle2?
*/
Datum
circle_overleft(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle((circle1->center.x + circle1->radius),
(circle2->center.x + circle2->radius)));
}
/* circle_left - is circle1 strictly left of circle2?
*/
Datum
circle_left(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPlt((circle1->center.x + circle1->radius),
(circle2->center.x - circle2->radius)));
}
/* circle_right - is circle1 strictly right of circle2?
*/
Datum
circle_right(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPgt((circle1->center.x - circle1->radius),
(circle2->center.x + circle2->radius)));
}
/* circle_overright - is the left edge of circle1 at or right of
* the left edge of circle2?
*/
Datum
circle_overright(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPge((circle1->center.x - circle1->radius),
(circle2->center.x - circle2->radius)));
}
/* circle_contained - is circle1 contained by circle2?
*/
Datum
circle_contained(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle((point_dt(&circle1->center, &circle2->center) + circle1->radius), circle2->radius));
}
/* circle_contain - does circle1 contain circle2?
*/
Datum
circle_contain(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle((point_dt(&circle1->center, &circle2->center) + circle2->radius), circle1->radius));
}
/* circle_below - is circle1 strictly below circle2?
*/
Datum
circle_below(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPlt((circle1->center.y + circle1->radius),
(circle2->center.y - circle2->radius)));
}
/* circle_above - is circle1 strictly above circle2?
*/
Datum
circle_above(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPgt((circle1->center.y - circle1->radius),
(circle2->center.y + circle2->radius)));
}
/* circle_overbelow - is the upper edge of circle1 at or below
* the upper edge of circle2?
*/
Datum
circle_overbelow(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle((circle1->center.y + circle1->radius),
(circle2->center.y + circle2->radius)));
}
/* circle_overabove - is the lower edge of circle1 at or above
* the lower edge of circle2?
*/
Datum
circle_overabove(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPge((circle1->center.y - circle1->radius),
(circle2->center.y - circle2->radius)));
}
/* circle_relop - is area(circle1) relop area(circle2), within
* our accuracy constraint?
*/
Datum
circle_eq(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPeq(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_ne(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPne(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_lt(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPlt(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_gt(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPgt(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_le(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPle(circle_ar(circle1), circle_ar(circle2)));
}
Datum
circle_ge(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
PG_RETURN_BOOL(FPge(circle_ar(circle1), circle_ar(circle2)));
}
/*----------------------------------------------------------
* "Arithmetic" operators on circles.
*---------------------------------------------------------*/
static CIRCLE *
circle_copy(CIRCLE *circle)
{
CIRCLE *result;
if (!PointerIsValid(circle))
return NULL;
result = (CIRCLE *) palloc(sizeof(CIRCLE));
memcpy((char *) result, (char *) circle, sizeof(CIRCLE));
return result;
}
/* circle_add_pt()
* Translation operator.
*/
Datum
circle_add_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
CIRCLE *result;
result = circle_copy(circle);
result->center.x += point->x;
result->center.y += point->y;
PG_RETURN_CIRCLE_P(result);
}
Datum
circle_sub_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
CIRCLE *result;
result = circle_copy(circle);
result->center.x -= point->x;
result->center.y -= point->y;
PG_RETURN_CIRCLE_P(result);
}
/* circle_mul_pt()
* Rotation and scaling operators.
*/
Datum
circle_mul_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
CIRCLE *result;
Point *p;
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_mul,
PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
result->radius *= HYPOT(point->x, point->y);
PG_RETURN_CIRCLE_P(result);
}
Datum
circle_div_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
CIRCLE *result;
Point *p;
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_div,
PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
result->radius /= HYPOT(point->x, point->y);
PG_RETURN_CIRCLE_P(result);
}
/* circle_area - returns the area of the circle.
*/
Datum
circle_area(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
PG_RETURN_FLOAT8(circle_ar(circle));
}
/* circle_diameter - returns the diameter of the circle.
*/
Datum
circle_diameter(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
PG_RETURN_FLOAT8(2 * circle->radius);
}
/* circle_radius - returns the radius of the circle.
*/
Datum
circle_radius(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
PG_RETURN_FLOAT8(circle->radius);
}
/* circle_distance - returns the distance between
* two circles.
*/
Datum
circle_distance(PG_FUNCTION_ARGS)
{
CIRCLE *circle1 = PG_GETARG_CIRCLE_P(0);
CIRCLE *circle2 = PG_GETARG_CIRCLE_P(1);
float8 result;
result = point_dt(&circle1->center, &circle2->center)
- (circle1->radius + circle2->radius);
if (result < 0)
result = 0;
PG_RETURN_FLOAT8(result);
}
Datum
circle_contain_pt(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *point = PG_GETARG_POINT_P(1);
double d;
d = point_dt(&circle->center, point);
PG_RETURN_BOOL(d <= circle->radius);
}
Datum
pt_contained_circle(PG_FUNCTION_ARGS)
{
Point *point = PG_GETARG_POINT_P(0);
CIRCLE *circle = PG_GETARG_CIRCLE_P(1);
double d;
d = point_dt(&circle->center, point);
PG_RETURN_BOOL(d <= circle->radius);
}
/* dist_pc - returns the distance between
* a point and a circle.
*/
Datum
dist_pc(PG_FUNCTION_ARGS)
{
Point *point = PG_GETARG_POINT_P(0);
CIRCLE *circle = PG_GETARG_CIRCLE_P(1);
float8 result;
result = point_dt(point, &circle->center) - circle->radius;
if (result < 0)
result = 0;
PG_RETURN_FLOAT8(result);
}
/* circle_center - returns the center point of the circle.
*/
Datum
circle_center(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
Point *result;
result = (Point *) palloc(sizeof(Point));
result->x = circle->center.x;
result->y = circle->center.y;
PG_RETURN_POINT_P(result);
}
/* circle_ar - returns the area of the circle.
*/
static double
circle_ar(CIRCLE *circle)
{
return M_PI * (circle->radius * circle->radius);
}
/*----------------------------------------------------------
* Conversion operators.
*---------------------------------------------------------*/
Datum
cr_circle(PG_FUNCTION_ARGS)
{
Point *center = PG_GETARG_POINT_P(0);
float8 radius = PG_GETARG_FLOAT8(1);
CIRCLE *result;
result = (CIRCLE *) palloc(sizeof(CIRCLE));
result->center.x = center->x;
result->center.y = center->y;
result->radius = radius;
PG_RETURN_CIRCLE_P(result);
}
Datum
circle_box(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
BOX *box;
double delta;
box = (BOX *) palloc(sizeof(BOX));
delta = circle->radius / sqrt(2.0);
box->high.x = circle->center.x + delta;
box->low.x = circle->center.x - delta;
box->high.y = circle->center.y + delta;
box->low.y = circle->center.y - delta;
PG_RETURN_BOX_P(box);
}
/* box_circle()
* Convert a box to a circle.
*/
Datum
box_circle(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
CIRCLE *circle;
circle = (CIRCLE *) palloc(sizeof(CIRCLE));
circle->center.x = (box->high.x + box->low.x) / 2;
circle->center.y = (box->high.y + box->low.y) / 2;
circle->radius = point_dt(&circle->center, &box->high);
PG_RETURN_CIRCLE_P(circle);
}
Datum
circle_poly(PG_FUNCTION_ARGS)
{
int32 npts = PG_GETARG_INT32(0);
CIRCLE *circle = PG_GETARG_CIRCLE_P(1);
POLYGON *poly;
int base_size,
size;
int i;
double angle;
double anglestep;
if (FPzero(circle->radius))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot convert circle with radius zero to polygon")));
if (npts < 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("must request at least 2 points")));
base_size = sizeof(poly->p[0]) * npts;
size = offsetof(POLYGON, p[0]) + base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(poly->p[0]) || size <= base_size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many points requested")));
poly = (POLYGON *) palloc0(size); /* zero any holes */
SET_VARSIZE(poly, size);
poly->npts = npts;
anglestep = (2.0 * M_PI) / npts;
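/*
 * Worked example (illustrative values): with npts = 4 the step is pi/2,
 * so the loop below generates (cx - r, cy), (cx, cy + r), (cx + r, cy)
 * and (cx, cy - r) -- a diamond inscribed in the circle, starting at the
 * leftmost point because of the "-cos" convention.
 */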
for (i = 0; i < npts; i++)
{
angle = i * anglestep;
poly->p[i].x = circle->center.x - (circle->radius * cos(angle));
poly->p[i].y = circle->center.y + (circle->radius * sin(angle));
}
make_bound_box(poly);
PG_RETURN_POLYGON_P(poly);
}
/* poly_circle - convert polygon to circle
*
* XXX This algorithm should use weighted means of line segments
* rather than straight average values of points - tgl 97/01/21.
*/
Datum
poly_circle(PG_FUNCTION_ARGS)
{
POLYGON *poly = PG_GETARG_POLYGON_P(0);
CIRCLE *circle;
int i;
if (poly->npts < 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot convert empty polygon to circle")));
circle = (CIRCLE *) palloc(sizeof(CIRCLE));
circle->center.x = 0;
circle->center.y = 0;
circle->radius = 0;
for (i = 0; i < poly->npts; i++)
{
circle->center.x += poly->p[i].x;
circle->center.y += poly->p[i].y;
}
circle->center.x /= poly->npts;
circle->center.y /= poly->npts;
for (i = 0; i < poly->npts; i++)
circle->radius += point_dt(&poly->p[i], &circle->center);
circle->radius /= poly->npts;
if (FPzero(circle->radius))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot convert empty polygon to circle")));
PG_RETURN_CIRCLE_P(circle);
}
/***********************************************************************
**
** Private routines for multiple types.
**
***********************************************************************/
/*
* Test to see if the point is inside the polygon, returns 1/0, or 2 if
* the point is on the polygon.
* Code adapted but not copied from integer-based routines in
* WN: A Server for the HTTP, version 1.15.1, file wn/image.c
* http://hopf.math.northwestern.edu/index.html
* Description of algorithm: http://www.linuxjournal.com/article/2197
* http://www.linuxjournal.com/article/2029
*/
#define POINT_ON_POLYGON INT_MAX
static int
point_inside(Point *p, int npts, Point *plist)
{
double x0,
y0;
double prev_x,
prev_y;
int i = 0;
double x,
y;
int cross,
total_cross = 0;
if (npts <= 0)
return 0;
/* compute first polygon point relative to single point */
x0 = plist[0].x - p->x;
y0 = plist[0].y - p->y;
prev_x = x0;
prev_y = y0;
/* loop over polygon points and aggregate total_cross */
for (i = 1; i < npts; i++)
{
/* compute next polygon point relative to single point */
x = plist[i].x - p->x;
y = plist[i].y - p->y;
/* compute previous to current point crossing */
if ((cross = lseg_crossing(x, y, prev_x, prev_y)) == POINT_ON_POLYGON)
return 2;
total_cross += cross;
prev_x = x;
prev_y = y;
}
/* now do the first point */
if ((cross = lseg_crossing(x0, y0, prev_x, prev_y)) == POINT_ON_POLYGON)
return 2;
total_cross += cross;
if (total_cross != 0)
return 1;
return 0;
}
/* lseg_crossing()
* Returns +/-2 if line segment crosses the positive X-axis in a +/- direction.
* Returns +/-1 if one point is on the positive X-axis.
* Returns 0 if both points are on the positive X-axis, or there is no crossing.
* Returns POINT_ON_POLYGON if the segment contains (0,0).
* Wow, that is one confusing API, but it is used above, and when summed,
* can tell us if a point is in a polygon.
*/
static int
lseg_crossing(double x, double y, double prev_x, double prev_y)
{
double z;
int y_sign;
if (FPzero(y))
{ /* y == 0, on X axis */
if (FPzero(x)) /* (x,y) is (0,0)? */
return POINT_ON_POLYGON;
else if (FPgt(x, 0))
{ /* x > 0 */
if (FPzero(prev_y)) /* y and prev_y are zero */
/* prev_x > 0? */
return FPgt(prev_x, 0) ? 0 : POINT_ON_POLYGON;
return FPlt(prev_y, 0) ? 1 : -1;
}
else
{ /* x < 0, x not on positive X axis */
if (FPzero(prev_y))
/* prev_x < 0? */
return FPlt(prev_x, 0) ? 0 : POINT_ON_POLYGON;
return 0;
}
}
else
{ /* y != 0 */
/* compute y crossing direction from previous point */
y_sign = FPgt(y, 0) ? 1 : -1;
if (FPzero(prev_y))
/* previous point was on X axis, so new point is either off or on */
return FPlt(prev_x, 0) ? 0 : y_sign;
else if (FPgt(y_sign * prev_y, 0))
/* both above or below X axis */
return 0; /* same sign */
else
{ /* y and prev_y cross X-axis */
if (FPge(x, 0) && FPgt(prev_x, 0))
/* both non-negative so cross positive X-axis */
return 2 * y_sign;
if (FPlt(x, 0) && FPle(prev_x, 0))
/* both non-positive so do not cross positive X-axis */
return 0;
/* x and y cross the axes, see URLs above point_inside() */
z = (x - prev_x) * y - (y - prev_y) * x;
if (FPzero(z))
return POINT_ON_POLYGON;
return FPgt((y_sign * z), 0) ? 0 : 2 * y_sign;
}
}
}
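/*
 * A minimal sketch of how the summed lseg_crossing() results are used,
 * guarded out of the build; the guard macro and helper name are
 * illustrative. For the unit square below, the center point accumulates
 * a nonzero crossing total so point_inside() reports 1, while a point
 * well outside sums to zero.
 */
#ifdef GEO_POINT_INSIDE_DEMO
static int
point_inside_demo(void)
{
	Point square[4] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
	Point center = {0.5, 0.5};
	Point outside = {2.5, 2.5};

	/* expected: 1 for the center, 0 for the far point */
	return point_inside(&center, 4, square) == 1 &&
		point_inside(&outside, 4, square) == 0;
}
#endif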
static bool
plist_same(int npts, Point *p1, Point *p2)
{
int i,
ii,
j;
/* find match for first point */
for (i = 0; i < npts; i++)
{
if ((FPeq(p2[i].x, p1[0].x))
&& (FPeq(p2[i].y, p1[0].y)))
{
/* match found? then look forward through remaining points */
for (ii = 1, j = i + 1; ii < npts; ii++, j++)
{
if (j >= npts)
j = 0;
if ((!FPeq(p2[j].x, p1[ii].x))
|| (!FPeq(p2[j].y, p1[ii].y)))
{
#ifdef GEODEBUG
printf("plist_same- %d failed forward match with %d\n", j, ii);
#endif
break;
}
}
#ifdef GEODEBUG
printf("plist_same- ii = %d/%d after forward match\n", ii, npts);
#endif
if (ii == npts)
return TRUE;
/* match not found forwards? then look backwards */
for (ii = 1, j = i - 1; ii < npts; ii++, j--)
{
if (j < 0)
j = (npts - 1);
if ((!FPeq(p2[j].x, p1[ii].x))
|| (!FPeq(p2[j].y, p1[ii].y)))
{
#ifdef GEODEBUG
printf("plist_same- %d failed reverse match with %d\n", j, ii);
#endif
break;
}
}
#ifdef GEODEBUG
printf("plist_same- ii = %d/%d after reverse match\n", ii, npts);
#endif
if (ii == npts)
return TRUE;
}
}
return FALSE;
}
/*-------------------------------------------------------------------------
* Determine the hypotenuse.
*
* If required, x and y are swapped to make x the larger number. The
* traditional formula of x^2+y^2 is rearranged to factor x outside the
* sqrt. This allows computation of the hypotenuse for significantly
* larger values, and with a higher precision than when using the naive
* formula. In particular, this cannot overflow unless the final result
* would be out-of-range.
*
* sqrt( x^2 + y^2 ) = sqrt( x^2( 1 + y^2/x^2) )
* = x * sqrt( 1 + y^2/x^2 )
* = x * sqrt( 1 + y/x * y/x )
*
* It is expected that this routine will eventually be replaced with the
* C99 hypot() function.
*
* This implementation conforms to IEEE Std 1003.1 and GLIBC, in that the
* case of hypot(inf,nan) results in INF, and not NAN.
*-----------------------------------------------------------------------
*/
double
pg_hypot(double x, double y)
{
double yx;
/* Handle INF and NaN properly */
if (isinf(x) || isinf(y))
return get_float8_infinity();
if (isnan(x) || isnan(y))
return get_float8_nan();
/* Else, drop any minus signs */
x = fabs(x);
y = fabs(y);
/* Swap x and y if needed to make x the larger one */
if (x < y)
{
double temp = x;
x = y;
y = temp;
}
/*
* If y is zero, the hypotenuse is x. This test saves a few cycles in
* such cases, but more importantly it also protects against
* divide-by-zero errors, since now x >= y.
*/
if (y == 0.0)
return x;
/* Determine the hypotenuse */
yx = y / x;
return x * sqrt(1.0 + (yx * yx));
}
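/*
 * A worked example of why the factoring matters, guarded out of the
 * build; the guard macro and helper name are illustrative. With
 * x = y = 1e200 the naive sqrt(x*x + y*y) overflows to infinity, since
 * x*x = 1e400 exceeds DBL_MAX, while pg_hypot() returns the finite
 * 1e200 * sqrt(2).
 */
#ifdef PG_HYPOT_DEMO
static double
pg_hypot_demo(void)
{
	double big = 1e200;
	double naive = sqrt(big * big + big * big);	/* +inf: big*big overflows */
	double safe = pg_hypot(big, big);	/* finite, about 1.41421e200 */

	return isinf(naive) && !isinf(safe) ? safe : naive;
}
#endif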
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2020_8 |
crossvul-cpp_data_bad_3569_0 | /*
* Copyright (c) 2008, Christoph Hellwig
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
/*
* Locking scheme:
* - all ACL updates are protected by inode->i_mutex, which is taken before
* calling into this file.
*/
STATIC struct posix_acl *
xfs_acl_from_disk(struct xfs_acl *aclp)
{
struct posix_acl_entry *acl_e;
struct posix_acl *acl;
struct xfs_acl_entry *ace;
int count, i;
count = be32_to_cpu(aclp->acl_cnt);
if (count > XFS_ACL_MAX_ENTRIES)
return ERR_PTR(-EFSCORRUPTED);
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
acl_e = &acl->a_entries[i];
ace = &aclp->acl_entry[i];
/*
* The tag is 32 bits on disk and 16 bits in core.
*
* Because every access to it goes through the core
* format first this is not a problem.
*/
acl_e->e_tag = be32_to_cpu(ace->ae_tag);
acl_e->e_perm = be16_to_cpu(ace->ae_perm);
switch (acl_e->e_tag) {
case ACL_USER:
case ACL_GROUP:
acl_e->e_id = be32_to_cpu(ace->ae_id);
break;
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
acl_e->e_id = ACL_UNDEFINED_ID;
break;
default:
goto fail;
}
}
return acl;
fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
STATIC void
xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
{
const struct posix_acl_entry *acl_e;
struct xfs_acl_entry *ace;
int i;
aclp->acl_cnt = cpu_to_be32(acl->a_count);
for (i = 0; i < acl->a_count; i++) {
ace = &aclp->acl_entry[i];
acl_e = &acl->a_entries[i];
ace->ae_tag = cpu_to_be32(acl_e->e_tag);
ace->ae_id = cpu_to_be32(acl_e->e_id);
ace->ae_perm = cpu_to_be16(acl_e->e_perm);
}
}
struct posix_acl *
xfs_get_acl(struct inode *inode, int type)
{
struct xfs_inode *ip = XFS_I(inode);
struct posix_acl *acl;
struct xfs_acl *xfs_acl;
int len = sizeof(struct xfs_acl);
unsigned char *ea_name;
int error;
acl = get_cached_acl(inode, type);
if (acl != ACL_NOT_CACHED)
return acl;
trace_xfs_get_acl(ip);
switch (type) {
case ACL_TYPE_ACCESS:
ea_name = SGI_ACL_FILE;
break;
case ACL_TYPE_DEFAULT:
ea_name = SGI_ACL_DEFAULT;
break;
default:
BUG();
}
/*
* The ACL was not cached, so read it from the on-disk extended
* attribute and update the cache below.
*/
xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
if (!xfs_acl)
return ERR_PTR(-ENOMEM);
error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl,
&len, ATTR_ROOT);
if (error) {
/*
* If the attribute doesn't exist make sure we have a negative
* cache entry, for any other error assume it is transient and
* leave the cache entry as ACL_NOT_CACHED.
*/
if (error == -ENOATTR) {
acl = NULL;
goto out_update_cache;
}
goto out;
}
acl = xfs_acl_from_disk(xfs_acl);
if (IS_ERR(acl))
goto out;
out_update_cache:
set_cached_acl(inode, type, acl);
out:
kfree(xfs_acl);
return acl;
}
STATIC int
xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
struct xfs_inode *ip = XFS_I(inode);
unsigned char *ea_name;
int error;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
switch (type) {
case ACL_TYPE_ACCESS:
ea_name = SGI_ACL_FILE;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
ea_name = SGI_ACL_DEFAULT;
break;
default:
return -EINVAL;
}
if (acl) {
struct xfs_acl *xfs_acl;
int len;
xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
if (!xfs_acl)
return -ENOMEM;
xfs_acl_to_disk(xfs_acl, acl);
len = sizeof(struct xfs_acl) -
(sizeof(struct xfs_acl_entry) *
(XFS_ACL_MAX_ENTRIES - acl->a_count));
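/*
* That is, only the populated prefix of the fixed-size struct is
* written: e.g. with XFS_ACL_MAX_ENTRIES == 25 and a 3-entry ACL, the
* 22 unused trailing xfs_acl_entry slots are subtracted from the
* attribute length.
*/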
error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
len, ATTR_ROOT);
kfree(xfs_acl);
} else {
/*
* A NULL ACL argument means we want to remove the ACL.
*/
error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT);
/*
* If the attribute didn't exist to start with that's fine.
*/
if (error == -ENOATTR)
error = 0;
}
if (!error)
set_cached_acl(inode, type, acl);
return error;
}
static int
xfs_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
if (mode != inode->i_mode) {
struct iattr iattr;
iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
iattr.ia_mode = mode;
iattr.ia_ctime = current_fs_time(inode->i_sb);
error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}
return error;
}
static int
xfs_acl_exists(struct inode *inode, unsigned char *name)
{
int len = sizeof(struct xfs_acl);
return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
ATTR_ROOT|ATTR_KERNOVAL) == 0);
}
int
posix_acl_access_exists(struct inode *inode)
{
return xfs_acl_exists(inode, SGI_ACL_FILE);
}
int
posix_acl_default_exists(struct inode *inode)
{
if (!S_ISDIR(inode->i_mode))
return 0;
return xfs_acl_exists(inode, SGI_ACL_DEFAULT);
}
/*
* No need for i_mutex because the inode is not yet exposed to the VFS.
*/
int
xfs_inherit_acl(struct inode *inode, struct posix_acl *acl)
{
umode_t mode = inode->i_mode;
int error = 0, inherit = 0;
if (S_ISDIR(inode->i_mode)) {
error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, acl);
if (error)
goto out;
}
error = posix_acl_create(&acl, GFP_KERNEL, &mode);
if (error < 0)
return error;
/*
* If posix_acl_create returns a positive value we need to
* inherit a permission that can't be represented using the Unix
* mode bits and we actually need to set an ACL.
*/
if (error > 0)
inherit = 1;
error = xfs_set_mode(inode, mode);
if (error)
goto out;
if (inherit)
error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl);
out:
posix_acl_release(acl);
return error;
}
int
xfs_acl_chmod(struct inode *inode)
{
struct posix_acl *acl;
int error;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
acl = xfs_get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl) || !acl)
return PTR_ERR(acl);
error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (error)
return error;
error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl);
posix_acl_release(acl);
return error;
}
static int
xfs_xattr_acl_get(struct dentry *dentry, const char *name,
void *value, size_t size, int type)
{
struct posix_acl *acl;
int error;
acl = xfs_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
return -ENODATA;
error = posix_acl_to_xattr(acl, value, size);
posix_acl_release(acl);
return error;
}
static int
xfs_xattr_acl_set(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags, int type)
{
struct inode *inode = dentry->d_inode;
struct posix_acl *acl = NULL;
int error = 0;
if (flags & XATTR_CREATE)
return -EINVAL;
if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
return value ? -EACCES : 0;
if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
if (!value)
goto set_acl;
acl = posix_acl_from_xattr(value, size);
if (!acl) {
/*
* acl_set_file(3) may request that we set default ACLs with
* zero length -- defend (gracefully) against that here.
*/
goto out;
}
if (IS_ERR(acl)) {
error = PTR_ERR(acl);
goto out;
}
error = posix_acl_valid(acl);
if (error)
goto out_release;
error = -EINVAL;
if (acl->a_count > XFS_ACL_MAX_ENTRIES)
goto out_release;
if (type == ACL_TYPE_ACCESS) {
umode_t mode = inode->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
if (error <= 0) {
posix_acl_release(acl);
acl = NULL;
if (error < 0)
return error;
}
error = xfs_set_mode(inode, mode);
if (error)
goto out_release;
}
set_acl:
error = xfs_set_acl(inode, type, acl);
out_release:
posix_acl_release(acl);
out:
return error;
}
const struct xattr_handler xfs_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
.flags = ACL_TYPE_ACCESS,
.get = xfs_xattr_acl_get,
.set = xfs_xattr_acl_set,
};
const struct xattr_handler xfs_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
.flags = ACL_TYPE_DEFAULT,
.get = xfs_xattr_acl_get,
.set = xfs_xattr_acl_set,
};
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3569_0 |
crossvul-cpp_data_good_3498_1 | /*
* Support for o32 Linux/MIPS ELF binaries.
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
#define ELF_CLASS ELFCLASS32
#ifdef __MIPSEB__
#define ELF_DATA ELFDATA2MSB
#else /* __MIPSEL__ */
#define ELF_DATA ELFDATA2LSB
#endif
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
typedef unsigned int elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
__res = 0; \
if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
__res = 0; \
\
__res; \
})
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__kernel_uid_t pr_uid;
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
#define init_elf_binfmt init_elf32_binfmt
#define jiffies_to_timeval jiffies_to_compat_timeval
static inline void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
u32 rem;
value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
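/*
* Worked example, assuming HZ == 100 (TICK_NSEC == 10,000,000): 250
* jiffies become 2,500,000,000 ns, and div_u64_rem() then yields
* tv_sec = 2 with rem = 500,000,000, i.e. tv_usec = 500,000.
*/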
#undef ELF_CORE_COPY_REGS
#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
int i;
for (i = 0; i < EF_R0; i++)
grp[i] = 0;
grp[EF_R0] = 0;
for (i = 1; i <= 31; i++)
grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
grp[EF_R26] = 0;
grp[EF_R27] = 0;
grp[EF_LO] = (elf_greg_t) regs->lo;
grp[EF_HI] = (elf_greg_t) regs->hi;
grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
#ifdef EF_UNUSED0
grp[EF_UNUSED0] = 0;
#endif
}
MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
#include "../../../fs/binfmt_elf.c"
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3498_1 |
crossvul-cpp_data_bad_5000_0 | /*
* x_tables core - Backend for {ip,ip6,arp}_tables
*
* Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
* Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
*
* Based on existing ip_tables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
struct compat_delta {
unsigned int offset; /* offset in kernel */
int delta; /* delta in 32bit user land */
};
struct xt_af {
struct mutex mutex;
struct list_head match;
struct list_head target;
#ifdef CONFIG_COMPAT
struct mutex compat_mutex;
struct compat_delta *compat_tab;
unsigned int number; /* number of slots in compat_tab[] */
unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
static struct xt_af *xt;
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_UNSPEC] = "x",
[NFPROTO_IPV4] = "ip",
[NFPROTO_ARP] = "arp",
[NFPROTO_BRIDGE] = "eb",
[NFPROTO_IPV6] = "ip6",
};
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
u_int8_t af = target->family;
mutex_lock(&xt[af].mutex);
list_add(&target->list, &xt[af].target);
mutex_unlock(&xt[af].mutex);
return 0;
}
EXPORT_SYMBOL(xt_register_target);
void
xt_unregister_target(struct xt_target *target)
{
u_int8_t af = target->family;
mutex_lock(&xt[af].mutex);
list_del(&target->list);
mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);
int
xt_register_targets(struct xt_target *target, unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = xt_register_target(&target[i]);
if (err)
goto err;
}
return err;
err:
if (i > 0)
xt_unregister_targets(target, i);
return err;
}
EXPORT_SYMBOL(xt_register_targets);
void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
while (n-- > 0)
xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
int xt_register_match(struct xt_match *match)
{
u_int8_t af = match->family;
mutex_lock(&xt[af].mutex);
list_add(&match->list, &xt[af].match);
mutex_unlock(&xt[af].mutex);
return 0;
}
EXPORT_SYMBOL(xt_register_match);
void
xt_unregister_match(struct xt_match *match)
{
u_int8_t af = match->family;
mutex_lock(&xt[af].mutex);
list_del(&match->list);
mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);
int
xt_register_matches(struct xt_match *match, unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = xt_register_match(&match[i]);
if (err)
goto err;
}
return err;
err:
if (i > 0)
xt_unregister_matches(match, i);
return err;
}
EXPORT_SYMBOL(xt_register_matches);
void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
while (n-- > 0)
xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);
/*
* These are somewhat awkward, but module loading must not be done while
* holding the mutex (the loaded module will itself call the registration
* functions above), and we need a single lookup function to use.
*/
/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
struct xt_match *m;
int err = -ENOENT;
mutex_lock(&xt[af].mutex);
list_for_each_entry(m, &xt[af].match, list) {
if (strcmp(m->name, name) == 0) {
if (m->revision == revision) {
if (try_module_get(m->me)) {
mutex_unlock(&xt[af].mutex);
return m;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC)
/* Try searching again in the family-independent list */
return xt_find_match(NFPROTO_UNSPEC, name, revision);
return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
struct xt_match *match;
match = xt_find_match(nfproto, name, revision);
if (IS_ERR(match)) {
request_module("%st_%s", xt_prefix[nfproto], name);
match = xt_find_match(nfproto, name, revision);
}
return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
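/*
* Example (hypothetical sketch): rule-parsing code resolving a match by
* name and later dropping the module reference that xt_find_match() took
* via try_module_get(). The error handling shown is illustrative.
*
*	struct xt_match *m;
*
*	m = xt_request_find_match(NFPROTO_IPV4, "tcp", 0);
*	if (IS_ERR(m))
*		return PTR_ERR(m);
*	... use m ...
*	module_put(m->me);
*/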
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
struct xt_target *t;
int err = -ENOENT;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision == revision) {
if (try_module_get(t->me)) {
mutex_unlock(&xt[af].mutex);
return t;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC)
/* Try searching again in the family-independent list */
return xt_find_target(NFPROTO_UNSPEC, name, revision);
return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
struct xt_target *target;
target = xt_find_target(af, name, revision);
if (IS_ERR(target)) {
request_module("%st_%s", xt_prefix[af], name);
target = xt_find_target(af, name, revision);
}
return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
const struct xt_match *m;
int have_rev = 0;
list_for_each_entry(m, &xt[af].match, list) {
if (strcmp(m->name, name) == 0) {
if (m->revision > *bestp)
*bestp = m->revision;
if (m->revision == revision)
have_rev = 1;
}
}
if (af != NFPROTO_UNSPEC && !have_rev)
return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
return have_rev;
}
static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
const struct xt_target *t;
int have_rev = 0;
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision > *bestp)
*bestp = t->revision;
if (t->revision == revision)
have_rev = 1;
}
}
if (af != NFPROTO_UNSPEC && !have_rev)
return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
return have_rev;
}
/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
int *err)
{
int have_rev, best = -1;
mutex_lock(&xt[af].mutex);
if (target == 1)
have_rev = target_revfn(af, name, revision, &best);
else
have_rev = match_revfn(af, name, revision, &best);
mutex_unlock(&xt[af].mutex);
/* Nothing at all? Return 0 to try loading module. */
if (best == -1) {
*err = -ENOENT;
return 0;
}
*err = best;
if (!have_rev)
*err = -EPROTONOSUPPORT;
return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
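/*
* Worked example (illustrative): assume a "tcp" match registered at
* revision 0 only. xt_find_revision(NFPROTO_IPV4, "tcp", 1, 0, &err)
* returns 1 with err == -EPROTONOSUPPORT (the name exists, the revision
* does not), while an unknown name returns 0 with err == -ENOENT so the
* caller may try loading the module.
*/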
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
static const char *const inetbr_names[] = {
"PREROUTING", "INPUT", "FORWARD",
"OUTPUT", "POSTROUTING", "BROUTING",
};
static const char *const arp_names[] = {
"INPUT", "FORWARD", "OUTPUT",
};
const char *const *names;
unsigned int i, max;
char *p = buf;
bool np = false;
int res;
names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
ARRAY_SIZE(inetbr_names);
*p = '\0';
for (i = 0; i < max; ++i) {
if (!(mask & (1 << i)))
continue;
res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
if (res > 0) {
size -= res;
p += res;
}
np = true;
}
return buf;
}
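/*
* Example (illustrative): for an IPv4 match restricted to the first two
* hooks,
*
*	char buf[64];
*
*	textify_hooks(buf, sizeof(buf), (1 << 0) | (1 << 1), NFPROTO_IPV4);
*
* buf now holds "PREROUTING/INPUT".
*/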
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->match->matchsize) != size &&
par->match->matchsize != -1) {
/*
* ebt_among is exempt from centralized matchsize checking
* because it uses a dynamic-size data set.
*/
pr_err("%s_tables: %s.%u match: invalid size "
"%u (kernel) != (user) %u\n",
xt_prefix[par->family], par->match->name,
par->match->revision,
XT_ALIGN(par->match->matchsize), size);
return -EINVAL;
}
if (par->match->table != NULL &&
strcmp(par->match->table, par->table) != 0) {
pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
xt_prefix[par->family], par->match->name,
par->match->table, par->table);
return -EINVAL;
}
if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
char used[64], allow[64];
pr_err("%s_tables: %s match: used from hooks %s, but only "
"valid from %s\n",
xt_prefix[par->family], par->match->name,
textify_hooks(used, sizeof(used), par->hook_mask,
par->family),
textify_hooks(allow, sizeof(allow), par->match->hooks,
par->family));
return -EINVAL;
}
if (par->match->proto && (par->match->proto != proto || inv_proto)) {
pr_err("%s_tables: %s match: only valid for protocol %u\n",
xt_prefix[par->family], par->match->name,
par->match->proto);
return -EINVAL;
}
if (par->match->checkentry != NULL) {
ret = par->match->checkentry(par);
if (ret < 0)
return ret;
else if (ret > 0)
/* Flag up potential errors. */
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
struct xt_af *xp = &xt[af];
if (!xp->compat_tab) {
if (!xp->number)
return -EINVAL;
xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
if (!xp->compat_tab)
return -ENOMEM;
xp->cur = 0;
}
if (xp->cur >= xp->number)
return -EINVAL;
if (xp->cur)
delta += xp->compat_tab[xp->cur - 1].delta;
xp->compat_tab[xp->cur].offset = offset;
xp->compat_tab[xp->cur].delta = delta;
xp->cur++;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
void xt_compat_flush_offsets(u_int8_t af)
{
if (xt[af].compat_tab) {
vfree(xt[af].compat_tab);
xt[af].compat_tab = NULL;
xt[af].number = 0;
xt[af].cur = 0;
}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
struct compat_delta *tmp = xt[af].compat_tab;
int mid, left = 0, right = xt[af].cur - 1;
while (left <= right) {
mid = (left + right) >> 1;
if (offset > tmp[mid].offset)
left = mid + 1;
else if (offset < tmp[mid].offset)
right = mid - 1;
else
return mid ? tmp[mid - 1].delta : 0;
}
return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
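/*
* Worked example (illustrative): with compat_tab holding offsets
* {100, 200, 300} and cumulative deltas {4, 8, 12}, a hit at offset 200
* returns the delta accumulated before that entry (4), while a miss at
* offset 250 leaves left == 2 and returns tmp[1].delta == 8.
*/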
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
xt[af].number = number;
xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
int xt_compat_match_offset(const struct xt_match *match)
{
u_int16_t csize = match->compatsize ? : match->matchsize;
return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
unsigned int *size)
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
int pad, off = xt_compat_match_offset(match);
u_int16_t msize = cm->u.user.match_size;
m = *dstptr;
memcpy(m, cm, sizeof(*cm));
if (match->compat_from_user)
match->compat_from_user(m->data, cm->data);
else
memcpy(m->data, cm->data, msize - sizeof(*cm));
pad = XT_ALIGN(match->matchsize) - match->matchsize;
if (pad > 0)
memset(m->data + match->matchsize, 0, pad);
msize += off;
m->u.user.match_size = msize;
*size += off;
*dstptr += msize;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size)
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match __user *cm = *dstptr;
int off = xt_compat_match_offset(match);
u_int16_t msize = m->u.user.match_size - off;
if (copy_to_user(cm, m, sizeof(*cm)) ||
put_user(msize, &cm->u.user.match_size) ||
copy_to_user(cm->u.user.name, m->u.kernel.match->name,
strlen(m->u.kernel.match->name) + 1))
return -EFAULT;
if (match->compat_to_user) {
if (match->compat_to_user((void __user *)cm->data, m->data))
return -EFAULT;
} else {
if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
return -EFAULT;
}
*size -= off;
*dstptr += msize;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */
int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->target->targetsize) != size) {
pr_err("%s_tables: %s.%u target: invalid size "
"%u (kernel) != (user) %u\n",
xt_prefix[par->family], par->target->name,
par->target->revision,
XT_ALIGN(par->target->targetsize), size);
return -EINVAL;
}
if (par->target->table != NULL &&
strcmp(par->target->table, par->table) != 0) {
pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
xt_prefix[par->family], par->target->name,
par->target->table, par->table);
return -EINVAL;
}
if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
char used[64], allow[64];
pr_err("%s_tables: %s target: used from hooks %s, but only "
"usable from %s\n",
xt_prefix[par->family], par->target->name,
textify_hooks(used, sizeof(used), par->hook_mask,
par->family),
textify_hooks(allow, sizeof(allow), par->target->hooks,
par->family));
return -EINVAL;
}
if (par->target->proto && (par->target->proto != proto || inv_proto)) {
pr_err("%s_tables: %s target: only valid for protocol %u\n",
xt_prefix[par->family], par->target->name,
par->target->proto);
return -EINVAL;
}
if (par->target->checkentry != NULL) {
ret = par->target->checkentry(par);
if (ret < 0)
return ret;
else if (ret > 0)
/* Flag up potential errors. */
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
u_int16_t csize = target->compatsize ? : target->targetsize;
return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
unsigned int *size)
{
const struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
int pad, off = xt_compat_target_offset(target);
u_int16_t tsize = ct->u.user.target_size;
t = *dstptr;
memcpy(t, ct, sizeof(*ct));
if (target->compat_from_user)
target->compat_from_user(t->data, ct->data);
else
memcpy(t->data, ct->data, tsize - sizeof(*ct));
pad = XT_ALIGN(target->targetsize) - target->targetsize;
if (pad > 0)
memset(t->data + target->targetsize, 0, pad);
tsize += off;
t->u.user.target_size = tsize;
*size += off;
*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size)
{
const struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target __user *ct = *dstptr;
int off = xt_compat_target_offset(target);
u_int16_t tsize = t->u.user.target_size - off;
if (copy_to_user(ct, t, sizeof(*ct)) ||
put_user(tsize, &ct->u.user.target_size) ||
copy_to_user(ct->u.user.name, t->u.kernel.target->name,
strlen(t->u.kernel.target->name) + 1))
return -EFAULT;
if (target->compat_to_user) {
if (target->compat_to_user((void __user *)ct->data, t->data))
return -EFAULT;
} else {
if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
return -EFAULT;
}
*size -= off;
*dstptr += tsize;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
struct xt_table_info *info = NULL;
size_t sz = sizeof(*info) + size;
/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;
if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (!info) {
info = vmalloc(sz);
if (!info)
return NULL;
}
memset(info, 0, sizeof(*info));
info->size = size;
return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
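/*
* Example (hypothetical sketch): a table implementation sizing a blob
* for user-supplied entries. tmp and user_ptr are assumed to come from
* the caller, and entries is assumed to be the trailing flexible array
* of struct xt_table_info in this kernel version.
*
*	struct xt_table_info *newinfo;
*
*	newinfo = xt_alloc_table_info(tmp.size);
*	if (!newinfo)
*		return -ENOMEM;
*	if (copy_from_user(newinfo->entries, user_ptr, tmp.size) != 0) {
*		xt_free_table_info(newinfo);
*		return -EFAULT;
*	}
*/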
void xt_free_table_info(struct xt_table_info *info)
{
int cpu;
if (info->jumpstack != NULL) {
for_each_possible_cpu(cpu)
kvfree(info->jumpstack[cpu]);
kvfree(info->jumpstack);
}
kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name)
{
struct xt_table *t, *found = NULL;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &net->xt.tables[af], list)
if (strcmp(t->name, name) == 0 && try_module_get(t->me))
return t;
if (net == &init_net)
goto out;
/* Table doesn't exist in this netns, re-try init */
list_for_each_entry(t, &init_net.xt.tables[af], list) {
if (strcmp(t->name, name))
continue;
if (!try_module_get(t->me))
return NULL;
mutex_unlock(&xt[af].mutex);
if (t->table_init(net) != 0) {
module_put(t->me);
return NULL;
}
found = t;
mutex_lock(&xt[af].mutex);
break;
}
if (!found)
goto out;
/* and once again: */
list_for_each_entry(t, &net->xt.tables[af], list)
if (strcmp(t->name, name) == 0)
return t;
module_put(found->me);
out:
mutex_unlock(&xt[af].mutex);
return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
void xt_table_unlock(struct xt_table *table)
{
mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);
void xt_compat_unlock(u_int8_t af)
{
mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
unsigned int size;
int cpu;
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
i->jumpstack = vzalloc(size);
else
i->jumpstack = kzalloc(size, GFP_KERNEL);
if (i->jumpstack == NULL)
return -ENOMEM;
/* ruleset without jumps -- no stack needed */
if (i->stacksize == 0)
return 0;
/* Jumpstack needs to be able to record two full callchains, one
* from the first rule set traversal, plus one table reentrancy
* via -j TEE without clobbering the callchain that brought us to
* TEE target.
*
* This is done by allocating two jumpstacks per cpu, on reentry
* the upper half of the stack is used.
*
* see the jumpstack setup in ipt_do_table() for more details.
*/
size = sizeof(void *) * i->stacksize * 2u;
for_each_possible_cpu(cpu) {
if (size > PAGE_SIZE)
i->jumpstack[cpu] = vmalloc_node(size,
cpu_to_node(cpu));
else
i->jumpstack[cpu] = kmalloc_node(size,
GFP_KERNEL, cpu_to_node(cpu));
if (i->jumpstack[cpu] == NULL)
/*
* Freeing will be done later on by the callers. The
* chain is: xt_replace_table -> __do_replace ->
* do_replace -> xt_free_table_info.
*/
return -ENOMEM;
}
return 0;
}
struct xt_table_info *
xt_replace_table(struct xt_table *table,
unsigned int num_counters,
struct xt_table_info *newinfo,
int *error)
{
struct xt_table_info *private;
int ret;
ret = xt_jumpstack_alloc(newinfo);
if (ret < 0) {
*error = ret;
return NULL;
}
/* Do the substitution. */
local_bh_disable();
private = table->private;
/* Check inside lock: is the old number correct? */
if (num_counters != private->number) {
pr_debug("num_counters != table->private->number (%u/%u)\n",
num_counters, private->number);
local_bh_enable();
*error = -EAGAIN;
return NULL;
}
newinfo->initial_entries = private->initial_entries;
/*
* Ensure contents of newinfo are visible before assigning to
* private.
*/
smp_wmb();
table->private = newinfo;
/*
* Even though table entries have now been swapped, other CPUs
* may still be using the old entries. This is okay, because
* resynchronization happens because of the locking done
* during the get_counters() routine.
*/
local_bh_enable();
#ifdef CONFIG_AUDIT
if (audit_enabled) {
struct audit_buffer *ab;
ab = audit_log_start(current->audit_context, GFP_KERNEL,
AUDIT_NETFILTER_CFG);
if (ab) {
audit_log_format(ab, "table=%s family=%u entries=%u",
table->name, table->af,
private->number);
audit_log_end(ab);
}
}
#endif
return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
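/*
* Example (hypothetical sketch): the do_replace() path of a table module
* swaps in the new ruleset and frees the old one after harvesting its
* counters; get_counters() is an assumed per-table helper, and t,
* newinfo and counters come from the surrounding code.
*
*	struct xt_table_info *oldinfo;
*	int ret;
*
*	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
*	if (!oldinfo)
*		return ret;
*	get_counters(oldinfo, counters);
*	xt_free_table_info(oldinfo);
*/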
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *input_table,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo)
{
int ret;
struct xt_table_info *private;
struct xt_table *t, *table;
/* Don't add one object to multiple lists. */
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
goto out;
}
mutex_lock(&xt[table->af].mutex);
/* Don't autoload: we'd eat our tail... */
list_for_each_entry(t, &net->xt.tables[table->af], list) {
if (strcmp(t->name, table->name) == 0) {
ret = -EEXIST;
goto unlock;
}
}
/* Simplifies replace_table code. */
table->private = bootstrap;
if (!xt_replace_table(table, 0, newinfo, &ret))
goto unlock;
private = table->private;
pr_debug("table->private->number = %u\n", private->number);
/* save number of initial entries */
private->initial_entries = private->number;
list_add(&table->list, &net->xt.tables[table->af]);
mutex_unlock(&xt[table->af].mutex);
return table;
unlock:
mutex_unlock(&xt[table->af].mutex);
kfree(table);
out:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
void *xt_unregister_table(struct xt_table *table)
{
struct xt_table_info *private;
mutex_lock(&xt[table->af].mutex);
private = table->private;
list_del(&table->list);
mutex_unlock(&xt[table->af].mutex);
kfree(table);
return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
struct seq_net_private p;
u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
struct xt_names_priv *priv = seq->private;
struct net *net = seq_file_net(seq);
u_int8_t af = priv->af;
mutex_lock(&xt[af].mutex);
return seq_list_start(&net->xt.tables[af], *pos);
}
static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct xt_names_priv *priv = seq->private;
struct net *net = seq_file_net(seq);
u_int8_t af = priv->af;
return seq_list_next(v, &net->xt.tables[af], pos);
}
static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
struct xt_names_priv *priv = seq->private;
u_int8_t af = priv->af;
mutex_unlock(&xt[af].mutex);
}
static int xt_table_seq_show(struct seq_file *seq, void *v)
{
struct xt_table *table = list_entry(v, struct xt_table, list);
if (*table->name)
seq_printf(seq, "%s\n", table->name);
return 0;
}
static const struct seq_operations xt_table_seq_ops = {
.start = xt_table_seq_start,
.next = xt_table_seq_next,
.stop = xt_table_seq_stop,
.show = xt_table_seq_show,
};
static int xt_table_open(struct inode *inode, struct file *file)
{
int ret;
struct xt_names_priv *priv;
ret = seq_open_net(inode, file, &xt_table_seq_ops,
sizeof(struct xt_names_priv));
if (!ret) {
priv = ((struct seq_file *)file->private_data)->private;
priv->af = (unsigned long)PDE_DATA(inode);
}
return ret;
}
static const struct file_operations xt_table_ops = {
.owner = THIS_MODULE,
.open = xt_table_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
/*
* Traversal state for the ip{,6}_tables_{matches,targets} proc listings;
* used to help the seq_file code cross the per-AF mutexes.
*/
struct nf_mttg_trav {
struct list_head *head, *curr;
uint8_t class, nfproto;
};
enum {
MTTG_TRAV_INIT,
MTTG_TRAV_NFP_UNSPEC,
MTTG_TRAV_NFP_SPEC,
MTTG_TRAV_DONE,
};
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
bool is_target)
{
static const uint8_t next_class[] = {
[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
[MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
};
struct nf_mttg_trav *trav = seq->private;
switch (trav->class) {
case MTTG_TRAV_INIT:
trav->class = MTTG_TRAV_NFP_UNSPEC;
mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
trav->head = trav->curr = is_target ?
&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
break;
case MTTG_TRAV_NFP_UNSPEC:
trav->curr = trav->curr->next;
if (trav->curr != trav->head)
break;
mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
mutex_lock(&xt[trav->nfproto].mutex);
trav->head = trav->curr = is_target ?
&xt[trav->nfproto].target : &xt[trav->nfproto].match;
trav->class = next_class[trav->class];
break;
case MTTG_TRAV_NFP_SPEC:
trav->curr = trav->curr->next;
if (trav->curr != trav->head)
break;
/* fallthru, _stop will unlock */
default:
return NULL;
}
if (ppos != NULL)
++*ppos;
return trav;
}
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
bool is_target)
{
struct nf_mttg_trav *trav = seq->private;
unsigned int j;
trav->class = MTTG_TRAV_INIT;
for (j = 0; j < *pos; ++j)
if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
return NULL;
return trav;
}
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
struct nf_mttg_trav *trav = seq->private;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
break;
case MTTG_TRAV_NFP_SPEC:
mutex_unlock(&xt[trav->nfproto].mutex);
break;
}
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
return xt_mttg_seq_start(seq, pos, false);
}
static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
return xt_mttg_seq_next(seq, v, ppos, false);
}
static int xt_match_seq_show(struct seq_file *seq, void *v)
{
const struct nf_mttg_trav *trav = seq->private;
const struct xt_match *match;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
case MTTG_TRAV_NFP_SPEC:
if (trav->curr == trav->head)
return 0;
match = list_entry(trav->curr, struct xt_match, list);
if (*match->name)
seq_printf(seq, "%s\n", match->name);
}
return 0;
}
static const struct seq_operations xt_match_seq_ops = {
.start = xt_match_seq_start,
.next = xt_match_seq_next,
.stop = xt_mttg_seq_stop,
.show = xt_match_seq_show,
};
static int xt_match_open(struct inode *inode, struct file *file)
{
struct nf_mttg_trav *trav;
trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
if (!trav)
return -ENOMEM;
trav->nfproto = (unsigned long)PDE_DATA(inode);
return 0;
}
static const struct file_operations xt_match_ops = {
.owner = THIS_MODULE,
.open = xt_match_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
return xt_mttg_seq_start(seq, pos, true);
}
static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
return xt_mttg_seq_next(seq, v, ppos, true);
}
static int xt_target_seq_show(struct seq_file *seq, void *v)
{
const struct nf_mttg_trav *trav = seq->private;
const struct xt_target *target;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
case MTTG_TRAV_NFP_SPEC:
if (trav->curr == trav->head)
return 0;
target = list_entry(trav->curr, struct xt_target, list);
if (*target->name)
seq_printf(seq, "%s\n", target->name);
}
return 0;
}
static const struct seq_operations xt_target_seq_ops = {
.start = xt_target_seq_start,
.next = xt_target_seq_next,
.stop = xt_mttg_seq_stop,
.show = xt_target_seq_show,
};
static int xt_target_open(struct inode *inode, struct file *file)
{
struct nf_mttg_trav *trav;
trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
if (!trav)
return -ENOMEM;
trav->nfproto = (unsigned long)PDE_DATA(inode);
return 0;
}
static const struct file_operations xt_target_ops = {
.owner = THIS_MODULE,
.open = xt_target_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"
#endif /* CONFIG_PROC_FS */
/**
* xt_hook_ops_alloc - set up hooks for a new table
* @table: table with metadata needed to set up hooks
* @fn: Hook function
*
* This function will create the nf_hook_ops that the x_table needs
* to hand to xt_hook_link_net().
*/
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
unsigned int hook_mask = table->valid_hooks;
uint8_t i, num_hooks = hweight32(hook_mask);
uint8_t hooknum;
struct nf_hook_ops *ops;
ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
hook_mask >>= 1, ++hooknum) {
if (!(hook_mask & 1))
continue;
ops[i].hook = fn;
ops[i].pf = table->af;
ops[i].hooknum = hooknum;
ops[i].priority = table->priority;
++i;
}
return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
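/*
* Example (hypothetical sketch): a table module allocating its hook ops
* and registering them. nf_register_net_hooks() is assumed to be the
* available registration interface in this kernel version; packet_filter
* and ipt_do_table stand in for the module's table and hook function.
*
*	struct nf_hook_ops *ops;
*
*	ops = xt_hook_ops_alloc(&packet_filter, ipt_do_table);
*	if (IS_ERR(ops))
*		return PTR_ERR(ops);
*	ret = nf_register_net_hooks(net, ops,
*				    hweight32(packet_filter.valid_hooks));
*/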
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
char buf[XT_FUNCTION_MAXNAMELEN];
struct proc_dir_entry *proc;
kuid_t root_uid;
kgid_t root_gid;
#endif
if (af >= ARRAY_SIZE(xt_prefix))
return -EINVAL;
#ifdef CONFIG_PROC_FS
root_uid = make_kuid(net->user_ns, 0);
root_gid = make_kgid(net->user_ns, 0);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
(void *)(unsigned long)af);
if (!proc)
goto out;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
(void *)(unsigned long)af);
if (!proc)
goto out_remove_tables;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
(void *)(unsigned long)af);
if (!proc)
goto out_remove_matches;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
#endif
return 0;
#ifdef CONFIG_PROC_FS
out_remove_matches:
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
out_remove_tables:
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
out:
return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
char buf[XT_FUNCTION_MAXNAMELEN];
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
static int __net_init xt_net_init(struct net *net)
{
int i;
for (i = 0; i < NFPROTO_NUMPROTO; i++)
INIT_LIST_HEAD(&net->xt.tables[i]);
return 0;
}
static struct pernet_operations xt_net_ops = {
.init = xt_net_init,
};
static int __init xt_init(void)
{
unsigned int i;
int rv;
for_each_possible_cpu(i) {
seqcount_init(&per_cpu(xt_recseq, i));
}
xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
if (!xt)
return -ENOMEM;
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
mutex_init(&xt[i].compat_mutex);
xt[i].compat_tab = NULL;
#endif
INIT_LIST_HEAD(&xt[i].target);
INIT_LIST_HEAD(&xt[i].match);
}
rv = register_pernet_subsys(&xt_net_ops);
if (rv < 0)
kfree(xt);
return rv;
}
static void __exit xt_fini(void)
{
unregister_pernet_subsys(&xt_net_ops);
kfree(xt);
}
module_init(xt_init);
module_exit(xt_fini);
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5000_0 |
crossvul-cpp_data_good_3571_0 | /*
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2008 Red Hat Inc.
*
* DRM core CRTC related functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Keith Packard
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"
struct drm_prop_enum_list {
int type;
char *name;
};
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
{ \
int i; \
for (i = 0; i < ARRAY_SIZE(list); i++) { \
if (list[i].type == val) \
return list[i].name; \
} \
return "(unknown)"; \
}
/*
* Global properties
*/
static struct drm_prop_enum_list drm_dpms_enum_list[] =
{ { DRM_MODE_DPMS_ON, "On" },
{ DRM_MODE_DPMS_STANDBY, "Standby" },
{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
{ DRM_MODE_DPMS_OFF, "Off" }
};
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
/*
* Optional properties
*/
static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{
{ DRM_MODE_SCALE_NONE, "None" },
{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
{ DRM_MODE_SCALE_CENTER, "Center" },
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
{
{ DRM_MODE_DITHERING_OFF, "Off" },
{ DRM_MODE_DITHERING_ON, "On" },
{ DRM_MODE_DITHERING_AUTO, "Automatic" },
};
/*
* Non-global properties, but "required" for certain connectors.
*/
static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
};
DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
};
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
static struct drm_prop_enum_list drm_tv_select_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
};
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
{
{ DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
};
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
{ DRM_MODE_DIRTY_OFF, "Off" },
{ DRM_MODE_DIRTY_ON, "On" },
{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
};
DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
drm_dirty_info_enum_list)
struct drm_conn_prop_enum_list {
int type;
char *name;
int count;
};
/*
* Connector and encoder types.
*/
static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
{ DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
{ DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
{ DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
{ DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
{ DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
};
static struct drm_prop_enum_list drm_encoder_enum_list[] =
{ { DRM_MODE_ENCODER_NONE, "None" },
{ DRM_MODE_ENCODER_DAC, "DAC" },
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
char *drm_get_encoder_name(struct drm_encoder *encoder)
{
static char buf[32];
snprintf(buf, 32, "%s-%d",
drm_encoder_enum_list[encoder->encoder_type].name,
encoder->base.id);
return buf;
}
EXPORT_SYMBOL(drm_get_encoder_name);
char *drm_get_connector_name(struct drm_connector *connector)
{
static char buf[32];
snprintf(buf, 32, "%s-%d",
drm_connector_enum_list[connector->connector_type].name,
connector->connector_type_id);
return buf;
}
EXPORT_SYMBOL(drm_get_connector_name);
char *drm_get_connector_status_name(enum drm_connector_status status)
{
if (status == connector_status_connected)
return "connected";
else if (status == connector_status_disconnected)
return "disconnected";
else
return "unknown";
}
/**
* drm_mode_object_get - allocate a new identifier
* @dev: DRM device
* @obj: object pointer, used to generate unique ID
* @type: object type
*
* LOCKING:
*
* Create a unique identifier based on @obj in @dev's identifier space. Used
* for tracking modes, CRTCs and connectors.
*
* RETURNS:
* New unique (relative to other objects in @dev) integer identifier for the
* object.
*/
static int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
int new_id = 0;
int ret;
again:
if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Ran out memory getting a mode number\n");
return -EINVAL;
}
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
mutex_unlock(&dev->mode_config.idr_mutex);
if (ret == -EAGAIN)
goto again;
obj->id = new_id;
obj->type = obj_type;
return 0;
}
/**
* drm_mode_object_put - free an identifier
* @dev: DRM device
* @id: ID to free
*
* LOCKING:
* Caller must hold DRM mode_config lock.
*
* Free @id from @dev's unique identifier pool.
*/
static void drm_mode_object_put(struct drm_device *dev,
struct drm_mode_object *object)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, object->id);
mutex_unlock(&dev->mode_config.idr_mutex);
}
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != type) || (obj->id != id))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
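/*
* Example (hypothetical sketch): resolving a userspace-supplied ID to a
* CRTC before acting on it; obj_to_crtc() is assumed to be the usual
* container_of() helper from drm_crtc.h.
*
*	struct drm_mode_object *obj;
*	struct drm_crtc *crtc;
*
*	obj = drm_mode_object_find(dev, crtc_id, DRM_MODE_OBJECT_CRTC);
*	if (!obj)
*		return -EINVAL;
*	crtc = obj_to_crtc(obj);
*/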
/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
* @fb: framebuffer to be initialized
* @funcs: vfuncs (e.g. the destroy callback) for this framebuffer
*
* LOCKING:
* Caller must hold mode config lock.
*
* Allocates an ID for the framebuffer's parent mode object, sets its mode
* functions & device file and adds it to the master fd list.
*
* RETURNS:
* Zero on success, error code on failure.
*/
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs)
{
int ret;
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret) {
return ret;
}
fb->dev = dev;
fb->funcs = funcs;
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
return 0;
}
EXPORT_SYMBOL(drm_framebuffer_init);
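/*
* Example (hypothetical sketch): drivers usually embed the framebuffer
* in their own type and initialize the base object here; all foo_*
* names are made up.
*
*	struct foo_framebuffer {
*		struct drm_framebuffer base;
*		struct foo_bo *bo;
*	};
*
*	ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
*	if (ret)
*		return ret;
*/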
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
* LOCKING:
* Caller must hold mode config lock.
*
* Scans all the CRTCs on @fb's device. Any CRTC still using @fb is
* reset (its framebuffer pointer set to NULL) before @fb is removed.
*/
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
struct drm_mode_set set;
int ret;
/* remove from any CRTC */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
set.crtc = crtc;
set.fb = NULL;
ret = crtc->funcs->set_config(&set);
if (ret)
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
}
}
drm_mode_object_put(dev, &fb->base);
list_del(&fb->head);
dev->mode_config.num_fb--;
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
/**
* drm_crtc_init - Initialise a new CRTC object
* @dev: DRM device
* @crtc: CRTC object to init
* @funcs: callbacks for the new CRTC
*
* LOCKING:
* Caller must hold mode config lock.
*
* Inits a new object created as the base part of a driver crtc object.
*/
void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs)
{
crtc->dev = dev;
crtc->funcs = funcs;
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_crtc_init);
/**
* drm_crtc_cleanup - Cleans up the core crtc usage.
* @crtc: CRTC to cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Cleanup @crtc. Removes from drm modesetting space
* does NOT free object, caller does that.
*/
void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
if (crtc->gamma_store) {
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
}
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
}
EXPORT_SYMBOL(drm_crtc_cleanup);
/**
* drm_mode_probed_add - add a mode to a connector's probed mode list
* @connector: connector the new mode is attached to
* @mode: mode data
*
* LOCKING:
* Caller must hold mode config lock.
*
* Add @mode to @connector's mode list for later use.
*/
void drm_mode_probed_add(struct drm_connector *connector,
struct drm_display_mode *mode)
{
list_add(&mode->head, &connector->probed_modes);
}
EXPORT_SYMBOL(drm_mode_probed_add);
/**
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
* @mode: mode to remove
*
* LOCKING:
* Caller must hold mode config lock.
*
* Remove @mode from @connector's mode list, then free it.
*/
void drm_mode_remove(struct drm_connector *connector,
struct drm_display_mode *mode)
{
list_del(&mode->head);
kfree(mode);
}
EXPORT_SYMBOL(drm_mode_remove);
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
* @connector_type: user visible type of the connector (DRM_MODE_CONNECTOR_*)
*
* LOCKING:
* Caller must hold @dev's mode_config lock.
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*/
void drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type)
{
mutex_lock(&dev->mode_config.mutex);
connector->dev = dev;
connector->funcs = funcs;
drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
connector->connector_type = connector_type;
connector->connector_type_id =
++drm_connector_enum_list[connector_type].count; /* TODO */
INIT_LIST_HEAD(&connector->user_modes);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
drm_connector_attach_property(connector,
dev->mode_config.edid_property,
0);
drm_connector_attach_property(connector,
dev->mode_config.dpms_property, 0);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_connector_init);
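/*
* Example (hypothetical sketch): driver-side connector setup, pairing
* this call with the helper library; the foo_* names are made up, and
* drm_connector_helper_add() is assumed from drm_crtc_helper.h.
*
*	drm_connector_init(dev, &foo_conn->base, &foo_connector_funcs,
*			   DRM_MODE_CONNECTOR_VGA);
*	drm_connector_helper_add(&foo_conn->base,
*				 &foo_connector_helper_funcs);
*/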
/**
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
* LOCKING:
* Caller must hold @dev's mode_config lock.
*
* Cleans up the connector but doesn't free the object.
*/
void drm_connector_cleanup(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_connector_cleanup);
void drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type)
{
mutex_lock(&dev->mode_config.mutex);
encoder->dev = dev;
drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
dev->mode_config.num_encoder++;
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_encoder_init);
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &encoder->base);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_encoder_cleanup);
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
*
* LOCKING:
* Caller must hold DRM mode_config lock.
*
* Create a new drm_display_mode, give it an ID, and return it.
*
* RETURNS:
* Pointer to new mode on success, NULL on error.
*/
struct drm_display_mode *drm_mode_create(struct drm_device *dev)
{
struct drm_display_mode *nmode;
nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
if (!nmode)
return NULL;
drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
return nmode;
}
EXPORT_SYMBOL(drm_mode_create);
/**
* drm_mode_destroy - remove a mode
* @dev: DRM device
* @mode: mode to remove
*
* LOCKING:
* Caller must hold mode config lock.
*
* Free @mode's unique identifier, then free it.
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
{
drm_mode_object_put(dev, &mode->base);
kfree(mode);
}
EXPORT_SYMBOL(drm_mode_destroy);
static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
struct drm_property *dpms;
int i;
/*
* Standard properties (apply to all connectors)
*/
edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"EDID", 0);
dev->mode_config.edid_property = edid;
dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"DPMS", ARRAY_SIZE(drm_dpms_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
drm_dpms_enum_list[i].name);
dev->mode_config.dpms_property = dpms;
return 0;
}
/**
* drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
* @dev: DRM device
*
* Called by a driver the first time a DVI-I connector is made.
*/
int drm_mode_create_dvi_i_properties(struct drm_device *dev)
{
struct drm_property *dvi_i_selector;
struct drm_property *dvi_i_subconnector;
int i;
if (dev->mode_config.dvi_i_select_subconnector_property)
return 0;
dvi_i_selector =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
"select subconnector",
ARRAY_SIZE(drm_dvi_i_select_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
drm_property_add_enum(dvi_i_selector, i,
drm_dvi_i_select_enum_list[i].type,
drm_dvi_i_select_enum_list[i].name);
dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
dvi_i_subconnector =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE,
"subconnector",
ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
drm_property_add_enum(dvi_i_subconnector, i,
drm_dvi_i_subconnector_enum_list[i].type,
drm_dvi_i_subconnector_enum_list[i].name);
dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
/**
* drm_mode_create_tv_properties - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
* @modes: array of pointers to strings containing name of each format
*
* Called by a driver's TV initialization routine, this function creates
* the TV specific connector properties for a given device. Caller is
* responsible for allocating a list of format names and passing them to
* this routine.
*/
int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
char *modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
int i;
if (dev->mode_config.tv_select_subconnector_property)
return 0;
/*
* Basic connector properties
*/
tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"select subconnector",
ARRAY_SIZE(drm_tv_select_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
drm_property_add_enum(tv_selector, i,
drm_tv_select_enum_list[i].type,
drm_tv_select_enum_list[i].name);
dev->mode_config.tv_select_subconnector_property = tv_selector;
tv_subconnector =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE, "subconnector",
ARRAY_SIZE(drm_tv_subconnector_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
drm_property_add_enum(tv_subconnector, i,
drm_tv_subconnector_enum_list[i].type,
drm_tv_subconnector_enum_list[i].name);
dev->mode_config.tv_subconnector_property = tv_subconnector;
/*
* Other, TV specific properties: margins & TV modes.
*/
dev->mode_config.tv_left_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"left margin", 2);
dev->mode_config.tv_left_margin_property->values[0] = 0;
dev->mode_config.tv_left_margin_property->values[1] = 100;
dev->mode_config.tv_right_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"right margin", 2);
dev->mode_config.tv_right_margin_property->values[0] = 0;
dev->mode_config.tv_right_margin_property->values[1] = 100;
dev->mode_config.tv_top_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"top margin", 2);
dev->mode_config.tv_top_margin_property->values[0] = 0;
dev->mode_config.tv_top_margin_property->values[1] = 100;
dev->mode_config.tv_bottom_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"bottom margin", 2);
dev->mode_config.tv_bottom_margin_property->values[0] = 0;
dev->mode_config.tv_bottom_margin_property->values[1] = 100;
dev->mode_config.tv_mode_property =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
"mode", num_modes);
for (i = 0; i < num_modes; i++)
drm_property_add_enum(dev->mode_config.tv_mode_property, i,
i, modes[i]);
dev->mode_config.tv_brightness_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"brightness", 2);
dev->mode_config.tv_brightness_property->values[0] = 0;
dev->mode_config.tv_brightness_property->values[1] = 100;
dev->mode_config.tv_contrast_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"contrast", 2);
dev->mode_config.tv_contrast_property->values[0] = 0;
dev->mode_config.tv_contrast_property->values[1] = 100;
dev->mode_config.tv_flicker_reduction_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"flicker reduction", 2);
dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
dev->mode_config.tv_overscan_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"overscan", 2);
dev->mode_config.tv_overscan_property->values[0] = 0;
dev->mode_config.tv_overscan_property->values[1] = 100;
dev->mode_config.tv_saturation_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"saturation", 2);
dev->mode_config.tv_saturation_property->values[0] = 0;
dev->mode_config.tv_saturation_property->values[1] = 100;
dev->mode_config.tv_hue_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"hue", 2);
dev->mode_config.tv_hue_property->values[0] = 0;
dev->mode_config.tv_hue_property->values[1] = 100;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_tv_properties);
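/*
* Example (hypothetical sketch): a TV encoder driver supplying its
* format names and then attaching the resulting mode property; the
* foo_tv_modes list is illustrative.
*
*	static char *foo_tv_modes[] = {
*		"NTSC-M", "PAL-B",
*	};
*
*	drm_mode_create_tv_properties(dev, ARRAY_SIZE(foo_tv_modes),
*				      foo_tv_modes);
*	drm_connector_attach_property(connector,
*				      dev->mode_config.tv_mode_property, 0);
*/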
/**
* drm_mode_create_scaling_mode_property - create scaling mode property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*/
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
{
struct drm_property *scaling_mode;
int i;
if (dev->mode_config.scaling_mode_property)
return 0;
scaling_mode =
drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
ARRAY_SIZE(drm_scaling_mode_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
drm_property_add_enum(scaling_mode, i,
drm_scaling_mode_enum_list[i].type,
drm_scaling_mode_enum_list[i].name);
dev->mode_config.scaling_mode_property = scaling_mode;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
* drm_mode_create_dithering_property - create dithering property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*/
int drm_mode_create_dithering_property(struct drm_device *dev)
{
struct drm_property *dithering_mode;
int i;
if (dev->mode_config.dithering_mode_property)
return 0;
dithering_mode =
drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
ARRAY_SIZE(drm_dithering_mode_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
drm_property_add_enum(dithering_mode, i,
drm_dithering_mode_enum_list[i].type,
drm_dithering_mode_enum_list[i].name);
dev->mode_config.dithering_mode_property = dithering_mode;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_dithering_property);
/**
* drm_mode_create_dirty_info_property - create dirty info property
* @dev: DRM device
*
* Called by a driver the first time it's needed, must be attached to desired
* connectors.
*/
int drm_mode_create_dirty_info_property(struct drm_device *dev)
{
struct drm_property *dirty_info;
int i;
if (dev->mode_config.dirty_info_property)
return 0;
dirty_info =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE,
"dirty",
ARRAY_SIZE(drm_dirty_info_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
drm_property_add_enum(dirty_info, i,
drm_dirty_info_enum_list[i].type,
drm_dirty_info_enum_list[i].name);
dev->mode_config.dirty_info_property = dirty_info;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
* LOCKING:
* None, should happen single threaded at init time.
*
* Initialize @dev's mode_config structure, used for tracking the graphics
* configuration of @dev.
*/
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
idr_init(&dev->mode_config.crtc_idr);
mutex_lock(&dev->mode_config.mutex);
drm_mode_create_standard_connector_properties(dev);
mutex_unlock(&dev->mode_config.mutex);
/* Just to be sure */
dev->mode_config.num_fb = 0;
dev->mode_config.num_connector = 0;
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);
int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
{
uint32_t total_objects = 0;
total_objects += dev->mode_config.num_crtc;
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
return -ENOMEM;
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
return 0;
}
int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_mode_group *group)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
if ((ret = drm_mode_group_init(dev, group)))
return ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
group->id_list[group->num_crtcs++] = crtc->base.id;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
group->id_list[group->num_crtcs + group->num_encoders++] =
encoder->base.id;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors++] = connector->base.id;
return 0;
}
/**
* drm_mode_config_cleanup - free up DRM mode_config info
* @dev: DRM device
*
* LOCKING:
* Caller must hold mode config lock.
*
* Free up all the connectors and CRTCs associated with this DRM device, then
* free up the framebuffers and associated buffer objects.
*
* FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
struct drm_connector *connector, *ot;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
encoder->funcs->destroy(encoder);
}
list_for_each_entry_safe(connector, ot,
&dev->mode_config.connector_list, head) {
connector->funcs->destroy(connector);
}
list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
head) {
drm_property_destroy(dev, property);
}
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
fb->funcs->destroy(fb);
}
list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
crtc->funcs->destroy(crtc);
}
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
/**
* drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
* LOCKING:
* None.
*
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
struct drm_display_mode *in)
{
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
/**
* drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
* LOCKING:
* None.
*
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*/
void drm_crtc_convert_umode(struct drm_display_mode *out,
struct drm_mode_modeinfo *in)
{
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
/**
* drm_mode_getresources - get graphics configuration
* @dev: DRM device
* @data: ioctl data, a struct drm_mode_card_res
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Construct a set of configuration description structures and return
* them to the user, including CRTC, connector and framebuffer configuration.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_card_res *card_res = data;
struct list_head *lh;
struct drm_framebuffer *fb;
struct drm_connector *connector;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
int ret = 0;
int connector_count = 0;
int crtc_count = 0;
int fb_count = 0;
int encoder_count = 0;
int copied = 0, i;
uint32_t __user *fb_id;
uint32_t __user *crtc_id;
uint32_t __user *connector_id;
uint32_t __user *encoder_id;
struct drm_mode_group *mode_group;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
/*
* For the non-control nodes we need to limit the list of resources
* by IDs in the group list for this node
*/
list_for_each(lh, &file_priv->fbs)
fb_count++;
mode_group = &file_priv->master->minor->mode_group;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
list_for_each(lh, &dev->mode_config.crtc_list)
crtc_count++;
list_for_each(lh, &dev->mode_config.connector_list)
connector_count++;
list_for_each(lh, &dev->mode_config.encoder_list)
encoder_count++;
} else {
crtc_count = mode_group->num_crtcs;
connector_count = mode_group->num_connectors;
encoder_count = mode_group->num_encoders;
}
card_res->max_height = dev->mode_config.max_height;
card_res->min_height = dev->mode_config.min_height;
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
/* handle this in 4 parts */
/* FBs */
if (card_res->count_fbs >= fb_count) {
copied = 0;
fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
list_for_each_entry(fb, &file_priv->fbs, filp_head) {
if (put_user(fb->base.id, fb_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
card_res->count_fbs = fb_count;
/* CRTCs */
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) {
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
} else {
for (i = 0; i < mode_group->num_crtcs; i++) {
if (put_user(mode_group->id_list[i],
crtc_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
}
card_res->count_crtcs = crtc_count;
/* Encoders */
if (card_res->count_encoders >= encoder_count) {
copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
head) {
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
drm_get_encoder_name(encoder));
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
} else {
for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
if (put_user(mode_group->id_list[i],
encoder_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
}
card_res->count_encoders = encoder_count;
/* Connectors */
if (card_res->count_connectors >= connector_count) {
copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
list_for_each_entry(connector,
&dev->mode_config.connector_list,
head) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector));
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
} else {
int start = mode_group->num_crtcs +
mode_group->num_encoders;
for (i = start; i < start + mode_group->num_connectors; i++) {
if (put_user(mode_group->id_list[i],
connector_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
}
card_res->count_connectors = connector_count;
DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
card_res->count_connectors, card_res->count_encoders);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
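/*
 * Userspace view of the two-pass pattern used by the resource ioctl above
 * (illustrative sketch only, compiled out; example_get_crtc_ids is a
 * hypothetical helper that would be built in userspace against <stdint.h>,
 * <string.h>, <stdlib.h>, <sys/ioctl.h> and the drm UAPI headers).  The
 * first call reports the array sizes, the second fills the arrays; a hotplug
 * between the two calls can change the counts, so real clients retry.
 */
#if 0
static int example_get_crtc_ids(int fd, uint32_t **ids, uint32_t *count)
{
struct drm_mode_card_res res;
uint32_t n;

memset(&res, 0, sizeof(res)); /* pass 1: all counts zero, learn sizes */
if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
return -1;
n = res.count_crtcs;
*ids = calloc(n, sizeof(uint32_t));
if (!*ids)
return -1;
memset(&res, 0, sizeof(res)); /* pass 2: supply only the CRTC array */
res.count_crtcs = n;
res.crtc_id_ptr = (uint64_t)(unsigned long)*ids;
if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res) || res.count_crtcs > n) {
free(*ids); /* error, or a hotplug grew the list; real code would retry */
return -1;
}
*count = res.count_crtcs;
return 0;
}
#endif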
/**
* drm_mode_getcrtc - get CRTC configuration
* @dev: DRM device
* @data: ioctl argument (struct drm_mode_crtc)
* @file_priv: DRM file private
*
* LOCKING:
* Takes mode config lock.
*
* Construct a CRTC configuration structure to return to the user.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
struct drm_mode_object *obj;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
goto out;
}
crtc = obj_to_crtc(obj);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
if (crtc->fb)
crtc_resp->fb_id = crtc->fb->base.id;
else
crtc_resp->fb_id = 0;
if (crtc->enabled) {
drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
crtc_resp->mode_valid = 1;
} else {
crtc_resp->mode_valid = 0;
}
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
/**
* drm_mode_getconnector - get connector configuration
* @dev: DRM device
* @data: ioctl argument (struct drm_mode_get_connector)
* @file_priv: DRM file private
*
* LOCKING:
* Takes mode config lock.
*
* Construct a connector configuration structure to return to the user.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_connector *out_resp = data;
struct drm_mode_object *obj;
struct drm_connector *connector;
struct drm_display_mode *mode;
int mode_count = 0;
int props_count = 0;
int encoders_count = 0;
int ret = 0;
int copied = 0;
int i;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
uint32_t __user *prop_ptr;
uint64_t __user *prop_values;
uint32_t __user *encoder_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->connector_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
ret = -EINVAL;
goto out;
}
connector = obj_to_connector(obj);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
props_count++;
}
}
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
encoders_count++;
}
}
if (out_resp->count_modes == 0) {
connector->funcs->fill_modes(connector,
dev->mode_config.max_width,
dev->mode_config.max_height);
}
/* delayed so we get modes regardless of pre-fill_modes state */
list_for_each_entry(mode, &connector->modes, head)
mode_count++;
out_resp->connector_id = connector->base.id;
out_resp->connector_type = connector->connector_type;
out_resp->connector_type_id = connector->connector_type_id;
out_resp->mm_width = connector->display_info.width_mm;
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
if (connector->encoder)
out_resp->encoder_id = connector->encoder->base.id;
else
out_resp->encoder_id = 0;
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
out_resp->count_modes = mode_count;
if ((out_resp->count_props >= props_count) && props_count) {
copied = 0;
prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
if (put_user(connector->property_ids[i],
prop_ptr + copied)) {
ret = -EFAULT;
goto out;
}
if (put_user(connector->property_values[i],
prop_values + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
}
out_resp->count_props = props_count;
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
if (put_user(connector->encoder_ids[i],
encoder_ptr + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
}
out_resp->count_encoders = encoders_count;
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_encoder *enc_resp = data;
struct drm_mode_object *obj;
struct drm_encoder *encoder;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
ret = -EINVAL;
goto out;
}
encoder = obj_to_encoder(obj);
if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
else
enc_resp->crtc_id = 0;
enc_resp->encoder_type = encoder->encoder_type;
enc_resp->encoder_id = encoder->base.id;
enc_resp->possible_crtcs = encoder->possible_crtcs;
enc_resp->possible_clones = encoder->possible_clones;
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
/**
* drm_mode_setcrtc - set CRTC configuration
* @dev: DRM device
* @data: ioctl argument (struct drm_mode_crtc)
* @file_priv: DRM file private
*
* LOCKING:
* Takes mode config lock.
*
* Build a new CRTC configuration based on user request.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc, *crtcfb;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
int ret = 0;
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
ret = -EINVAL;
goto out;
}
crtc = obj_to_crtc(obj);
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
list_for_each_entry(crtcfb,
&dev->mode_config.crtc_list, head) {
if (crtcfb == crtc) {
DRM_DEBUG_KMS("Using current fb for "
"setmode\n");
fb = crtc->fb;
}
}
} else {
obj = drm_mode_object_find(dev, crtc_req->fb_id,
DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -EINVAL;
goto out;
}
fb = obj_to_fb(obj);
}
mode = drm_mode_create(dev);
if (!mode) {
ret = -ENOMEM;
goto out;
}
drm_crtc_convert_umode(mode, &crtc_req->mode);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
}
if (crtc_req->count_connectors == 0 && mode) {
DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
ret = -EINVAL;
goto out;
}
if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
crtc_req->count_connectors);
ret = -EINVAL;
goto out;
}
if (crtc_req->count_connectors > 0) {
u32 out_id;
/* Avoid unbounded kernel memory allocation */
if (crtc_req->count_connectors > config->num_connector) {
ret = -EINVAL;
goto out;
}
connector_set = kmalloc(crtc_req->count_connectors *
sizeof(struct drm_connector *),
GFP_KERNEL);
if (!connector_set) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < crtc_req->count_connectors; i++) {
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
}
obj = drm_mode_object_find(dev, out_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -EINVAL;
goto out;
}
connector = obj_to_connector(obj);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector));
connector_set[i] = connector;
}
}
set.crtc = crtc;
set.x = crtc_req->x;
set.y = crtc_req->y;
set.mode = mode;
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
ret = crtc->funcs->set_config(&set);
out:
kfree(connector_set);
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
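/*
 * Illustrative userspace counterpart of drm_mode_setcrtc() (hypothetical
 * helper, compiled out, not part of this file): program one connector on a
 * CRTC with a given fb and mode.  Per the checks above, supplying
 * connectors makes a valid mode and an fb mandatory.
 */
#if 0
static int example_set_mode(int fd, uint32_t crtc_id, uint32_t fb_id,
uint32_t connector_id, const struct drm_mode_modeinfo *mode)
{
struct drm_mode_crtc req;

memset(&req, 0, sizeof(req));
req.crtc_id = crtc_id;
req.fb_id = fb_id; /* or -1 to keep the currently bound fb */
req.set_connectors_ptr = (uint64_t)(unsigned long)&connector_id;
req.count_connectors = 1;
req.mode = *mode;
req.mode_valid = 1;
return ioctl(fd, DRM_IOCTL_MODE_SETCRTC, &req);
}
#endif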
int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_cursor *req = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
if (!req->flags) {
DRM_ERROR("no operation set\n");
return -EINVAL;
}
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
ret = -EINVAL;
goto out;
}
crtc = obj_to_crtc(obj);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
DRM_ERROR("crtc does not support cursor\n");
ret = -ENXIO;
goto out;
}
/* Turns off the cursor if handle is 0 */
ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
req->width, req->height);
}
if (req->flags & DRM_MODE_CURSOR_MOVE) {
if (crtc->funcs->cursor_move) {
ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
} else {
DRM_ERROR("crtc does not support cursor\n");
ret = -EFAULT;
goto out;
}
}
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @dev: DRM device
* @data: ioctl argument (struct drm_mode_fb_cmd)
* @file_priv: DRM file private
*
* LOCKING:
* Takes mode config lock.
*
* Add a new FB to the specified CRTC, given a user request.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_ERROR("mode new framebuffer width not within limits\n");
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
DRM_ERROR("mode new framebuffer height not within limits\n");
return -EINVAL;
}
mutex_lock(&dev->mode_config.mutex);
/* TODO check buffer is sufficiently large */
/* TODO setup destructor callback */
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_ERROR("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
/**
* drm_mode_rmfb - remove an FB from the configuration
* @dev: DRM device
* @data: ioctl argument (the framebuffer ID)
* @file_priv: DRM file private
*
* LOCKING:
* Takes mode config lock.
*
* Remove the FB specified by the user.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
uint32_t *id = data;
int ret = 0;
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we really get a framebuffer back. */
if (!obj) {
DRM_ERROR("mode invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
fb = obj_to_fb(obj);
list_for_each_entry(fbl, &file_priv->fbs, filp_head)
if (fb == fbl)
found = 1;
if (!found) {
DRM_ERROR("tried to remove a fb that we didn't own\n");
ret = -EINVAL;
goto out;
}
/* TODO release all crtc connected to the framebuffer */
/* TODO unhook the destructor from the buffer object */
list_del(&fb->filp_head);
fb->funcs->destroy(fb);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
/**
* drm_mode_getfb - get FB info
* @dev: DRM device
* @data: ioctl argument (struct drm_mode_fb_cmd)
* @file_priv: DRM file private
*
* LOCKING:
* Takes mode config lock.
*
* Lookup the FB given its ID and return info about it.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
struct drm_mode_object *obj;
struct drm_framebuffer *fb;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
fb = obj_to_fb(obj);
r->height = fb->height;
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitch;
ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_clip_rect __user *clips_ptr;
struct drm_clip_rect *clips = NULL;
struct drm_mode_fb_dirty_cmd *r = data;
struct drm_mode_object *obj;
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out_err1;
}
fb = obj_to_fb(obj);
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
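/* num_clips and clips_ptr must be supplied together: both zero/NULL or both set. */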
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
goto out_err1;
}
flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
/* If userspace annotates copy, clips must come in pairs */
if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
ret = -EINVAL;
goto out_err1;
}
if (num_clips && clips_ptr) {
if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
ret = -EINVAL;
goto out_err1;
}
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
goto out_err1;
}
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
if (ret) {
ret = -EFAULT;
goto out_err2;
}
}
if (fb->funcs->dirty) {
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
} else {
ret = -ENOSYS;
goto out_err2;
}
out_err2:
kfree(clips);
out_err1:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
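/*
 * Userspace sketch for the dirty ioctl above (hypothetical helper, compiled
 * out): mark a single rectangle of an fb dirty so drivers for manual-update
 * hardware can flush it.  The flags and clip-pairing rules mirror the checks
 * in the handler.
 */
#if 0
static int example_dirty_rect(int fd, uint32_t fb_id,
uint16_t x1, uint16_t y1, uint16_t x2, uint16_t y2)
{
struct drm_clip_rect clip = { .x1 = x1, .y1 = y1, .x2 = x2, .y2 = y2 };
struct drm_mode_fb_dirty_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.fb_id = fb_id;
cmd.num_clips = 1;
cmd.clips_ptr = (uint64_t)(unsigned long)&clip;
return ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}
#endif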
/**
* drm_fb_release - remove and free the FBs on this file
* @priv: DRM file whose framebuffers are released
*
* LOCKING:
* Takes mode config lock.
*
* Destroy all the FBs associated with @priv.
*
* Called when the DRM file is closed.
*/
void drm_fb_release(struct drm_file *priv)
{
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
list_del(&fb->filp_head);
fb->funcs->destroy(fb);
}
mutex_unlock(&dev->mode_config.mutex);
}
/**
* drm_mode_attachmode - add a mode to the user mode list
* @dev: DRM device
* @connector: connector to add the mode to
* @mode: mode to add
*
* Add @mode to @connector's user mode list.
*/
static int drm_mode_attachmode(struct drm_device *dev,
struct drm_connector *connector,
struct drm_display_mode *mode)
{
int ret = 0;
list_add_tail(&mode->head, &connector->user_modes);
return ret;
}
int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_connector *connector;
int ret = 0;
struct drm_display_mode *dup_mode;
int need_dup = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
if (connector->encoder->crtc == crtc) {
if (need_dup) {
dup_mode = drm_mode_duplicate(dev, mode);
if (!dup_mode)
return -ENOMEM;
} else {
dup_mode = mode;
}
ret = drm_mode_attachmode(dev, connector, dup_mode);
if (ret)
return ret;
need_dup = 1;
}
}
return 0;
}
EXPORT_SYMBOL(drm_mode_attachmode_crtc);
static int drm_mode_detachmode(struct drm_device *dev,
struct drm_connector *connector,
struct drm_display_mode *mode)
{
int found = 0;
int ret = 0;
struct drm_display_mode *match_mode, *t;
list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
if (drm_mode_equal(match_mode, mode)) {
list_del(&match_mode->head);
drm_mode_destroy(dev, match_mode);
found = 1;
break;
}
}
if (!found)
ret = -EINVAL;
return ret;
}
int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
{
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_mode_detachmode(dev, connector, mode);
}
return 0;
}
EXPORT_SYMBOL(drm_mode_detachmode_crtc);
/**
* drm_mode_attachmode_ioctl - Attach a user mode to a connector
* @dev: DRM device
* @data: ioctl argument (struct drm_mode_mode_cmd)
* @file_priv: DRM file private
*
* This attaches a user-specified mode to a connector.
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_attachmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_mode_cmd *mode_cmd = data;
struct drm_connector *connector;
struct drm_display_mode *mode;
struct drm_mode_object *obj;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
ret = -EINVAL;
goto out;
}
connector = obj_to_connector(obj);
mode = drm_mode_create(dev);
if (!mode) {
ret = -ENOMEM;
goto out;
}
drm_crtc_convert_umode(mode, umode);
ret = drm_mode_attachmode(dev, connector, mode);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
/**
* drm_mode_detachmode_ioctl - Detach a user-specified mode from a connector
* @dev: DRM device
* @data: ioctl argument (struct drm_mode_mode_cmd)
* @file_priv: DRM file private
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_detachmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_mode_mode_cmd *mode_cmd = data;
struct drm_connector *connector;
struct drm_display_mode mode;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
ret = -EINVAL;
goto out;
}
connector = obj_to_connector(obj);
drm_crtc_convert_umode(&mode, umode);
ret = drm_mode_detachmode(dev, connector, &mode);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values)
{
struct drm_property *property = NULL;
property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
if (!property)
return NULL;
if (num_values) {
property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
if (!property->values)
goto fail;
}
drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
property->flags = flags;
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_blob_list);
if (name) {
strncpy(property->name, name, DRM_PROP_NAME_LEN);
property->name[DRM_PROP_NAME_LEN-1] = '\0';
}
list_add_tail(&property->head, &dev->mode_config.property_list);
return property;
fail:
kfree(property);
return NULL;
}
EXPORT_SYMBOL(drm_property_create);
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name)
{
struct drm_property_enum *prop_enum;
if (!(property->flags & DRM_MODE_PROP_ENUM))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (prop_enum->value == value) {
strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
return 0;
}
}
}
prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
if (!prop_enum)
return -ENOMEM;
strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
prop_enum->value = value;
property->values[index] = value;
list_add_tail(&prop_enum->head, &property->enum_blob_list);
return 0;
}
EXPORT_SYMBOL(drm_property_add_enum);
void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
struct drm_property_enum *prop_enum, *pt;
list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
list_del(&prop_enum->head);
kfree(prop_enum);
}
if (property->num_values)
kfree(property->values);
drm_mode_object_put(dev, &property->base);
list_del(&property->head);
kfree(property);
}
EXPORT_SYMBOL(drm_property_destroy);
int drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == 0) {
connector->property_ids[i] = property->base.id;
connector->property_values[i] = init_val;
break;
}
}
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_property);
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == property->base.id) {
connector->property_values[i] = value;
break;
}
}
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(drm_connector_property_set_value);
int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property, uint64_t *val)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == property->base.id) {
*val = connector->property_values[i];
break;
}
}
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(drm_connector_property_get_value);
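/*
 * Illustrative driver-side use of the property helpers above (compiled out;
 * the property name and values are hypothetical, not a defined DRM
 * property): create a two-value enum property and attach it to a connector
 * with a default value.
 */
#if 0
static struct drm_property *example_create_scaling_property(
struct drm_device *dev, struct drm_connector *connector)
{
struct drm_property *prop;

prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "example scaling", 2);
if (!prop)
return NULL;
drm_property_add_enum(prop, 0, 0, "Full");
drm_property_add_enum(prop, 1, 1, "Center");
drm_connector_attach_property(connector, prop, 0);
return prop;
}
#endif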
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
int blob_count = 0;
int value_count = 0;
int ret = 0, i;
int copied;
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
struct drm_property_blob *prop_blob;
uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
ret = -EINVAL;
goto done;
}
property = obj_to_property(obj);
if (property->flags & DRM_MODE_PROP_ENUM) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
list_for_each_entry(prop_blob, &property->enum_blob_list, head)
blob_count++;
}
value_count = property->num_values;
strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
out_resp->flags = property->flags;
if ((out_resp->count_values >= value_count) && value_count) {
values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
for (i = 0; i < value_count; i++) {
if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
ret = -EFAULT;
goto done;
}
}
}
out_resp->count_values = value_count;
if (property->flags & DRM_MODE_PROP_ENUM) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
ret = -EFAULT;
goto done;
}
if (copy_to_user(&enum_ptr[copied].name,
&prop_enum->name, DRM_PROP_NAME_LEN)) {
ret = -EFAULT;
goto done;
}
copied++;
}
}
out_resp->count_enum_blobs = enum_count;
}
if (property->flags & DRM_MODE_PROP_BLOB) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
ret = -EFAULT;
goto done;
}
if (put_user(prop_blob->length, blob_length_ptr + copied)) {
ret = -EFAULT;
goto done;
}
copied++;
}
}
out_resp->count_enum_blobs = blob_count;
}
done:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
void *data)
{
struct drm_property_blob *blob;
if (!length || !data)
return NULL;
blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
if (!blob)
return NULL;
blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
blob->length = length;
memcpy(blob->data, data, length);
drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
return blob;
}
static void drm_property_destroy_blob(struct drm_device *dev,
struct drm_property_blob *blob)
{
drm_mode_object_put(dev, &blob->base);
list_del(&blob->head);
kfree(blob);
}
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
void __user *blob_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
ret = -EINVAL;
goto done;
}
blob = obj_to_blob(obj);
if (out_resp->length == blob->length) {
blob_ptr = (void __user *)(unsigned long)out_resp->data;
if (copy_to_user(blob_ptr, blob->data, blob->length)){
ret = -EFAULT;
goto done;
}
}
out_resp->length = blob->length;
done:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
int ret = 0, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
/* If there is no new EDID, clear the blob pointer and the property. */
if (!edid) {
connector->edid_blob_ptr = NULL;
ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
return ret;
}
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
if (!connector->edid_blob_ptr)
return -EINVAL;
ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
return ret;
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_connector_set_property *out_resp = data;
struct drm_mode_object *obj;
struct drm_property *property;
struct drm_connector *connector;
int ret = -EINVAL;
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
goto out;
}
connector = obj_to_connector(obj);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == out_resp->prop_id)
break;
}
if (i == DRM_CONNECTOR_MAX_PROPERTY) {
goto out;
}
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
goto out;
}
property = obj_to_property(obj);
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
goto out;
if (property->flags & DRM_MODE_PROP_RANGE) {
if (out_resp->value < property->values[0])
goto out;
if (out_resp->value > property->values[1])
goto out;
} else {
int found = 0;
for (i = 0; i < property->num_values; i++) {
if (property->values[i] == out_resp->value) {
found = 1;
break;
}
}
if (!found) {
goto out;
}
}
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
if (connector->funcs->dpms)
(*connector->funcs->dpms)(connector, (int) out_resp->value);
ret = 0;
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, out_resp->value);
/* store the property value if successful */
if (!ret)
drm_connector_property_set_value(connector, property, out_resp->value);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0) {
connector->encoder_ids[i] = encoder->base.id;
return 0;
}
}
return -ENOMEM;
}
EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == encoder->base.id) {
connector->encoder_ids[i] = 0;
if (connector->encoder == encoder)
connector->encoder = NULL;
break;
}
}
}
EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
crtc->gamma_size = gamma_size;
crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
return false;
}
return true;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
goto out;
}
crtc = obj_to_crtc(obj);
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
ret = -EINVAL;
goto out;
}
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
ret = -EFAULT;
goto out;
}
g_base = r_base + size;
if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
ret = -EFAULT;
goto out;
}
b_base = g_base + size;
if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
ret = -EFAULT;
goto out;
}
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_lut *crtc_lut = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
void *r_base, *g_base, *b_base;
int size;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
goto out;
}
crtc = obj_to_crtc(obj);
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
ret = -EINVAL;
goto out;
}
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
ret = -EFAULT;
goto out;
}
g_base = r_base + size;
if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
ret = -EFAULT;
goto out;
}
b_base = g_base + size;
if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
ret = -EFAULT;
goto out;
}
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
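/*
 * Userspace sketch for the gamma ioctls above (hypothetical helper, compiled
 * out): the red/green/blue pointers each reference gamma_size 16-bit
 * entries, matching the R-then-G-then-B layout of crtc->gamma_store.  This
 * programs a linear ramp.
 */
#if 0
static int example_set_linear_gamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *r, uint16_t *g, uint16_t *b)
{
struct drm_mode_crtc_lut lut;
uint32_t i;

if (size < 2)
return -1;
for (i = 0; i < size; i++)
r[i] = g[i] = b[i] = (uint16_t)((i * 0xffff) / (size - 1));
memset(&lut, 0, sizeof(lut));
lut.crtc_id = crtc_id;
lut.gamma_size = size; /* must equal the CRTC's gamma_size */
lut.red = (uint64_t)(unsigned long)r;
lut.green = (uint64_t)(unsigned long)g;
lut.blue = (uint64_t)(unsigned long)b;
return ioctl(fd, DRM_IOCTL_MODE_SETGAMMA, &lut);
}
#endif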
int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_crtc_page_flip *page_flip = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
int ret = -EINVAL;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
page_flip->reserved != 0)
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
goto out;
crtc = obj_to_crtc(obj);
if (crtc->fb == NULL) {
/* The framebuffer is currently unbound, presumably
* due to a hotplug event, that userspace has not
* yet discovered.
*/
ret = -EBUSY;
goto out;
}
if (crtc->funcs->page_flip == NULL)
goto out;
obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
if (!obj)
goto out;
fb = obj_to_fb(obj);
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
ret = -ENOMEM;
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
file_priv->event_space -= sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
e = kzalloc(sizeof *e, GFP_KERNEL);
if (e == NULL) {
spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof e->event;
e->event.user_data = page_flip->user_data;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
e->base.destroy =
(void (*) (struct drm_pending_event *)) kfree;
}
ret = crtc->funcs->page_flip(crtc, fb, e);
if (ret) {
/* Only undo the event allocation if we actually made one. */
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
}
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
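/*
 * Userspace sketch for the flip ioctl above (hypothetical helper, compiled
 * out): request a flip with DRM_MODE_PAGE_FLIP_EVENT, then read() the fd for
 * the DRM_EVENT_FLIP_COMPLETE queued on completion.  This assumes the flip
 * event is the next event on the fd; a real client would poll() and
 * demultiplex events.
 */
#if 0
static int example_flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
struct drm_mode_crtc_page_flip flip;
struct drm_event_vblank ev;

memset(&flip, 0, sizeof(flip));
flip.crtc_id = crtc_id;
flip.fb_id = fb_id;
flip.flags = DRM_MODE_PAGE_FLIP_EVENT;
flip.user_data = 0; /* opaque cookie, echoed back in the event */
if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip))
return -1;
if (read(fd, &ev, sizeof(ev)) < (ssize_t)sizeof(ev))
return -1;
return ev.base.type == DRM_EVENT_FLIP_COMPLETE ? 0 : -1;
}
#endif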
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->funcs->reset)
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_create_dumb *args = data;
if (!dev->driver->dumb_create)
return -ENOSYS;
return dev->driver->dumb_create(file_priv, dev, args);
}
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_map_dumb *args = data;
/* call driver ioctl to get mmap offset */
if (!dev->driver->dumb_map_offset)
return -ENOSYS;
return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_destroy_dumb *args = data;
if (!dev->driver->dumb_destroy)
return -ENOSYS;
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3571_0 |
crossvul-cpp_data_good_2201_0 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* plugins/kdb/ldap/libkdb_ldap/ldap_principal2.c */
/*
* Copyright (c) 2004-2005, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <time.h>
#include "ldap_main.h"
#include "kdb_ldap.h"
#include "ldap_principal.h"
#include "princ_xdr.h"
#include "ldap_tkt_policy.h"
#include "ldap_pwd_policy.h"
#include "ldap_err.h"
#include <kadm5/admin.h>
extern char* principal_attributes[];
extern char* max_pwd_life_attr[];
static char *
getstringtime(krb5_timestamp);
krb5_error_code
berval2tl_data(struct berval *in, krb5_tl_data **out)
{
if (in->bv_len < 2)
return EINVAL; /* need at least the two-byte type prefix */
*out = (krb5_tl_data *) malloc (sizeof (krb5_tl_data));
if (*out == NULL)
return ENOMEM;
(*out)->tl_data_length = in->bv_len - 2;
(*out)->tl_data_contents = (krb5_octet *) malloc
((*out)->tl_data_length * sizeof (krb5_octet));
if ((*out)->tl_data_contents == NULL) {
free (*out);
return ENOMEM;
}
UNSTORE16_INT (in->bv_val, (*out)->tl_data_type);
memcpy ((*out)->tl_data_contents, in->bv_val + 2, (*out)->tl_data_length);
return 0;
}
/*
* look up a principal in the directory.
*/
krb5_error_code
krb5_ldap_get_principal(krb5_context context, krb5_const_principal searchfor,
unsigned int flags, krb5_db_entry **entry_ptr)
{
char *user=NULL, *filter=NULL, *filtuser=NULL;
unsigned int tree=0, ntrees=1, princlen=0;
krb5_error_code tempst=0, st=0;
char **values=NULL, **subtree=NULL, *cname=NULL;
LDAP *ld=NULL;
LDAPMessage *result=NULL, *ent=NULL;
krb5_ldap_context *ldap_context=NULL;
kdb5_dal_handle *dal_handle=NULL;
krb5_ldap_server_handle *ldap_server_handle=NULL;
krb5_principal cprinc=NULL;
krb5_boolean found=FALSE;
krb5_db_entry *entry = NULL;
*entry_ptr = NULL;
/* Clear the global error string */
krb5_clear_error_message(context);
if (searchfor == NULL)
return EINVAL;
dal_handle = context->dal_handle;
ldap_context = (krb5_ldap_context *) dal_handle->db_context;
CHECK_LDAP_HANDLE(ldap_context);
if (is_principal_in_realm(ldap_context, searchfor) != 0) {
st = KRB5_KDB_NOENTRY;
krb5_set_error_message(context, st,
_("Principal does not belong to realm"));
goto cleanup;
}
if ((st=krb5_unparse_name(context, searchfor, &user)) != 0)
goto cleanup;
if ((st=krb5_ldap_unparse_principal_name(user)) != 0)
goto cleanup;
filtuser = ldap_filter_correct(user);
if (filtuser == NULL) {
st = ENOMEM;
goto cleanup;
}
princlen = strlen(FILTER) + strlen(filtuser) + 2 + 1; /* 2 for closing brackets */
if ((filter = malloc(princlen)) == NULL) {
st = ENOMEM;
goto cleanup;
}
snprintf(filter, princlen, FILTER"%s))", filtuser);
if ((st = krb5_get_subtree_info(ldap_context, &subtree, &ntrees)) != 0)
goto cleanup;
GET_HANDLE();
for (tree=0; tree < ntrees && !found; ++tree) {
LDAP_SEARCH(subtree[tree], ldap_context->lrparams->search_scope, filter, principal_attributes);
for (ent=ldap_first_entry(ld, result); ent != NULL && !found; ent=ldap_next_entry(ld, ent)) {
/* get the associated directory user information */
if ((values=ldap_get_values(ld, ent, "krbprincipalname")) != NULL) {
int i;
/* A wild-card in a principal name can return a list of Kerberos
* principals; make sure that the correct principal is returned.
* NOTE: a principal name of k* in the LDAP server will return all
* the principals starting with k.
*/
for (i=0; values[i] != NULL; ++i) {
if (strcmp(values[i], user) == 0) {
found = TRUE;
break;
}
}
ldap_value_free(values);
if (!found) /* no matching principal found */
continue;
}
if ((values=ldap_get_values(ld, ent, "krbcanonicalname")) != NULL) {
if (values[0] && strcmp(values[0], user) != 0) {
/* We matched an alias, not the canonical name. */
if (flags & KRB5_KDB_FLAG_ALIAS_OK) {
st = krb5_ldap_parse_principal_name(values[0], &cname);
if (st != 0)
goto cleanup;
st = krb5_parse_name(context, cname, &cprinc);
if (st != 0)
goto cleanup;
} else /* No canonicalization, so don't return aliases. */
found = FALSE;
}
ldap_value_free(values);
if (!found)
continue;
}
entry = k5alloc(sizeof(*entry), &st);
if (entry == NULL)
goto cleanup;
if ((st = populate_krb5_db_entry(context, ldap_context, ld, ent,
cprinc ? cprinc : searchfor,
entry)) != 0)
goto cleanup;
}
ldap_msgfree(result);
result = NULL;
} /* for (tree=0 ... */
if (found) {
*entry_ptr = entry;
entry = NULL;
} else
st = KRB5_KDB_NOENTRY;
cleanup:
ldap_msgfree(result);
krb5_ldap_free_principal(context, entry);
if (filter)
free (filter);
if (subtree) {
for (; ntrees; --ntrees)
if (subtree[ntrees-1])
free (subtree[ntrees-1]);
free (subtree);
}
if (ldap_server_handle)
krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle);
if (user)
free(user);
if (filtuser)
free(filtuser);
if (cname)
free(cname);
if (cprinc)
krb5_free_principal(context, cprinc);
return st;
}
typedef enum{ ADD_PRINCIPAL, MODIFY_PRINCIPAL } OPERATION;
/*
* ptype was creating confusion.  Additionally, the logic
* surrounding ptype was redundant and can be achieved
* with the help of the dn and containerdn members,
* so the ptype member is dropped.
*/
typedef struct _xargs_t {
char *dn;
char *linkdn;
krb5_boolean dn_from_kbd;
char *containerdn;
char *tktpolicydn;
}xargs_t;
static void
free_xargs(xargs_t xargs)
{
if (xargs.dn)
free (xargs.dn);
if (xargs.linkdn)
free(xargs.linkdn);
if (xargs.containerdn)
free (xargs.containerdn);
if (xargs.tktpolicydn)
free (xargs.tktpolicydn);
}
static krb5_error_code
process_db_args(krb5_context context, char **db_args, xargs_t *xargs,
OPERATION optype)
{
int i=0;
krb5_error_code st=0;
char *arg=NULL, *arg_val=NULL;
char **dptr=NULL;
unsigned int arg_val_len=0;
if (db_args) {
for (i=0; db_args[i]; ++i) {
arg = strtok_r(db_args[i], "=", &arg_val);
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
dptr = &xargs->tktpolicydn;
} else {
if (strcmp(arg, USERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL ||
xargs->linkdn != NULL) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->dn;
} else if (strcmp(arg, CONTAINERDN_ARG) == 0) {
if (optype == MODIFY_PRINCIPAL ||
xargs->dn != NULL || xargs->containerdn != NULL) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->containerdn;
} else if (strcmp(arg, LINKDN_ARG) == 0) {
if (xargs->dn != NULL || xargs->linkdn != NULL) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option not supported"),
arg);
goto cleanup;
}
dptr = &xargs->linkdn;
} else {
st = EINVAL;
krb5_set_error_message(context, st,
_("unknown option: %s"), arg);
goto cleanup;
}
xargs->dn_from_kbd = TRUE;
if (arg_val == NULL || strlen(arg_val) == 0) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option value missing"), arg);
goto cleanup;
}
}
if (arg_val == NULL) {
st = EINVAL;
krb5_set_error_message(context, st,
_("%s option value missing"), arg);
goto cleanup;
}
arg_val_len = strlen(arg_val) + 1;
if (strcmp(arg, TKTPOLICY_ARG) == 0) {
if ((st = krb5_ldap_name_to_policydn (context,
arg_val,
dptr)) != 0)
goto cleanup;
} else {
*dptr = k5memdup(arg_val, arg_val_len, &st);
if (*dptr == NULL)
goto cleanup;
}
}
}
cleanup:
return st;
}
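/*
 * Illustrative shape of the db_args vector parsed above (the values are
 * hypothetical).  kadmin delivers these via its -x option, e.g.
 * "addprinc -x dn=... princname".  Note that strtok_r() splits on the first
 * '=' only, so DN values containing '=' survive intact, and that parsing
 * modifies the strings in place, so writable copies would be needed.
 */
#if 0
static char *example_db_args[] = {
"dn=cn=alice,ou=people,dc=example,dc=com",
"tktpolicy=default_policy",
NULL
};
#endif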
krb5int_access accessor;
static krb5_error_code
asn1_encode_sequence_of_keys(krb5_key_data *key_data, krb5_int16 n_key_data,
krb5_int32 mkvno, krb5_data **code)
{
krb5_error_code err;
ldap_seqof_key_data val;
/*
* This should be pushed back into other library initialization
* code.
*/
err = kldap_ensure_initialized ();
if (err)
return err;
val.key_data = key_data;
val.n_key_data = n_key_data;
val.mkvno = mkvno;
val.kvno = key_data[0].key_data_kvno;
return accessor.asn1_ldap_encode_sequence_of_keys(&val, code);
}
static krb5_error_code
asn1_decode_sequence_of_keys(krb5_data *in, krb5_key_data **out,
krb5_int16 *n_key_data, krb5_kvno *mkvno)
{
krb5_error_code err;
ldap_seqof_key_data *p;
int i;
/*
* This should be pushed back into other library initialization
* code.
*/
err = kldap_ensure_initialized ();
if (err)
return err;
err = accessor.asn1_ldap_decode_sequence_of_keys(in, &p);
if (err)
return err;
/* Set kvno and key_data_ver in each key_data element. */
for (i = 0; i < p->n_key_data; i++) {
p->key_data[i].key_data_kvno = p->kvno;
/* The decoder sets key_data_ver to 1 if no salt is present, but leaves
* it at 0 if salt is present. */
if (p->key_data[i].key_data_ver == 0)
p->key_data[i].key_data_ver = 2;
}
*out = p->key_data;
*n_key_data = p->n_key_data;
*mkvno = p->mkvno;
free(p);
return 0;
}
/* Encode a principal's key data, one ASN.1-encoded berval per key version. */
static struct berval **
krb5_encode_krbsecretkey(krb5_key_data *key_data_in, int n_key_data,
krb5_kvno mkvno) {
struct berval **ret = NULL;
int currkvno;
int num_versions = 1;
int i, j, last;
krb5_error_code err = 0;
krb5_key_data *key_data;
if (n_key_data <= 0)
return NULL;
/* Make a shallow copy of the key data so we can alter it. */
key_data = k5calloc(n_key_data, sizeof(*key_data), &err);
if (key_data == NULL)
goto cleanup;
memcpy(key_data, key_data_in, n_key_data * sizeof(*key_data));
/* Unpatched krb5 1.11 and 1.12 cannot decode KrbKey sequences with no salt
* field. For compatibility, always encode a salt field. */
for (i = 0; i < n_key_data; i++) {
if (key_data[i].key_data_ver == 1) {
key_data[i].key_data_ver = 2;
key_data[i].key_data_type[1] = KRB5_KDB_SALTTYPE_NORMAL;
key_data[i].key_data_length[1] = 0;
key_data[i].key_data_contents[1] = NULL;
}
}
/* Find the number of key versions */
for (i = 0; i < n_key_data - 1; i++)
if (key_data[i].key_data_kvno != key_data[i + 1].key_data_kvno)
num_versions++;
ret = (struct berval **) calloc (num_versions + 1, sizeof (struct berval *));
if (ret == NULL) {
err = ENOMEM;
goto cleanup;
}
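/*
 * Walk the keys in order; each time the kvno changes (or the last key is
 * reached), the run key_data[last..i] shares one kvno and is encoded into
 * a single berval.
 */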
for (i = 0, last = 0, j = 0, currkvno = key_data[0].key_data_kvno; i < n_key_data; i++) {
krb5_data *code;
if (i == n_key_data - 1 || key_data[i + 1].key_data_kvno != currkvno) {
ret[j] = k5alloc(sizeof(struct berval), &err);
if (ret[j] == NULL)
goto cleanup;
err = asn1_encode_sequence_of_keys(key_data + last,
(krb5_int16)i - last + 1,
mkvno, &code);
if (err)
goto cleanup;
/*CHECK_NULL(ret[j]); */
ret[j]->bv_len = code->length;
ret[j]->bv_val = code->data;
free(code);
j++;
last = i + 1;
if (i < n_key_data - 1)
currkvno = key_data[i + 1].key_data_kvno;
}
}
ret[num_versions] = NULL;
cleanup:
free(key_data);
if (err != 0) {
if (ret != NULL) {
for (i = 0; i <= num_versions; i++)
if (ret[i] != NULL)
free (ret[i]);
free (ret);
ret = NULL;
}
}
return ret;
}
static krb5_error_code
tl_data2berval (krb5_tl_data *in, struct berval **out)
{
*out = (struct berval *) malloc (sizeof (struct berval));
if (*out == NULL)
return ENOMEM;
(*out)->bv_len = in->tl_data_length + 2;
(*out)->bv_val = (char *) malloc ((*out)->bv_len);
if ((*out)->bv_val == NULL) {
free (*out);
return ENOMEM;
}
STORE16_INT((*out)->bv_val, in->tl_data_type);
memcpy ((*out)->bv_val + 2, in->tl_data_contents, in->tl_data_length);
return 0;
}
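/*
 * Illustrative round trip (hypothetical helper, compiled out):
 * tl_data2berval() stores the 16-bit tl_data_type in the first two bytes of
 * the berval and berval2tl_data() strips it off again, so the two functions
 * are inverses of each other.
 */
#if 0
static krb5_error_code
example_tl_data_roundtrip(krb5_tl_data *in, krb5_tl_data **out)
{
struct berval *bv = NULL;
krb5_error_code st;

st = tl_data2berval(in, &bv);
if (st)
return st;
st = berval2tl_data(bv, out);
free(bv->bv_val);
free(bv);
return st;
}
#endif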
krb5_error_code
krb5_ldap_put_principal(krb5_context context, krb5_db_entry *entry,
char **db_args)
{
int l=0, kerberos_principal_object_type=0;
unsigned int ntrees=0, tre=0;
krb5_error_code st=0, tempst=0;
LDAP *ld=NULL;
LDAPMessage *result=NULL, *ent=NULL;
char **subtreelist = NULL;
char *user=NULL, *subtree=NULL, *principal_dn=NULL;
char **values=NULL, *strval[10]={NULL}, errbuf[1024];
char *filtuser=NULL;
struct berval **bersecretkey=NULL;
LDAPMod **mods=NULL;
krb5_boolean create_standalone_prinicipal=FALSE;
krb5_boolean krb_identity_exists=FALSE, establish_links=FALSE;
char *standalone_principal_dn=NULL;
krb5_tl_data *tl_data=NULL;
krb5_key_data **keys=NULL;
kdb5_dal_handle *dal_handle=NULL;
krb5_ldap_context *ldap_context=NULL;
krb5_ldap_server_handle *ldap_server_handle=NULL;
osa_princ_ent_rec princ_ent = {0};
xargs_t xargs = {0};
char *polname = NULL;
OPERATION optype;
krb5_boolean found_entry = FALSE;
/* Clear the global error string */
krb5_clear_error_message(context);
SETUP_CONTEXT();
if (ldap_context->lrparams == NULL || ldap_context->container_dn == NULL)
return EINVAL;
/* get ldap handle */
GET_HANDLE();
if (is_principal_in_realm(ldap_context, entry->princ) != 0) {
st = EINVAL;
krb5_set_error_message(context, st, _("Principal does not belong to "
"the default realm"));
goto cleanup;
}
/* get the principal information to act on */
if (((st=krb5_unparse_name(context, entry->princ, &user)) != 0) ||
((st=krb5_ldap_unparse_principal_name(user)) != 0))
goto cleanup;
filtuser = ldap_filter_correct(user);
if (filtuser == NULL) {
st = ENOMEM;
goto cleanup;
}
/* Identify the type of operation; it can be
* add principal or modify principal.
* Hack: if entry->mask has the KADM5_PRINCIPAL flag set,
* then it is an add operation.
*/
if (entry->mask & KADM5_PRINCIPAL)
optype = ADD_PRINCIPAL;
else
optype = MODIFY_PRINCIPAL;
if (((st=krb5_get_princ_type(context, entry, &kerberos_principal_object_type)) != 0) ||
((st=krb5_get_userdn(context, entry, &principal_dn)) != 0))
goto cleanup;
if ((st=process_db_args(context, db_args, &xargs, optype)) != 0)
goto cleanup;
if (entry->mask & KADM5_LOAD) {
unsigned int tree = 0;
int numlentries = 0;
char *filter = NULL;
/* A load operation is special: it will do a mix-in (add krbprinc
* attrs to a non-krb object entry) if an object exists with a
* matching krbprincipalname attribute, so try to find an existing
* object and set principal_dn.  This assumes that the
* krbprincipalname attribute is unique (only one object entry has
* a particular krbprincipalname attribute).
*/
if (asprintf(&filter, FILTER"%s))", filtuser) < 0) {
filter = NULL;
st = ENOMEM;
goto cleanup;
}
/* get the current subtree list */
if ((st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees)) != 0)
goto cleanup;
found_entry = FALSE;
/* search for entry with matching krbprincipalname attribute */
for (tree = 0; found_entry == FALSE && tree < ntrees; ++tree) {
if (principal_dn == NULL) {
LDAP_SEARCH_1(subtreelist[tree], ldap_context->lrparams->search_scope, filter, principal_attributes, IGNORE_STATUS);
} else {
/* just look for entry with principal_dn */
LDAP_SEARCH_1(principal_dn, LDAP_SCOPE_BASE, filter, principal_attributes, IGNORE_STATUS);
}
if (st == LDAP_SUCCESS) {
numlentries = ldap_count_entries(ld, result);
if (numlentries > 1) {
free(filter);
st = EINVAL;
krb5_set_error_message(context, st,
_("operation can not continue, "
"more than one entry with "
"principal name \"%s\" found"),
user);
goto cleanup;
} else if (numlentries == 1) {
found_entry = TRUE;
if (principal_dn == NULL) {
ent = ldap_first_entry(ld, result);
if (ent != NULL) {
/* setting principal_dn will cause that entry to be modified further down */
if ((principal_dn = ldap_get_dn(ld, ent)) == NULL) {
ldap_get_option (ld, LDAP_OPT_RESULT_CODE, &st);
st = set_ldap_error (context, st, 0);
free(filter);
goto cleanup;
}
}
}
}
} else if (st != LDAP_NO_SUCH_OBJECT) {
/* could not perform search, return with failure */
st = set_ldap_error (context, st, 0);
free(filter);
goto cleanup;
}
ldap_msgfree(result);
result = NULL;
/*
* If it isn't found then assume a standalone princ entry is to
* be created.
*/
} /* end for (tree = 0; principal_dn == ... */
free(filter);
if (found_entry == FALSE && principal_dn != NULL) {
/*
* if principal_dn is null then there is code further down to
* deal with setting standalone_principal_dn. Also note that
* this will set create_standalone_prinicipal true for
* non-mix-in entries which is okay if loading from a dump.
*/
create_standalone_prinicipal = TRUE;
standalone_principal_dn = strdup(principal_dn);
CHECK_NULL(standalone_principal_dn);
}
} /* end if (entry->mask & KADM5_LOAD */
/* time to generate the DN information with the help of
* containerdn, principalcontainerreference or
* realmcontainerdn information
*/
if (principal_dn == NULL && xargs.dn == NULL) { /* creation of standalone principal */
/* get the subtree information */
if (entry->princ->length == 2 && entry->princ->data[0].length == strlen("krbtgt") &&
strncmp(entry->princ->data[0].data, "krbtgt", entry->princ->data[0].length) == 0) {
/* if the principal is an inter-realm principal, it is always created in the realm container */
subtree = strdup(ldap_context->lrparams->realmdn);
} else if (xargs.containerdn) {
if ((st=checkattributevalue(ld, xargs.containerdn, NULL, NULL, NULL)) != 0) {
if (st == KRB5_KDB_NOENTRY || st == KRB5_KDB_CONSTRAINT_VIOLATION) {
int ost = st;
st = EINVAL;
snprintf(errbuf, sizeof(errbuf), _("'%s' not found: "),
xargs.containerdn);
prepend_err_str(context, errbuf, st, ost);
}
goto cleanup;
}
subtree = strdup(xargs.containerdn);
} else if (ldap_context->lrparams->containerref && strlen(ldap_context->lrparams->containerref) != 0) {
/*
* Here the subtree should be changed with
* principalcontainerreference attribute value
*/
subtree = strdup(ldap_context->lrparams->containerref);
} else {
subtree = strdup(ldap_context->lrparams->realmdn);
}
CHECK_NULL(subtree);
if (asprintf(&standalone_principal_dn, "krbprincipalname=%s,%s",
filtuser, subtree) < 0)
standalone_principal_dn = NULL;
CHECK_NULL(standalone_principal_dn);
/*
* Free subtree once it has been used to build the DN, and set the
* boolean create_standalone_prinicipal to TRUE.
*/
create_standalone_prinicipal = TRUE;
free(subtree);
subtree = NULL;
}
/*
* If the DN information is presented by the user, time to
* validate the input to ensure that the DN falls under
* any of the subtrees
*/
if (xargs.dn_from_kbd == TRUE) {
/* make sure the DN falls in the subtree */
int dnlen=0, subtreelen=0;
char *dn=NULL;
krb5_boolean outofsubtree=TRUE;
if (xargs.dn != NULL) {
dn = xargs.dn;
} else if (xargs.linkdn != NULL) {
dn = xargs.linkdn;
} else if (standalone_principal_dn != NULL) {
/*
* Even though the standalone_principal_dn is constructed
* within this function, the containerdn input from the
* user can become part of it.
*/
dn = standalone_principal_dn;
}
/* Get the current subtree list if we haven't already done so. */
if (subtreelist == NULL) {
st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees);
if (st)
goto cleanup;
}
for (tre=0; tre<ntrees; ++tre) {
if (subtreelist[tre] == NULL || strlen(subtreelist[tre]) == 0) {
outofsubtree = FALSE;
break;
} else {
dnlen = strlen (dn);
subtreelen = strlen(subtreelist[tre]);
if ((dnlen >= subtreelen) && (strcasecmp((dn + dnlen - subtreelen), subtreelist[tre]) == 0)) {
outofsubtree = FALSE;
break;
}
}
}
if (outofsubtree == TRUE) {
st = EINVAL;
krb5_set_error_message(context, st,
_("DN is out of the realm subtree"));
goto cleanup;
}
/*
* The dn value will be set from dn, linkdn, or standalone_principal_dn.
* In the first two cases the object should already exist; in the last
* case we are expected to create the ldap object, so the check below
* must not be executed for it.
*/
if (standalone_principal_dn == NULL) {
/*
* If the ldap object is missing, this results in an error.
*/
/*
* Search for krbprincipalname attribute here.
* This is to find if a kerberos identity is already present
* on the ldap object, in which case adding a kerberos identity
* on the ldap object should result in an error.
*/
char *attributes[]={"krbticketpolicyreference", "krbprincipalname", NULL};
ldap_msgfree(result);
result = NULL;
LDAP_SEARCH_1(dn, LDAP_SCOPE_BASE, 0, attributes, IGNORE_STATUS);
if (st == LDAP_SUCCESS) {
ent = ldap_first_entry(ld, result);
if (ent != NULL) {
if ((values=ldap_get_values(ld, ent, "krbticketpolicyreference")) != NULL) {
ldap_value_free(values);
}
if ((values=ldap_get_values(ld, ent, "krbprincipalname")) != NULL) {
krb_identity_exists = TRUE;
ldap_value_free(values);
}
}
} else {
st = set_ldap_error(context, st, OP_SEARCH);
goto cleanup;
}
}
}
/*
* If xargs.dn is set then the request is to add a
* kerberos principal on an ldap object, but if
* there is already one on the ldap object this
* should result in an error.
*/
if (xargs.dn != NULL && krb_identity_exists == TRUE) {
st = EINVAL;
snprintf(errbuf, sizeof(errbuf),
_("ldap object is already kerberized"));
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
if (xargs.linkdn != NULL) {
/*
* link information can be changed using modprinc.
* However, link information can be changed only on the
* standalone kerberos principal objects. A standalone
* kerberos principal object is of type krbprincipal
* structural objectclass.
*
* NOTE: kerberos principals on an ldap object can't be
* linked to other ldap objects.
*/
if (optype == MODIFY_PRINCIPAL &&
kerberos_principal_object_type != KDB_STANDALONE_PRINCIPAL_OBJECT) {
st = EINVAL;
snprintf(errbuf, sizeof(errbuf),
_("link information can not be set/updated as the "
"kerberos principal belongs to an ldap object"));
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
/*
* Check the link information. If there is already a link
* existing then this operation is not allowed.
*/
{
char **linkdns=NULL;
int j=0;
if ((st=krb5_get_linkdn(context, entry, &linkdns)) != 0) {
snprintf(errbuf, sizeof(errbuf),
_("Failed getting object references"));
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
if (linkdns != NULL) {
st = EINVAL;
snprintf(errbuf, sizeof(errbuf),
_("kerberos principal is already linked to a ldap "
"object"));
krb5_set_error_message(context, st, "%s", errbuf);
for (j=0; linkdns[j] != NULL; ++j)
free (linkdns[j]);
free (linkdns);
goto cleanup;
}
}
establish_links = TRUE;
}
if (entry->mask & KADM5_LAST_SUCCESS) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->last_success)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastSuccessfulAuth", LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
if (entry->mask & KADM5_LAST_FAILED) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->last_failed)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastFailedAuth", LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free(strval[0]);
}
if (entry->mask & KADM5_FAIL_AUTH_COUNT) {
krb5_kvno fail_auth_count;
fail_auth_count = entry->fail_auth_count;
if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT)
fail_auth_count++;
st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
LDAP_MOD_REPLACE,
fail_auth_count);
if (st != 0)
goto cleanup;
} else if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) {
int attr_mask = 0;
krb5_boolean has_fail_count;
/* Check if the krbLoginFailedCount attribute exists. (Through
* krb5 1.8.1, it wasn't set in new entries.) */
st = krb5_get_attributes_mask(context, entry, &attr_mask);
if (st != 0)
goto cleanup;
has_fail_count = ((attr_mask & KDB_FAIL_AUTH_COUNT_ATTR) != 0);
/*
* If the client library and server support RFC 4525,
* then use it to increment by one the value of the
* krbLoginFailedCount attribute. Otherwise, assert the
* (provided) old value by deleting it before adding.
*/
#ifdef LDAP_MOD_INCREMENT
if (ldap_server_handle->server_info->modify_increment &&
has_fail_count) {
st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
LDAP_MOD_INCREMENT, 1);
if (st != 0)
goto cleanup;
} else {
#endif /* LDAP_MOD_INCREMENT */
if (has_fail_count) {
st = krb5_add_int_mem_ldap_mod(&mods,
"krbLoginFailedCount",
LDAP_MOD_DELETE,
entry->fail_auth_count);
if (st != 0)
goto cleanup;
}
st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
LDAP_MOD_ADD,
entry->fail_auth_count + 1);
if (st != 0)
goto cleanup;
#ifdef LDAP_MOD_INCREMENT
}
#endif
} else if (optype == ADD_PRINCIPAL) {
/* Initialize krbLoginFailedCount in new entries to help avoid a
* race during the first failed login. */
st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
LDAP_MOD_ADD, 0);
if (st != 0)
goto cleanup;
}
if (entry->mask & KADM5_MAX_LIFE) {
if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxticketlife", LDAP_MOD_REPLACE, entry->max_life)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_MAX_RLIFE) {
if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxrenewableage", LDAP_MOD_REPLACE,
entry->max_renewable_life)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_ATTRIBUTES) {
if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbticketflags", LDAP_MOD_REPLACE,
entry->attributes)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_PRINCIPAL) {
memset(strval, 0, sizeof(strval));
strval[0] = user;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalname", LDAP_MOD_REPLACE, strval)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_PRINC_EXPIRE_TIME) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->expiration)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalexpiration", LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
if (entry->mask & KADM5_PW_EXPIRATION) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpasswordexpiration",
LDAP_MOD_REPLACE,
strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
if (entry->mask & KADM5_POLICY) {
memset(&princ_ent, 0, sizeof(princ_ent));
for (tl_data=entry->tl_data; tl_data; tl_data=tl_data->tl_data_next) {
if (tl_data->tl_data_type == KRB5_TL_KADM_DATA) {
if ((st = krb5_lookup_tl_kadm_data(tl_data, &princ_ent)) != 0) {
goto cleanup;
}
break;
}
}
if (princ_ent.aux_attributes & KADM5_POLICY) {
memset(strval, 0, sizeof(strval));
if ((st = krb5_ldap_name_to_policydn (context, princ_ent.policy, &polname)) != 0)
goto cleanup;
strval[0] = polname;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, strval)) != 0)
goto cleanup;
} else {
st = EINVAL;
krb5_set_error_message(context, st, "Password policy value null");
goto cleanup;
}
} else if (entry->mask & KADM5_LOAD && found_entry == TRUE) {
/*
* A load is special in that attributes absent from the dump entry
* must be removed from an existing entry.
*/
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, NULL)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_POLICY_CLR) {
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_DELETE, NULL)) != 0)
goto cleanup;
}
if (entry->mask & KADM5_KEY_DATA || entry->mask & KADM5_KVNO) {
krb5_kvno mkvno;
if ((st=krb5_dbe_lookup_mkvno(context, entry, &mkvno)) != 0)
goto cleanup;
bersecretkey = krb5_encode_krbsecretkey (entry->key_data,
entry->n_key_data, mkvno);
if (bersecretkey == NULL) {
st = ENOMEM;
goto cleanup;
}
if ((st=krb5_add_ber_mem_ldap_mod(&mods, "krbprincipalkey",
LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, bersecretkey)) != 0)
goto cleanup;
if (!(entry->mask & KADM5_PRINCIPAL)) {
memset(strval, 0, sizeof(strval));
if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods,
"krbpasswordexpiration",
LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
/* Update last password change whenever a new key is set */
{
krb5_timestamp last_pw_changed;
if ((st=krb5_dbe_lookup_last_pwd_change(context, entry,
&last_pw_changed)) != 0)
goto cleanup;
memset(strval, 0, sizeof(strval));
if ((strval[0] = getstringtime(last_pw_changed)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastPwdChange",
LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
} /* Modify Key data ends here */
/* Set tl_data */
if (entry->tl_data != NULL) {
int count = 0;
struct berval **ber_tl_data = NULL;
krb5_tl_data *ptr;
krb5_timestamp unlock_time;
for (ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) {
if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE
#ifdef SECURID
|| ptr->tl_data_type == KRB5_TL_DB_ARGS
#endif
|| ptr->tl_data_type == KRB5_TL_KADM_DATA
|| ptr->tl_data_type == KDB_TL_USER_INFO
|| ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL
|| ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK)
continue;
count++;
}
if (count != 0) {
int j;
ber_tl_data = (struct berval **) calloc (count + 1,
sizeof (struct berval*));
if (ber_tl_data == NULL) {
st = ENOMEM;
goto cleanup;
}
for (j = 0, ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) {
/* Ignore tl_data that are stored in separate directory
* attributes */
if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE
#ifdef SECURID
|| ptr->tl_data_type == KRB5_TL_DB_ARGS
#endif
|| ptr->tl_data_type == KRB5_TL_KADM_DATA
|| ptr->tl_data_type == KDB_TL_USER_INFO
|| ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL
|| ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK)
continue;
if ((st = tl_data2berval (ptr, &ber_tl_data[j])) != 0)
break;
j++;
}
if (st == 0) {
ber_tl_data[count] = NULL;
st=krb5_add_ber_mem_ldap_mod(&mods, "krbExtraData",
LDAP_MOD_REPLACE |
LDAP_MOD_BVALUES, ber_tl_data);
}
for (j = 0; ber_tl_data[j] != NULL; j++) {
free(ber_tl_data[j]->bv_val);
free(ber_tl_data[j]);
}
free(ber_tl_data);
if (st != 0)
goto cleanup;
}
if ((st=krb5_dbe_lookup_last_admin_unlock(context, entry,
&unlock_time)) != 0)
goto cleanup;
if (unlock_time != 0) {
/* Update last admin unlock */
memset(strval, 0, sizeof(strval));
if ((strval[0] = getstringtime(unlock_time)) == NULL)
goto cleanup;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastAdminUnlock",
LDAP_MOD_REPLACE, strval)) != 0) {
free (strval[0]);
goto cleanup;
}
free (strval[0]);
}
}
/* Directory specific attribute */
if (xargs.tktpolicydn != NULL) {
int tmask=0;
if (strlen(xargs.tktpolicydn) != 0) {
st = checkattributevalue(ld, xargs.tktpolicydn, "objectclass", policyclass, &tmask);
CHECK_CLASS_VALIDITY(st, tmask, _("ticket policy object value: "));
strval[0] = xargs.tktpolicydn;
strval[1] = NULL;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_REPLACE, strval)) != 0)
goto cleanup;
} else {
/* if xargs.tktpolicydn is an empty string, then delete the
* already existing krbticketpolicyreference attr */
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_DELETE, NULL)) != 0)
goto cleanup;
}
}
if (establish_links == TRUE) {
memset(strval, 0, sizeof(strval));
strval[0] = xargs.linkdn;
if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbObjectReferences", LDAP_MOD_REPLACE, strval)) != 0)
goto cleanup;
}
/*
* If mods is NULL, just return. This can happen for a modprinc with
* nothing to change, so it is not treated as an error; an addprinc
* will always have at least the principal name and the keys passed in.
*/
if (mods == NULL)
goto cleanup;
if (create_standalone_prinicipal == TRUE) {
memset(strval, 0, sizeof(strval));
strval[0] = "krbprincipal";
strval[1] = "krbprincipalaux";
strval[2] = "krbTicketPolicyAux";
if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0)
goto cleanup;
st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL);
if (st == LDAP_ALREADY_EXISTS && entry->mask & KADM5_LOAD) {
/* a load operation must replace an existing entry */
st = ldap_delete_ext_s(ld, standalone_principal_dn, NULL, NULL);
if (st != LDAP_SUCCESS) {
snprintf(errbuf, sizeof(errbuf),
_("Principal delete failed (trying to replace "
"entry): %s"), ldap_err2string(st));
st = translate_ldap_error (st, OP_ADD);
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
} else {
st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL);
}
}
if (st != LDAP_SUCCESS) {
snprintf(errbuf, sizeof(errbuf), _("Principal add failed: %s"),
ldap_err2string(st));
st = translate_ldap_error (st, OP_ADD);
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
} else {
/*
* Here an existing ldap object is modified, and the modification
* may touch any attribute, so always ensure that the ldap object
* is extended with all the kerberos related objectclasses so that
* there are no constraint violations.
*/
{
char *attrvalues[] = {"krbprincipalaux", "krbTicketPolicyAux", NULL};
int p, q, r=0, amask=0;
if ((st=checkattributevalue(ld, (xargs.dn) ? xargs.dn : principal_dn,
"objectclass", attrvalues, &amask)) != 0)
goto cleanup;
memset(strval, 0, sizeof(strval));
for (p=1, q=0; p<=2; p<<=1, ++q) {
if ((p & amask) == 0)
strval[r++] = attrvalues[q];
}
if (r != 0) {
if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0)
goto cleanup;
}
}
if (xargs.dn != NULL)
st=ldap_modify_ext_s(ld, xargs.dn, mods, NULL, NULL);
else
st = ldap_modify_ext_s(ld, principal_dn, mods, NULL, NULL);
if (st != LDAP_SUCCESS) {
snprintf(errbuf, sizeof(errbuf), _("User modification failed: %s"),
ldap_err2string(st));
st = translate_ldap_error (st, OP_MOD);
krb5_set_error_message(context, st, "%s", errbuf);
goto cleanup;
}
if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT)
entry->fail_auth_count++;
}
cleanup:
if (user)
free(user);
if (filtuser)
free(filtuser);
free_xargs(xargs);
if (standalone_principal_dn)
free(standalone_principal_dn);
if (principal_dn)
free (principal_dn);
if (polname != NULL)
free(polname);
for (tre = 0; tre < ntrees; tre++)
free(subtreelist[tre]);
free(subtreelist);
if (subtree)
free (subtree);
if (bersecretkey) {
for (l=0; bersecretkey[l]; ++l) {
if (bersecretkey[l]->bv_val)
free (bersecretkey[l]->bv_val);
free (bersecretkey[l]);
}
free (bersecretkey);
}
if (keys)
free (keys);
ldap_mods_free(mods, 1);
ldap_osa_free_princ_ent(&princ_ent);
ldap_msgfree(result);
krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle);
return(st);
}
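/*
 * A minimal standalone sketch (not part of the original source) of the
 * modification-list pattern used throughout the function above. The
 * krb5_add_*_mem_ldap_mod helpers are defined elsewhere in this module;
 * this shows roughly equivalent raw libldap usage, assuming only the
 * standard OpenLDAP client API (LDAPMod, ldap_modify_ext_s).
 */
#if 0 /* illustrative only; not compiled into this module */
static int
example_replace_attr(LDAP *ld, const char *dn,
                     const char *attr, const char *value)
{
    LDAPMod mod, *mods[2];
    char *vals[2];

    vals[0] = (char *) value;
    vals[1] = NULL;                 /* value list is NULL-terminated */

    mod.mod_op = LDAP_MOD_REPLACE;  /* replace any existing values */
    mod.mod_type = (char *) attr;
    mod.mod_values = vals;

    mods[0] = &mod;
    mods[1] = NULL;                 /* mods array is NULL-terminated */

    return ldap_modify_ext_s(ld, dn, mods, NULL, NULL);
}
#endif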
krb5_error_code
krb5_read_tkt_policy(krb5_context context, krb5_ldap_context *ldap_context,
krb5_db_entry *entries, char *policy)
{
krb5_error_code st=0;
int mask=0, omask=0;
int tkt_mask=(KDB_MAX_LIFE_ATTR | KDB_MAX_RLIFE_ATTR | KDB_TKT_FLAGS_ATTR);
krb5_ldap_policy_params *tktpoldnparam=NULL;
if ((st=krb5_get_attributes_mask(context, entries, &mask)) != 0)
goto cleanup;
if ((mask & tkt_mask) == tkt_mask)
goto cleanup;
if (policy != NULL) {
st = krb5_ldap_read_policy(context, policy, &tktpoldnparam, &omask);
if (st && st != KRB5_KDB_NOENTRY) {
prepend_err_str(context, _("Error reading ticket policy. "), st,
st);
goto cleanup;
}
st = 0; /* reset the return status */
}
if ((mask & KDB_MAX_LIFE_ATTR) == 0) {
if ((omask & KDB_MAX_LIFE_ATTR) == KDB_MAX_LIFE_ATTR)
entries->max_life = tktpoldnparam->maxtktlife;
else if (ldap_context->lrparams->max_life)
entries->max_life = ldap_context->lrparams->max_life;
}
if ((mask & KDB_MAX_RLIFE_ATTR) == 0) {
if ((omask & KDB_MAX_RLIFE_ATTR) == KDB_MAX_RLIFE_ATTR)
entries->max_renewable_life = tktpoldnparam->maxrenewlife;
else if (ldap_context->lrparams->max_renewable_life)
entries->max_renewable_life = ldap_context->lrparams->max_renewable_life;
}
if ((mask & KDB_TKT_FLAGS_ATTR) == 0) {
if ((omask & KDB_TKT_FLAGS_ATTR) == KDB_TKT_FLAGS_ATTR)
entries->attributes = tktpoldnparam->tktflags;
else if (ldap_context->lrparams->tktflags)
entries->attributes |= ldap_context->lrparams->tktflags;
}
krb5_ldap_free_policy(context, tktpoldnparam);
cleanup:
return st;
}
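/*
 * A sketch (not krb5 API; names are illustrative) of the three-level
 * fallback implemented by krb5_read_tkt_policy above: a value stored on
 * the entry itself wins, then the referenced ticket policy, then the
 * realm-wide default.
 */
#if 0 /* illustrative only */
static int
example_pick_value(int entry_has_attr, int entry_val,
                   int policy_has_attr, int policy_val,
                   int realm_default)
{
    if (entry_has_attr)
        return entry_val;      /* per-principal attribute wins */
    if (policy_has_attr)
        return policy_val;     /* then the ticket policy */
    return realm_default;      /* finally the realm default */
}
#endif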
krb5_error_code
krb5_decode_krbsecretkey(krb5_context context, krb5_db_entry *entries,
struct berval **bvalues,
krb5_tl_data *userinfo_tl_data, krb5_kvno *mkvno)
{
char *user=NULL;
int i=0, j=0, noofkeys=0;
krb5_key_data *key_data=NULL, *tmp;
krb5_error_code st=0;
if ((st=krb5_unparse_name(context, entries->princ, &user)) != 0)
goto cleanup;
for (i=0; bvalues[i] != NULL; ++i) {
krb5_int16 n_kd;
krb5_key_data *kd;
krb5_data in;
if (bvalues[i]->bv_len == 0)
continue;
in.length = bvalues[i]->bv_len;
in.data = bvalues[i]->bv_val;
st = asn1_decode_sequence_of_keys (&in,
&kd,
&n_kd,
mkvno);
if (st != 0) {
const char *msg = error_message(st);
st = -1; /* Something more appropriate ? */
krb5_set_error_message(context, st, _("unable to decode stored "
"principal key data (%s)"),
msg);
goto cleanup;
}
noofkeys += n_kd;
tmp = key_data;
/* Allocate an extra key data to avoid allocating zero bytes. */
key_data = realloc(key_data, (noofkeys + 1) * sizeof (krb5_key_data));
if (key_data == NULL) {
key_data = tmp;
st = ENOMEM;
goto cleanup;
}
for (j = 0; j < n_kd; j++)
key_data[noofkeys - n_kd + j] = kd[j];
free (kd);
}
entries->n_key_data = noofkeys;
entries->key_data = key_data;
cleanup:
ldap_value_free_len(bvalues);
free (user);
return st;
}
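/*
 * A standalone sketch (not part of the original source) of the
 * realloc-grow pattern used in krb5_decode_krbsecretkey above: the old
 * pointer is kept in a temporary so a failed realloc does not leak the
 * already-decoded array.
 */
#if 0 /* illustrative only */
static int
example_append(int **arrp, int *countp, int value)
{
    int *tmp = *arrp;
    int *grown = realloc(*arrp, (*countp + 1) * sizeof(int));

    if (grown == NULL) {
        *arrp = tmp;           /* old array is still valid and owned */
        return ENOMEM;
    }
    grown[*countp] = value;
    *arrp = grown;
    (*countp)++;
    return 0;
}
#endif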
static char *
getstringtime(krb5_timestamp epochtime)
{
struct tm tme;
char *strtime=NULL;
time_t posixtime = epochtime;
strtime = calloc (50, 1);
if (strtime == NULL)
return NULL;
if (gmtime_r(&posixtime, &tme) == NULL) {
free(strtime); /* do not leak the buffer on conversion failure */
return NULL;
}
strftime(strtime, 50, "%Y%m%d%H%M%SZ", &tme);
return strtime;
}
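/*
 * Usage sketch (not part of the original source, assumes <time.h> and
 * <stdio.h> are available): getstringtime formats a timestamp as an LDAP
 * generalized-time string, e.g. time 0 becomes "19700101000000Z".
 */
#if 0 /* illustrative only */
static int
example_print_now(void)
{
    char *s = getstringtime((krb5_timestamp) time(NULL));

    if (s == NULL)
        return ENOMEM;
    printf("%s\n", s);         /* e.g. "20240101120000Z" */
    free(s);
    return 0;
}
#endif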
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2201_0 |
crossvul-cpp_data_bad_5520_0 | /* Pango
* glyphstring.c:
*
* Copyright (C) 1999 Red Hat Software
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include <glib.h>
#include "pango-glyph.h"
#include "pango-font.h"
#include "pango-impl-utils.h"
/**
* pango_glyph_string_new:
*
* Create a new #PangoGlyphString.
*
* Return value: the newly allocated #PangoGlyphString, which
* should be freed with pango_glyph_string_free().
*/
PangoGlyphString *
pango_glyph_string_new (void)
{
PangoGlyphString *string = g_slice_new (PangoGlyphString);
string->num_glyphs = 0;
string->space = 0;
string->glyphs = NULL;
string->log_clusters = NULL;
return string;
}
/**
* pango_glyph_string_set_size:
* @string: a #PangoGlyphString.
* @new_len: the new length of the string.
*
* Resize a glyph string to the given length.
*/
void
pango_glyph_string_set_size (PangoGlyphString *string, gint new_len)
{
g_return_if_fail (new_len >= 0);
while (new_len > string->space)
{
if (string->space == 0)
string->space = 1;
else
string->space *= 2;
if (string->space < 0)
{
g_warning ("glyph string length overflows maximum integer size, truncated");
new_len = string->space = G_MAXINT - 8;
}
}
string->glyphs = g_realloc (string->glyphs, string->space * sizeof (PangoGlyphInfo));
string->log_clusters = g_realloc (string->log_clusters, string->space * sizeof (gint));
string->num_glyphs = new_len;
}
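/*
 * A minimal standalone sketch (not Pango API, and not the upstream fix)
 * of the doubling-growth pattern used by pango_glyph_string_set_size()
 * above, with explicit guards so neither the element count nor the byte
 * count handed to the allocator can wrap around.
 */
#if 0 /* illustrative only; not compiled into this file */
static gboolean
example_grow_capacity (gint *space, gint new_len, gsize elem_size)
{
  gint cap = *space;

  g_return_val_if_fail (new_len >= 0 && elem_size > 0, FALSE);

  while (new_len > cap)
    {
      if (cap > G_MAXINT / 2)
        return FALSE;                 /* doubling would overflow gint */
      cap = cap == 0 ? 1 : cap * 2;
    }
  if ((gsize) cap > G_MAXSIZE / elem_size)
    return FALSE;                     /* byte count would overflow gsize */

  *space = cap;
  return TRUE;
}
#endif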
GType
pango_glyph_string_get_type (void)
{
static GType our_type = 0;
if (G_UNLIKELY (our_type == 0))
our_type = g_boxed_type_register_static (I_("PangoGlyphString"),
(GBoxedCopyFunc)pango_glyph_string_copy,
(GBoxedFreeFunc)pango_glyph_string_free);
return our_type;
}
/**
* pango_glyph_string_copy:
* @string: a #PangoGlyphString, may be %NULL
*
* Copy a glyph string and associated storage.
*
* Return value: the newly allocated #PangoGlyphString, which
* should be freed with pango_glyph_string_free(),
* or %NULL if @string was %NULL.
*/
PangoGlyphString *
pango_glyph_string_copy (PangoGlyphString *string)
{
PangoGlyphString *new_string;
if (string == NULL)
return NULL;
new_string = g_slice_new (PangoGlyphString);
*new_string = *string;
new_string->glyphs = g_memdup (string->glyphs,
string->space * sizeof (PangoGlyphInfo));
new_string->log_clusters = g_memdup (string->log_clusters,
string->space * sizeof (gint));
return new_string;
}
/**
* pango_glyph_string_free:
* @string: a #PangoGlyphString, may be %NULL
*
* Free a glyph string and associated storage.
*/
void
pango_glyph_string_free (PangoGlyphString *string)
{
if (string == NULL)
return;
g_free (string->glyphs);
g_free (string->log_clusters);
g_slice_free (PangoGlyphString, string);
}
/**
* pango_glyph_string_extents_range:
* @glyphs: a #PangoGlyphString
* @start: start index
* @end: end index (the range is the set of bytes with
* indices such that start <= index < end)
* @font: a #PangoFont
* @ink_rect: rectangle used to store the extents of the glyph string range as drawn
* or %NULL to indicate that the result is not needed.
* @logical_rect: rectangle used to store the logical extents of the glyph string range
* or %NULL to indicate that the result is not needed.
*
* Computes the extents of a sub-portion of a glyph string. The extents are
* relative to the start of the glyph string range (the origin of their
* coordinate system is at the start of the range, not at the start of the entire
* glyph string).
**/
void
pango_glyph_string_extents_range (PangoGlyphString *glyphs,
int start,
int end,
PangoFont *font,
PangoRectangle *ink_rect,
PangoRectangle *logical_rect)
{
int x_pos = 0;
int i;
/* Note that the handling of empty rectangles for ink
* and logical rectangles is different. A zero-height ink
* rectangle makes no contribution to the overall ink rect,
* while a zero-height logical rect still reserves horizontal
* width. Also, we may return zero-width, positive height
* logical rectangles, while we'll never do that for the
* ink rect.
*/
g_return_if_fail (start <= end);
if (G_UNLIKELY (!ink_rect && !logical_rect))
return;
if (ink_rect)
{
ink_rect->x = 0;
ink_rect->y = 0;
ink_rect->width = 0;
ink_rect->height = 0;
}
if (logical_rect)
{
logical_rect->x = 0;
logical_rect->y = 0;
logical_rect->width = 0;
logical_rect->height = 0;
}
for (i = start; i < end; i++)
{
PangoRectangle glyph_ink;
PangoRectangle glyph_logical;
PangoGlyphGeometry *geometry = &glyphs->glyphs[i].geometry;
pango_font_get_glyph_extents (font, glyphs->glyphs[i].glyph,
ink_rect ? &glyph_ink : NULL,
logical_rect ? &glyph_logical : NULL);
if (ink_rect && glyph_ink.width != 0 && glyph_ink.height != 0)
{
if (ink_rect->width == 0 || ink_rect->height == 0)
{
ink_rect->x = x_pos + glyph_ink.x + geometry->x_offset;
ink_rect->width = glyph_ink.width;
ink_rect->y = glyph_ink.y + geometry->y_offset;
ink_rect->height = glyph_ink.height;
}
else
{
int new_x, new_y;
new_x = MIN (ink_rect->x, x_pos + glyph_ink.x + geometry->x_offset);
ink_rect->width = MAX (ink_rect->x + ink_rect->width,
x_pos + glyph_ink.x + glyph_ink.width + geometry->x_offset) - new_x;
ink_rect->x = new_x;
new_y = MIN (ink_rect->y, glyph_ink.y + geometry->y_offset);
ink_rect->height = MAX (ink_rect->y + ink_rect->height,
glyph_ink.y + glyph_ink.height + geometry->y_offset) - new_y;
ink_rect->y = new_y;
}
}
if (logical_rect)
{
logical_rect->width += geometry->width;
if (i == start)
{
logical_rect->y = glyph_logical.y;
logical_rect->height = glyph_logical.height;
}
else
{
int new_y = MIN (logical_rect->y, glyph_logical.y);
logical_rect->height = MAX (logical_rect->y + logical_rect->height,
glyph_logical.y + glyph_logical.height) - new_y;
logical_rect->y = new_y;
}
}
x_pos += geometry->width;
}
}
/**
* pango_glyph_string_extents:
* @glyphs: a #PangoGlyphString
* @font: a #PangoFont
* @ink_rect: rectangle used to store the extents of the glyph string as drawn
* or %NULL to indicate that the result is not needed.
* @logical_rect: rectangle used to store the logical extents of the glyph string
* or %NULL to indicate that the result is not needed.
*
* Compute the logical and ink extents of a glyph string. See the documentation
* for pango_font_get_glyph_extents() for details about the interpretation
* of the rectangles.
*/
void
pango_glyph_string_extents (PangoGlyphString *glyphs,
PangoFont *font,
PangoRectangle *ink_rect,
PangoRectangle *logical_rect)
{
pango_glyph_string_extents_range (glyphs, 0, glyphs->num_glyphs,
font, ink_rect, logical_rect);
}
/**
* pango_glyph_string_get_width:
* @glyphs: a #PangoGlyphString
*
* Computes the logical width of the glyph string as can also be computed
* using pango_glyph_string_extents(). However, since this only computes the
* width, it's much faster. This is in fact only a convenience function that
* computes the sum of geometry.width for each glyph in the @glyphs.
*
* Return value: the logical width of the glyph string.
*
* Since: 1.14
*/
int
pango_glyph_string_get_width (PangoGlyphString *glyphs)
{
int i;
int width = 0;
for (i = 0; i < glyphs->num_glyphs; i++)
width += glyphs->glyphs[i].geometry.width;
return width;
}
/**
* pango_glyph_string_get_logical_widths:
* @glyphs: a #PangoGlyphString
* @text: the text corresponding to the glyphs
* @length: the length of @text, in bytes
* @embedding_level: the embedding level of the string
* @logical_widths: an array whose length is g_utf8_strlen (text, length)
* to be filled in with the resulting character widths.
*
* Given a #PangoGlyphString resulting from pango_shape() and the corresponding
* text, determine the screen width corresponding to each character. When
* multiple characters compose a single cluster, the width of the entire
* cluster is divided equally among the characters.
**/
void
pango_glyph_string_get_logical_widths (PangoGlyphString *glyphs,
const char *text,
int length,
int embedding_level,
int *logical_widths)
{
/* Build a PangoGlyphItem so we can use PangoGlyphItemIter.
* This API should have been made to take a PangoGlyphItem... */
PangoItem item = {0, length, g_utf8_strlen (text, length),
{NULL, NULL, NULL,
embedding_level, PANGO_GRAVITY_AUTO, 0,
PANGO_SCRIPT_UNKNOWN, NULL,
NULL}};
PangoGlyphItem glyph_item = {&item, glyphs};
PangoGlyphItemIter iter;
gboolean has_cluster;
int dir;
dir = embedding_level % 2 == 0 ? +1 : -1;
for (has_cluster = pango_glyph_item_iter_init_start (&iter, &glyph_item, text);
has_cluster;
has_cluster = pango_glyph_item_iter_next_cluster (&iter))
{
int glyph_index, char_index, num_chars, cluster_width = 0, char_width;
for (glyph_index = iter.start_glyph;
glyph_index != iter.end_glyph;
glyph_index += dir)
{
cluster_width += glyphs->glyphs[glyph_index].geometry.width;
}
num_chars = iter.end_char - iter.start_char;
if (num_chars) /* pedantic */
{
char_width = cluster_width / num_chars;
for (char_index = iter.start_char;
char_index < iter.end_char;
char_index++)
{
logical_widths[char_index] = char_width;
}
/* add any residues to the first char */
logical_widths[iter.start_char] += cluster_width - (char_width * num_chars);
}
}
}
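/*
 * Worked example (not part of Pango): a cluster 10 Pango units wide that
 * covers 3 characters divides as 10 / 3 = 3 per character, and the
 * residue 10 - 3 * 3 = 1 is added to the first character, so the
 * per-character widths 4, 3, 3 always sum back to the cluster width.
 */
#if 0 /* illustrative only */
static void
example_divide_cluster (void)
{
  int cluster_width = 10, num_chars = 3;
  int char_width = cluster_width / num_chars;           /* 3 */
  int widths[3] = { char_width, char_width, char_width };

  widths[0] += cluster_width - char_width * num_chars;  /* 4, 3, 3 */
  g_assert (widths[0] + widths[1] + widths[2] == cluster_width);
}
#endif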
/* The initial implementation here is script independent,
* but it might actually need to be virtualized into the
* rendering modules. Otherwise, we probably will end up
* enforcing unnatural cursor behavior for some languages.
*
* The only distinction that Uniscript makes is whether
* cursor positioning is allowed within clusters or not.
*/
/**
* pango_glyph_string_index_to_x:
* @glyphs: the glyphs returned from pango_shape()
* @text: the text for the run
* @length: the number of bytes (not characters) in @text.
* @analysis: the analysis information returned from pango_itemize()
* @index_: the byte index within @text
* @trailing: whether we should compute the result for the beginning (%FALSE)
* or end (%TRUE) of the character.
* @x_pos: location to store result
*
* Converts from character position to x position. (X position
* is measured from the left edge of the run). Character positions
* are computed by dividing up each cluster into equal portions.
*/
void
pango_glyph_string_index_to_x (PangoGlyphString *glyphs,
char *text,
int length,
PangoAnalysis *analysis,
int index,
gboolean trailing,
int *x_pos)
{
int i;
int start_xpos = 0;
int end_xpos = 0;
int width = 0;
int start_index = -1;
int end_index = -1;
int cluster_chars = 0;
int cluster_offset = 0;
char *p;
g_return_if_fail (glyphs != NULL);
g_return_if_fail (length >= 0);
g_return_if_fail (length == 0 || text != NULL);
if (!x_pos) /* Allow the user to do the useless */
return;
if (glyphs->num_glyphs == 0)
{
*x_pos = 0;
return;
}
/* Calculate the starting and ending character positions
* and x positions for the cluster
*/
if (analysis->level % 2) /* Right to left */
{
for (i = glyphs->num_glyphs - 1; i >= 0; i--)
width += glyphs->glyphs[i].geometry.width;
for (i = glyphs->num_glyphs - 1; i >= 0; i--)
{
if (glyphs->log_clusters[i] > index)
{
end_index = glyphs->log_clusters[i];
end_xpos = width;
break;
}
if (glyphs->log_clusters[i] != start_index)
{
start_index = glyphs->log_clusters[i];
start_xpos = width;
}
width -= glyphs->glyphs[i].geometry.width;
}
}
else /* Left to right */
{
for (i = 0; i < glyphs->num_glyphs; i++)
{
if (glyphs->log_clusters[i] > index)
{
end_index = glyphs->log_clusters[i];
end_xpos = width;
break;
}
if (glyphs->log_clusters[i] != start_index)
{
start_index = glyphs->log_clusters[i];
start_xpos = width;
}
width += glyphs->glyphs[i].geometry.width;
}
}
if (end_index == -1)
{
end_index = length;
end_xpos = (analysis->level % 2) ? 0 : width;
}
/* Calculate offset of character within cluster */
p = text + start_index;
while (p < text + end_index)
{
if (p < text + index)
cluster_offset++;
cluster_chars++;
p = g_utf8_next_char (p);
}
if (trailing)
cluster_offset += 1;
*x_pos = ((cluster_chars - cluster_offset) * start_xpos +
cluster_offset * end_xpos) / cluster_chars;
}
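/*
 * Worked example (not part of Pango) of the final interpolation above:
 * with a 4-character cluster spanning start_xpos = 0 to end_xpos = 1000,
 * cluster_offset = 1 gives ((4 - 1) * 0 + 1 * 1000) / 4 = 250, i.e. each
 * character is assigned an equal quarter of the cluster.
 */
#if 0 /* illustrative only */
static int
example_interpolate (int cluster_chars, int cluster_offset,
                     int start_xpos, int end_xpos)
{
  return ((cluster_chars - cluster_offset) * start_xpos +
          cluster_offset * end_xpos) / cluster_chars;
}
#endif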
/**
* pango_glyph_string_x_to_index:
* @glyphs: the glyphs returned from pango_shape()
* @text: the text for the run
* @length: the number of bytes (not characters) in @text.
* @analysis: the analysis information returned from pango_itemize()
* @x_pos: the x offset (in Pango units)
* @index_: location to store calculated byte index within @text
* @trailing: location to store a boolean indicating
* whether the user clicked on the leading or trailing
* edge of the character.
*
* Convert from x offset to character position. Character positions
* are computed by dividing up each cluster into equal portions.
* In scripts where positioning within a cluster is not allowed
* (such as Thai), the returned value may not be a valid cursor
* position; the caller must combine the result with the logical
* attributes for the text to compute the valid cursor position.
*/
void
pango_glyph_string_x_to_index (PangoGlyphString *glyphs,
char *text,
int length,
PangoAnalysis *analysis,
int x_pos,
int *index,
gboolean *trailing)
{
int i;
int start_xpos = 0;
int end_xpos = 0;
int width = 0;
int start_index = -1;
int end_index = -1;
int cluster_chars = 0;
char *p;
gboolean found = FALSE;
/* Find the cluster containing the position */
width = 0;
if (analysis->level % 2) /* Right to left */
{
for (i = glyphs->num_glyphs - 1; i >= 0; i--)
width += glyphs->glyphs[i].geometry.width;
for (i = glyphs->num_glyphs - 1; i >= 0; i--)
{
if (glyphs->log_clusters[i] != start_index)
{
if (found)
{
end_index = glyphs->log_clusters[i];
end_xpos = width;
break;
}
else
{
start_index = glyphs->log_clusters[i];
start_xpos = width;
}
}
width -= glyphs->glyphs[i].geometry.width;
if (width <= x_pos && x_pos < width + glyphs->glyphs[i].geometry.width)
found = TRUE;
}
}
else /* Left to right */
{
for (i = 0; i < glyphs->num_glyphs; i++)
{
if (glyphs->log_clusters[i] != start_index)
{
if (found)
{
end_index = glyphs->log_clusters[i];
end_xpos = width;
break;
}
else
{
start_index = glyphs->log_clusters[i];
start_xpos = width;
}
}
if (width <= x_pos && x_pos < width + glyphs->glyphs[i].geometry.width)
found = TRUE;
width += glyphs->glyphs[i].geometry.width;
}
}
if (end_index == -1)
{
end_index = length;
end_xpos = (analysis->level % 2) ? 0 : width;
}
/* Calculate number of chars within cluster */
p = text + start_index;
while (p < text + end_index)
{
p = g_utf8_next_char (p);
cluster_chars++;
}
if (start_xpos == end_xpos)
{
if (index)
*index = start_index;
if (trailing)
*trailing = FALSE;
}
else
{
double cp = ((double)(x_pos - start_xpos) * cluster_chars) / (end_xpos - start_xpos);
/* LTR and right-to-left have to be handled separately
* here because of the edge condition when we are exactly
* at a pixel boundary; end_xpos goes with the next
* character for LTR, with the previous character for RTL.
*/
if (start_xpos < end_xpos) /* Left-to-right */
{
if (index)
{
char *p = text + start_index;
int i = 0;
while (i + 1 <= cp)
{
p = g_utf8_next_char (p);
i++;
}
*index = (p - text);
}
if (trailing)
*trailing = (cp - (int)cp >= 0.5) ? TRUE : FALSE;
}
else /* Right-to-left */
{
if (index)
{
char *p = text + start_index;
int i = 0;
while (i + 1 < cp)
{
p = g_utf8_next_char (p);
i++;
}
*index = (p - text);
}
if (trailing)
{
double cp_flip = cluster_chars - cp;
*trailing = (cp_flip - (int)cp_flip >= 0.5) ? FALSE : TRUE;
}
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5520_0 |
crossvul-cpp_data_good_3669_0 | /* Alternative malloc implementation for multiple threads without
lock contention based on dlmalloc. (C) 2005-2010 Niall Douglas
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
#if 0 /* Effectively makes nedmalloc = dlmalloc */
#define THREADCACHEMAX 0
#define DEFAULT_GRANULARITY 65536
#define DEFAULTMAXTHREADSINPOOL 1
#endif
#ifdef _MSC_VER
/* Enable full aliasing on MSVC */
/*#pragma optimize("a", on)*/
/*#pragma optimize("g", off)*/
#pragma warning(push)
#pragma warning(disable:4100) /* unreferenced formal parameter */
#pragma warning(disable:4127) /* conditional expression is constant */
#pragma warning(disable:4232) /* address of dllimport is not static, identity not guaranteed */
#pragma warning(disable:4706) /* assignment within conditional expression */
#define _CRT_SECURE_NO_WARNINGS 1 /* Don't care about MSVC warnings on POSIX functions */
#include <stdio.h>
#define fopen(f, m) _fsopen((f), (m), 0x40/*_SH_DENYNO*/) /* Have Windows let other programs view the log file as it is written */
#ifndef UNICODE
#define UNICODE /* Turn on windows unicode support */
#endif
#else
#include <stdio.h>
#endif
/*#define NEDMALLOC_DEBUG 1*/
/*#define ENABLE_LOGGING 7
#define NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned) (((type)&ENABLE_LOGGING)&&((size)>16*1024))
#define NEDMALLOC_STACKBACKTRACEDEPTH 16*/
/*#define NEDMALLOC_FORCERESERVE(p, mem, size) (((size)>=(256*1024)) ? M2_RESERVE_MULT(8) : 0)*/
/*#define WIN32_DIRECT_USE_FILE_MAPPINGS 0*/
/*#define NEDMALLOC_DEBUG 1*/
/*#define FULLSANITYCHECKS*/
/* There is only support for the user mode page allocator on Windows at present */
#if !defined(ENABLE_USERMODEPAGEALLOCATOR)
#define ENABLE_USERMODEPAGEALLOCATOR 0
#endif
/* If link time code generation is on, don't force or prevent inlining */
#if defined(_MSC_VER) && defined(NEDMALLOC_DLL_EXPORTS)
#define FORCEINLINE
#define NOINLINE
#endif
#include "nedmalloc.h"
#include <errno.h>
#if defined(WIN32)
#include <malloc.h>
#else
#if defined(__cplusplus)
extern "C"
#else
extern
#endif
#if defined(__linux__) || defined(__FreeBSD__)
/* Sadly we can't include <malloc.h> as it causes a redefinition error */
size_t malloc_usable_size(void *);
#elif defined(__APPLE__)
size_t malloc_size(const void *ptr);
#else
#error Do not know what to do here
#endif
#endif
#if USE_ALLOCATOR==1
#define MSPACES 1
#define ONLY_MSPACES 1
#endif
#define USE_DL_PREFIX 1
#ifndef USE_LOCKS
#define USE_LOCKS 1
#endif
#define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */
#ifndef NEDMALLOC_DEBUG
#if defined(DEBUG) || defined(_DEBUG)
#define NEDMALLOC_DEBUG 1
#else
#define NEDMALLOC_DEBUG 0
#endif
#endif
/* We need to consistently define DEBUG=0|1, _DEBUG and NDEBUG for dlmalloc */
#if !defined(DEBUG) && !defined(NDEBUG)
#ifdef __GNUC__
#warning DEBUG may not be defined but without NDEBUG being defined allocator will run with assert checking! Define NDEBUG to run at full speed.
#elif defined(_MSC_VER)
#pragma message(__FILE__ ": WARNING: DEBUG may not be defined but without NDEBUG being defined allocator will run with assert checking! Define NDEBUG to run at full speed.")
#endif
#endif
#undef DEBUG
#undef _DEBUG
#if NEDMALLOC_DEBUG
#define _DEBUG
#define DEBUG 1
#else
#define DEBUG 0
#endif
#ifdef NDEBUG /* Disable assert checking on release builds */
#undef DEBUG
#undef _DEBUG
#endif
/* The default of 64Kb means we spend too much time kernel-side */
#ifndef DEFAULT_GRANULARITY
#define DEFAULT_GRANULARITY (1*1024*1024)
#if DEBUG
#define DEFAULT_GRANULARITY_ALIGNED
#endif
#endif
/*#define USE_SPIN_LOCKS 0*/
#if ENABLE_USERMODEPAGEALLOCATOR
extern int OSHavePhysicalPageSupport(void);
extern void *userpage_malloc(size_t toallocate, unsigned flags);
extern int userpage_free(void *mem, size_t size);
extern void *userpage_realloc(void *mem, size_t oldsize, size_t newsize, int flags, unsigned flags2);
#define USERPAGE_TOPDOWN (M2_CUSTOM_FLAGS_BEGIN<<0)
#define USERPAGE_NOCOMMIT (M2_CUSTOM_FLAGS_BEGIN<<1)
/* This can provide a very significant speed boost */
#undef MMAP_CLEARS
#define MMAP_CLEARS 0
#define MUNMAP(h, a, s) (!OSHavePhysicalPageSupport() ? MUNMAP_DEFAULT((h), (a), (s)) : userpage_free((a), (s)))
#define MMAP(s, f) (!OSHavePhysicalPageSupport() ? MMAP_DEFAULT((s)) : userpage_malloc((s), (f)))
#define MREMAP(addr, osz, nsz, mv) (!OSHavePhysicalPageSupport() ? MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) : userpage_realloc((addr), (osz), (nsz), (mv), 0))
#define DIRECT_MMAP(h, s, f) (!OSHavePhysicalPageSupport() ? DIRECT_MMAP_DEFAULT((h), (s), (f)) : userpage_malloc((s), (f)|USERPAGE_TOPDOWN))
#define DIRECT_MREMAP(h, a, os, ns, f, f2) (!OSHavePhysicalPageSupport() ? DIRECT_MREMAP_DEFAULT((h), (a), (os), (ns), (f), (f2)) : userpage_realloc((a), (os), (ns), (f), (f2)|USERPAGE_TOPDOWN))
/*#undef MREMAP
#define MREMAP(addr, osz, nsz, mv) (!OSHavePhysicalPageSupport() ? MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) : MFAIL)*/
/*#undef DIRECT_MREMAP
#define DIRECT_MREMAP(h, a, os, ns, f, f2) (!OSHavePhysicalPageSupport() ? DIRECT_MREMAP_DEFAULT((h), (a), (os), (ns), (f), (f2)) : MFAIL)*/
#endif
#include "malloc.c.h"
#ifdef NDEBUG /* Disable assert checking on release builds */
#undef DEBUG
#endif
/* The default number of threads allowed into a pool at once */
#ifndef DEFAULTMAXTHREADSINPOOL
#define DEFAULTMAXTHREADSINPOOL 4
#endif
/* The maximum size to be allocated from the thread cache */
#ifndef THREADCACHEMAX
#define THREADCACHEMAX 8192
#elif THREADCACHEMAX && !defined(THREADCACHEMAXBINS)
#ifdef __GNUC__
#warning If you are changing THREADCACHEMAX, do you also need to change THREADCACHEMAXBINS=(topbitpos(THREADCACHEMAX)-4)?
#elif defined(_MSC_VER)
#pragma message(__FILE__ ": WARNING: If you are changing THREADCACHEMAX, do you also need to change THREADCACHEMAXBINS=(topbitpos(THREADCACHEMAX)-4)?")
#endif
#endif
/* The maximum concurrent threads in a pool possible */
#ifndef MAXTHREADSINPOOL
#define MAXTHREADSINPOOL 16
#endif
/* The maximum number of threadcaches which can be allocated */
#ifndef THREADCACHEMAXCACHES
#define THREADCACHEMAXCACHES 256
#endif
#ifndef THREADCACHEMAXBINS
#if 0
/* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */
#define THREADCACHEMAXBINS ((13-4)*2)
#else
/* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */
#define THREADCACHEMAXBINS (13-4)
#endif
#endif
/* Point at which the free space in a thread cache is garbage collected */
#ifndef THREADCACHEMAXFREESPACE
#define THREADCACHEMAXFREESPACE (1024*1024)
#endif
/* NEDMALLOC_FORCERESERVE is used to force malloc2 flags for normal malloc, calloc et al */
#ifndef NEDMALLOC_FORCERESERVE
#define NEDMALLOC_FORCERESERVE(p, mem, size) 0
#endif
/* ENABLE_LOGGING is a bitmask of what events to log */
#if ENABLE_LOGGING
#ifndef NEDMALLOC_LOGFILE
#define NEDMALLOC_LOGFILE "nedmalloc.csv"
#endif
#endif
/* NEDMALLOC_TESTLOGENTRY returns non-zero if the entry should be logged */
#ifndef NEDMALLOC_TESTLOGENTRY
#define NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned) ((type)&ENABLE_LOGGING)
#endif
/* NEDMALLOC_STACKBACKTRACEDEPTH has the logger store a stack backtrace per logged item. Slow! */
#ifndef NEDMALLOC_STACKBACKTRACEDEPTH
#define NEDMALLOC_STACKBACKTRACEDEPTH 0
#endif
#if NEDMALLOC_STACKBACKTRACEDEPTH
#include "Dbghelp.h"
#include "psapi.h"
#pragma comment(lib, "dbghelp.lib")
#pragma comment(lib, "psapi.lib")
#endif
#define NM_FLAGS_MASK (M2_FLAGS_MASK&~M2_ZERO_MEMORY)
#if USE_LOCKS
#ifdef WIN32
#define TLSVAR DWORD
#define TLSALLOC(k) (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k))
#define TLSFREE(k) (!TlsFree(k))
#define TLSGET(k) TlsGetValue(k)
#define TLSSET(k, a) (!TlsSetValue(k, a))
#ifdef DEBUG
static LPVOID ChkedTlsGetValue(DWORD idx)
{
LPVOID ret=TlsGetValue(idx);
assert(S_OK==GetLastError());
return ret;
}
#undef TLSGET
#define TLSGET(k) ChkedTlsGetValue(k)
#endif
#else
#define TLSVAR pthread_key_t
#define TLSALLOC(k) pthread_key_create(k, 0)
#define TLSFREE(k) pthread_key_delete(k)
#define TLSGET(k) pthread_getspecific(k)
#define TLSSET(k, a) pthread_setspecific(k, a)
#endif
#else /* Probably if you're not using locks then you don't want ANY pthread stuff at all */
#define TLSVAR void *
#define TLSALLOC(k) (*k=0)
#define TLSFREE(k) (k=0)
#define TLSGET(k) k
#define TLSSET(k, a) (k=a, 0)
#endif
#if ENABLE_USERMODEPAGEALLOCATOR
#include "usermodepageallocator.c"
#endif
#if defined(__cplusplus)
#if !defined(NO_NED_NAMESPACE)
namespace nedalloc {
#else
extern "C" {
#endif
#endif
#if USE_ALLOCATOR==0
static void *unsupported_operation(const char *opname) THROWSPEC
{
fprintf(stderr, "nedmalloc: The operation %s is not supported under this build configuration\n", opname);
abort();
return 0;
}
static size_t mspacecounter=(size_t) 0xdeadbeef;
#endif
#ifndef ENABLE_FAST_HEAP_DETECTION
static void *RESTRICT leastusedaddress;
static size_t largestusedblock;
#endif
/* Used to redirect system allocator ops if needed */
extern void *(*sysmalloc)(size_t);
extern void *(*syscalloc)(size_t, size_t);
extern void *(*sysrealloc)(void *, size_t);
extern void (*sysfree)(void *);
extern size_t (*sysblksize)(void *);
#if !defined(REPLACE_SYSTEM_ALLOCATOR) || (!defined(_MSC_VER) && !defined(__MINGW32__))
void *(*sysmalloc)(size_t)=malloc;
void *(*syscalloc)(size_t, size_t)=calloc;
void *(*sysrealloc)(void *, size_t)=realloc;
void (*sysfree)(void *)=free;
size_t (*sysblksize)(void *)=
#if defined(_MSC_VER) || defined(__MINGW32__)
/* This is the MSVCRT equivalent */
_msize;
#elif defined(__linux__) || defined(__FreeBSD__)
/* This is the glibc/ptmalloc2/dlmalloc/BSD equivalent. */
malloc_usable_size;
#elif defined(__APPLE__)
/* This is the Apple BSD libc equivalent. */
malloc_size;
#else
#error Cannot tolerate the memory allocator of an unknown system!
#endif
#else
/* Remove the MSVCRT dependency on the memory functions */
void *(*sysmalloc)(size_t);
void *(*syscalloc)(size_t, size_t);
void *(*sysrealloc)(void *, size_t);
void (*sysfree)(void *);
size_t (*sysblksize)(void *);
#endif
static FORCEINLINE NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void *CallMalloc(void *RESTRICT mspace, size_t size, size_t alignment, unsigned flags) THROWSPEC
{
void *RESTRICT ret=0;
#if USE_MAGIC_HEADERS
size_t _alignment=alignment;
size_t *_ret=0;
size+=alignment+3*sizeof(size_t);
_alignment=0;
#endif
#if USE_ALLOCATOR==0
ret=(flags & M2_ZERO_MEMORY) ? syscalloc(1, size) : sysmalloc(size); /* magic headers takes care of alignment */
#elif USE_ALLOCATOR==1
ret=mspace_malloc2((mstate) mspace, size, alignment, flags);
#ifndef ENABLE_FAST_HEAP_DETECTION
if(ret)
{
mchunkptr p=mem2chunk(ret);
size_t truesize=chunksize(p) - overhead_for(p);
if(!leastusedaddress || (void *)((mstate) mspace)->least_addr<leastusedaddress) leastusedaddress=(void *)((mstate) mspace)->least_addr;
if(!largestusedblock || truesize>largestusedblock) largestusedblock=(truesize+mparams.page_size) & ~(mparams.page_size-1);
}
#endif
#endif
if(!ret) return 0;
#if DEBUG
if(flags & M2_ZERO_MEMORY)
{
const char *RESTRICT n;
for(n=(const char *)ret; n<(const char *)ret+size; n++)
{
assert(!*n);
}
}
#endif
#if USE_MAGIC_HEADERS
_ret=(size_t *) ret;
ret=(void *)(_ret+3);
if(alignment) ret=(void *)(((size_t) ret+alignment-1)&~(alignment-1));
for(; _ret<(size_t *)ret-2; _ret++) *_ret=*(size_t *)"NEDMALOC";
_ret[0]=(size_t) mspace;
_ret[1]=size-3*sizeof(size_t);
#endif
return ret;
}
static FORCEINLINE NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void *CallRealloc(void *RESTRICT mspace, void *RESTRICT mem, int isforeign, size_t oldsize, size_t newsize, size_t alignment, unsigned flags) THROWSPEC
{
void *RESTRICT ret=0;
#if USE_MAGIC_HEADERS
mstate oldmspace=0;
size_t *_ret=0, *_mem=(size_t *) mem-3;
#endif
if(isforeign)
{ /* Transfer */
#if USE_MAGIC_HEADERS
assert(_mem[0]!=*(size_t *) "NEDMALOC");
#endif
if((ret=CallMalloc(mspace, newsize, alignment, flags)))
{
#if defined(DEBUG)
printf("*** nedmalloc frees system allocated block %p\n", mem);
#endif
memcpy(ret, mem, oldsize<newsize ? oldsize : newsize);
sysfree(mem);
}
return ret;
}
#if USE_MAGIC_HEADERS
assert(_mem[0]==*(size_t *) "NEDMALOC");
newsize+=3*sizeof(size_t);
oldmspace=(mstate) _mem[1];
assert(oldsize>=_mem[2]);
for(; *_mem==*(size_t *) "NEDMALOC"; *_mem--=*(size_t *) "nedmaloc");
mem=(void *)(++_mem);
#endif
#if USE_ALLOCATOR==0
ret=sysrealloc(mem, newsize);
#elif USE_ALLOCATOR==1
ret=mspace_realloc2((mstate) mspace, mem, newsize, alignment, flags);
#ifndef ENABLE_FAST_HEAP_DETECTION
if(ret)
{
mchunkptr p=mem2chunk(ret);
size_t truesize=chunksize(p) - overhead_for(p);
if(!leastusedaddress || (void *)((mstate) mspace)->least_addr<leastusedaddress) leastusedaddress=(void *)((mstate) mspace)->least_addr;
if(!largestusedblock || truesize>largestusedblock) largestusedblock=(truesize+mparams.page_size) & ~(mparams.page_size-1);
}
#endif
#endif
if(!ret)
{ /* Put it back the way it was */
#if USE_MAGIC_HEADERS
for(; *_mem==0; *_mem++=*(size_t *) "NEDMALOC");
#endif
return 0;
}
#if USE_MAGIC_HEADERS
_ret=(size_t *) ret;
ret=(void *)(_ret+3);
for(; _ret<(size_t *)ret-2; _ret++) *_ret=*(size_t *) "NEDMALOC";
_ret[0]=(size_t) mspace;
_ret[1]=newsize-3*sizeof(size_t);
#endif
return ret;
}
static FORCEINLINE void CallFree(void *RESTRICT mspace, void *RESTRICT mem, int isforeign) THROWSPEC
{
#if USE_MAGIC_HEADERS
mstate oldmspace=0;
size_t *_mem=(size_t *) mem-3, oldsize=0;
#endif
if(isforeign)
{
#if USE_MAGIC_HEADERS
assert(_mem[0]!=*(size_t *) "NEDMALOC");
#endif
#if defined(DEBUG)
printf("*** nedmalloc frees system allocated block %p\n", mem);
#endif
sysfree(mem);
return;
}
#if USE_MAGIC_HEADERS
assert(_mem[0]==*(size_t *) "NEDMALOC");
oldmspace=(mstate) _mem[1];
oldsize=_mem[2];
for(; *_mem==*(size_t *) "NEDMALOC"; *_mem--=*(size_t *) "nedmaloc");
mem=(void *)(++_mem);
#endif
#if USE_ALLOCATOR==0
sysfree(mem);
#elif USE_ALLOCATOR==1
mspace_free((mstate) mspace, mem);
#endif
}
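/*
 * A standalone sketch (not part of the original source) of the
 * magic-header technique used by CallMalloc/CallRealloc/CallFree when
 * USE_MAGIC_HEADERS is enabled: metadata words are stored just before the
 * pointer handed to the caller, so ownership and size can be recovered
 * later. The tag value and helper names here are illustrative.
 */
#if 0 /* illustrative only */
#define EXAMPLE_MAGIC ((size_t) 0x4E45444D)  /* arbitrary tag value */

static void *example_tagged_malloc(size_t size)
{
  size_t *base = (size_t *) malloc(2 * sizeof(size_t) + size);
  if(!base) return 0;
  base[0] = EXAMPLE_MAGIC;   /* lets free() verify ownership */
  base[1] = size;            /* lets a size query answer without lookup */
  return base + 2;
}

static size_t example_tagged_size(void *mem)
{
  size_t *base = (size_t *) mem - 2;
  return base[0] == EXAMPLE_MAGIC ? base[1] : 0;  /* 0 = foreign block */
}

static void example_tagged_free(void *mem)
{
  size_t *base = (size_t *) mem - 2;
  if(base[0] == EXAMPLE_MAGIC)
  {
    base[0] = 0;             /* scrub the tag to catch double frees */
    free(base);
  }
}
#endif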
static NEDMALLOCNOALIASATTR mstate nedblkmstate(void *RESTRICT mem) THROWSPEC
{
if(mem)
{
#if USE_MAGIC_HEADERS
size_t *_mem=(size_t *) mem-3;
if(_mem[0]==*(size_t *) "NEDMALOC")
{
return (mstate) _mem[1];
}
else return 0;
#else
#if USE_ALLOCATOR==0
/* Fail everything */
return 0;
#elif USE_ALLOCATOR==1
#ifdef ENABLE_FAST_HEAP_DETECTION
#ifdef WIN32
/* On Windows for RELEASE both x86 and x64 the NT heap precedes each block with an eight byte header
which looks like:
normal: 4 bytes of size, 4 bytes of [char < 64, char < 64, char < 64 bit 0 always set, char random ]
mmaped: 4 bytes of size 4 bytes of [zero, zero, 0xb, zero ]
On Windows for DEBUG both x86 and x64 the preceding four bytes is always 0xfdfdfdfd (no man's land).
*/
#pragma pack(push, 1)
struct _HEAP_ENTRY
{
USHORT Size;
USHORT PreviousSize;
UCHAR Cookie; /* SegmentIndex */
UCHAR Flags; /* always bit 0 (HEAP_ENTRY_BUSY). bit 1=(HEAP_ENTRY_EXTRA_PRESENT), bit 2=normal block (HEAP_ENTRY_FILL_PATTERN), bit 3=mmap block (HEAP_ENTRY_VIRTUAL_ALLOC). Bit 4 (HEAP_ENTRY_LAST_ENTRY) could be set */
UCHAR UnusedBytes;
UCHAR SmallTagIndex; /* fastbin index. Always one of 0x02, 0x03, 0x04 < 0x80 */
} *RESTRICT he=((struct _HEAP_ENTRY *) mem)-1;
#pragma pack(pop)
unsigned int header=((unsigned int *)mem)[-1], mask1=0x8080E100, result1, mask2=0xFFFFFF06, result2;
result1=header & mask1; /* Positive testing for NT heap */
result2=header & mask2; /* Positive testing for dlmalloc */
if(result1==0x00000100 && result2!=0x00000102)
{ /* This is likely a NT heap block */
return 0;
}
#endif
#ifdef __linux__
/* On Linux glibc uses ptmalloc2 (really dlmalloc) just as we do, but prev_foot contains rubbish
when the preceding block is allocated because ptmalloc2 finds the local mstate by rounding the ptr
down to the nearest megabyte. It's like dlmalloc with FOOTERS disabled. */
mchunkptr p=mem2chunk(mem);
mstate fm=get_mstate_for(p);
/* If it's a ptmalloc2 block, fm is likely to be some crazy value */
if(!is_aligned(fm)) return 0;
if((size_t)mem-(size_t)fm>=(size_t)1<<(SIZE_T_BITSIZE-1)) return 0;
if(ok_magic(fm))
return fm;
else
return 0;
if(1) { }
#endif
else
{
mchunkptr p=mem2chunk(mem);
mstate fm=get_mstate_for(p);
assert(ok_magic(fm)); /* If this fails, someone tried to free a block twice */
if(ok_magic(fm))
return fm;
}
#else
#ifdef WIN32
#ifdef _MSC_VER
__try
#elif defined(__MINGW32__)
__try1
#endif
#endif
{
/* We try to return zero here if it isn't one of our own blocks, however
the current block annotation scheme used by dlmalloc makes it impossible
to be absolutely sure of avoiding a segfault.
mchunkptr->prev_foot = mem-(2*size_t) = mstate ^ mparams.magic for PRECEDING block;
mchunkptr->head = mem-(1*size_t) = 8 multiple size of this block with bottom three bits = FLAG_BITS
FLAG_BITS = bit 0 is CINUSE (currently in use unless is mmap), bit 1 is PINUSE (previous block currently
in use unless mmap), bit 2 is UNUSED and currently is always zero.
*/
register void *RESTRICT leastusedaddress_=leastusedaddress; /* Cache these to avoid register reloading */
register size_t largestusedblock_=largestusedblock;
if(!is_aligned(mem)) return 0; /* Would fail very rarely as all allocators return aligned blocks */
if(mem<leastusedaddress_) return 0; /* Simple but effective */
{
mchunkptr p=mem2chunk(mem);
mstate fm=0;
int ismmapped=is_mmapped(p);
if((!ismmapped && !is_inuse(p)) || (p->head & FLAG4_BIT)) return 0;
/* Reduced uncertainty by 0.5^2 = 25.0% */
/* size should never exceed largestusedblock */
if(chunksize(p)-overhead_for(p)>largestusedblock_) return 0;
/* Reduced uncertainty by a minimum of 0.5^3 = 12.5%, maximum 0.5^16 = 0.0015% */
/* Having sanity checked prev_foot and head, check next block */
if(!ismmapped && (!next_pinuse(p) || (next_chunk(p)->head & FLAG4_BIT))) return 0;
/* Reduced uncertainty by 0.5^5 = 3.13% or 0.5^18 = 0.00038% */
#if 0
/* If previous block is free, check that its next block pointer equals us */
if(!ismmapped && !pinuse(p))
if(next_chunk(prev_chunk(p))!=p) return 0;
/* We could start comparing prev_foot's for similarity but it starts getting slow. */
#endif
fm = get_mstate_for(p);
if(!is_aligned(fm) || (void *)fm<leastusedaddress_) return 0;
#if 0
/* See if mem is lower in memory than mem */
if((size_t)mem-(size_t)fm>=(size_t)1<<(SIZE_T_BITSIZE-1)) return 0;
#endif
assert(ok_magic(fm)); /* If this fails, someone tried to free a block twice */
if(ok_magic(fm))
return fm;
}
}
#ifdef WIN32
#ifdef _MSC_VER
__except(1) { }
#elif defined(__MINGW32__)
__except1(1) { }
#endif
#endif
#endif
#endif
#endif
}
return 0;
}
NEDMALLOCNOALIASATTR size_t nedblksize(int *RESTRICT isforeign, void *RESTRICT mem, unsigned flags) THROWSPEC
{
if(mem)
{
if(isforeign) *isforeign=1;
#if USE_MAGIC_HEADERS
{
size_t *_mem=(size_t *) mem-3;
if(_mem[0]==*(size_t *) "NEDMALOC")
{
mstate mspace=(mstate) _mem[1];
size_t size=_mem[2];
if(isforeign) *isforeign=0;
return size;
}
}
#elif USE_ALLOCATOR==1
if((flags & NM_SKIP_TOLERANCE_CHECKS) || nedblkmstate(mem))
{
mchunkptr p=mem2chunk(mem);
if(isforeign) *isforeign=0;
return chunksize(p)-overhead_for(p);
}
#ifdef DEBUG
else
{
int a=1; /* Set breakpoints here if needed */
}
#endif
#endif
#if defined(ENABLE_TOLERANT_NEDMALLOC) || USE_ALLOCATOR==0
return sysblksize(mem);
#endif
}
return 0;
}
NEDMALLOCNOALIASATTR size_t nedmemsize(void *RESTRICT mem) THROWSPEC { return nedblksize(0, mem, 0); }
NEDMALLOCNOALIASATTR void nedsetvalue(void *v) THROWSPEC { nedpsetvalue((nedpool *) 0, v); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc((nedpool *) 0, size); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc((nedpool *) 0, no, size); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc((nedpool *) 0, mem, size); }
NEDMALLOCNOALIASATTR void nedfree(void *mem) THROWSPEC { nedpfree((nedpool *) 0, mem); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign((nedpool *) 0, alignment, bytes); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmalloc2(size_t size, size_t alignment, unsigned flags) THROWSPEC { return nedpmalloc2((nedpool *) 0, size, alignment, flags); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedrealloc2(void *mem, size_t size, size_t alignment, unsigned flags) THROWSPEC { return nedprealloc2((nedpool *) 0, mem, size, alignment, flags); }
NEDMALLOCNOALIASATTR void nedfree2(void *mem, unsigned flags) THROWSPEC { nedpfree2((nedpool *) 0, mem, flags); }
NEDMALLOCNOALIASATTR struct nedmallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo((nedpool *) 0); }
NEDMALLOCNOALIASATTR int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt((nedpool *) 0, parno, value); }
NEDMALLOCNOALIASATTR int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim((nedpool *) 0, pad); }
void nedmalloc_stats() THROWSPEC { nedpmalloc_stats((nedpool *) 0); }
NEDMALLOCNOALIASATTR size_t nedmalloc_footprint() THROWSPEC { return nedpmalloc_footprint((nedpool *) 0); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc((nedpool *) 0, elemsno, elemsize, chunks); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc((nedpool *) 0, elems, sizes, chunks); }
#ifdef WIN32
typedef unsigned __int64 timeCount;
static timeCount GetTimestamp()
{
static LARGE_INTEGER ticksPerSec;
static double scalefactor;
static timeCount baseCount;
LARGE_INTEGER val;
timeCount ret;
if(!scalefactor)
{
if(QueryPerformanceFrequency(&ticksPerSec))
scalefactor=ticksPerSec.QuadPart/1000000000000.0;
else
scalefactor=1;
}
if(!QueryPerformanceCounter(&val))
return (timeCount) GetTickCount() * 1000000000;
ret=(timeCount) (val.QuadPart/scalefactor);
if(!baseCount) baseCount=ret;
return ret-baseCount;
}
#else
#include <sys/time.h>
typedef unsigned long long timeCount;
static timeCount GetTimestamp()
{
static timeCount baseCount;
timeCount ret;
#ifdef CLOCK_MONOTONIC
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
ret=((timeCount) ts.tv_sec*1000000000000LL)+ts.tv_nsec*1000LL;
#else
struct timeval tv;
gettimeofday(&tv, 0);
ret=((timeCount) tv.tv_sec*1000000000000LL)+tv.tv_usec*1000000LL;
#endif
if(!baseCount) baseCount=ret;
return ret-baseCount;
}
#endif
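/* A note on units (inferred from the multipliers above, not from any spec):
   both GetTimestamp() implementations return elapsed picoseconds since the
   first call, i.e. 1 second == 1000000000000 ticks, which is the resolution
   the CSV logger below prints. */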
/* Set ENABLE_LOGGING to an AND mask of which of these you want to
log, so set it to 0xffffffff for everything */
typedef enum LogEntryType_t
{
LOGENTRY_MALLOC =(1<<0),
LOGENTRY_REALLOC =(1<<1),
LOGENTRY_FREE =(1<<2),
LOGENTRY_THREADCACHE_MALLOC =(1<<3),
LOGENTRY_THREADCACHE_FREE =(1<<4),
LOGENTRY_THREADCACHE_CLEAN =(1<<5),
LOGENTRY_POOL_MALLOC =(1<<6),
LOGENTRY_POOL_REALLOC =(1<<7),
LOGENTRY_POOL_FREE =(1<<8)
} LogEntryType;
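/* For example (illustrative, not a shipped configuration): a build that only
   wants to log user-facing malloc and free calls might be compiled with
     -DENABLE_LOGGING="(LOGENTRY_MALLOC|LOGENTRY_FREE)"
   Any OR-able subset of the entries above works, or 0xffffffff for
   everything as noted above. */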
#if NEDMALLOC_STACKBACKTRACEDEPTH
typedef struct StackFrameType_t
{
void *pc;
char module[64];
char functname[256];
char file[96];
int lineno;
} StackFrameType;
#endif
typedef struct logentry_t
{
timeCount timestamp;
nedpool *np;
LogEntryType type;
int mspace;
size_t size;
void *mem;
size_t alignment;
unsigned flags;
void *returned;
#if NEDMALLOC_STACKBACKTRACEDEPTH
StackFrameType stack[NEDMALLOC_STACKBACKTRACEDEPTH];
#endif
} logentry;
static const char *LogEntryTypeStrings[]={
"LOGENTRY_MALLOC",
"LOGENTRY_REALLOC",
"LOGENTRY_FREE",
"LOGENTRY_THREADCACHE_MALLOC",
"LOGENTRY_THREADCACHE_FREE",
"LOGENTRY_THREADCACHE_CLEAN",
"LOGENTRY_POOL_MALLOC",
"LOGENTRY_POOL_REALLOC",
"LOGENTRY_POOL_FREE",
"******************"
};
struct threadcacheblk_t;
typedef struct threadcacheblk_t threadcacheblk;
struct threadcacheblk_t
{ /* Keep less than 32 bytes as sizeof(threadcacheblk) is the minimum allocation size */
#ifdef FULLSANITYCHECKS
unsigned int magic;
#endif
int isforeign;
unsigned int lastUsed;
size_t size;
threadcacheblk *RESTRICT next, *RESTRICT prev;
};
typedef struct threadcache_t
{
#ifdef FULLSANITYCHECKS
unsigned int magic1;
#endif
int mymspace; /* Last mspace entry this thread used */
long threadid;
unsigned int mallocs, frees, successes;
#if ENABLE_LOGGING
logentry *logentries, *logentriesptr, *logentriesend;
#endif
size_t freeInCache; /* How much free space is stored in this cache */
threadcacheblk *RESTRICT bins[(THREADCACHEMAXBINS+1)*2];
#ifdef FULLSANITYCHECKS
unsigned int magic2;
#endif
} threadcache;
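/* Sketch of the bins[] layout assumed by the code below: each size bin idx
   occupies two adjacent slots, with bins[idx*2] the head of a doubly linked
   list (most recently freed block) and bins[idx*2+1] its tail (the oldest
   entry, which is the first candidate for eviction in RemoveCacheEntries()). */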
struct nedpool_t
{
#if USE_LOCKS
MLOCK_T mutex;
#endif
void *uservalue;
int threads; /* Max entries in m to use */
threadcache *RESTRICT caches[THREADCACHEMAXCACHES];
TLSVAR mycache; /* Thread cache for this thread. 0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */
mstate m[MAXTHREADSINPOOL+1]; /* mspace entries for this pool */
};
static nedpool syspool;
#if ENABLE_LOGGING
#if NEDMALLOC_STACKBACKTRACEDEPTH
#if defined(WIN32) && defined(_MSC_VER)
#define COPY_STRING(d, s, maxlen) { size_t len=strlen(s); len=(len>=maxlen) ? maxlen-1 : len; memcpy(d, s, len); d[len]=0; } /* >= leaves room for the NUL */
#pragma optimize("g", off)
static int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep, CONTEXT *ct) THROWSPEC
{
*ct=*ep->ContextRecord;
return EXCEPTION_EXECUTE_HANDLER;
}
static DWORD64 __stdcall GetModBase(HANDLE hProcess, DWORD64 dwAddr) THROWSPEC
{
DWORD64 modulebase;
// Try to get the module base if already loaded, otherwise load the module
modulebase=SymGetModuleBase64(hProcess, dwAddr);
if(modulebase)
return modulebase;
else
{
MEMORY_BASIC_INFORMATION stMBI ;
if ( 0 != VirtualQueryEx ( hProcess, (LPCVOID)(size_t)dwAddr, &stMBI, sizeof(stMBI)))
{
int n;
DWORD dwPathLen=0, dwNameLen=0 ;
TCHAR szFile[ MAX_PATH ], szModuleName[ MAX_PATH ] ;
MODULEINFO mi={0};
dwPathLen = GetModuleFileName ( (HMODULE) stMBI.AllocationBase , szFile, MAX_PATH );
dwNameLen = GetModuleBaseName (hProcess, (HMODULE) stMBI.AllocationBase , szModuleName, MAX_PATH );
for(n=dwNameLen; n>0; n--)
{
if(szModuleName[n]=='.')
{
szModuleName[n]=0;
break;
}
}
if(!GetModuleInformation(hProcess, (HMODULE) stMBI.AllocationBase, &mi, sizeof(mi)))
{
//fxmessage("WARNING: GetModuleInformation() returned error code %d\n", GetLastError());
}
if(!SymLoadModule64 ( hProcess, NULL, (PSTR)( (dwPathLen) ? szFile : 0), (PSTR)( (dwNameLen) ? szModuleName : 0),
(DWORD64) mi.lpBaseOfDll, mi.SizeOfImage))
{
//fxmessage("WARNING: SymLoadModule64() returned error code %d\n", GetLastError());
}
//fxmessage("%s, %p, %x, %x\n", szFile, mi.lpBaseOfDll, mi.SizeOfImage, (DWORD) mi.lpBaseOfDll+mi.SizeOfImage);
modulebase=SymGetModuleBase64(hProcess, dwAddr);
return modulebase;
}
}
return 0;
}
extern HANDLE sym_myprocess;
extern VOID (WINAPI *RtlCaptureContextAddr)(PCONTEXT);
extern void DeinitSym(void) THROWSPEC;
static void DoStackWalk(logentry *p) THROWSPEC
{
int i,i2;
HANDLE mythread=(HANDLE) GetCurrentThread();
STACKFRAME64 sf={ 0 };
CONTEXT ct={ 0 };
if(!sym_myprocess)
{
DWORD symopts;
DuplicateHandle(GetCurrentProcess(), GetCurrentProcess(), GetCurrentProcess(), &sym_myprocess, 0, FALSE, DUPLICATE_SAME_ACCESS);
symopts=SymGetOptions();
SymSetOptions(symopts /*| SYMOPT_DEFERRED_LOADS*/ | SYMOPT_LOAD_LINES);
SymInitialize(sym_myprocess, NULL, TRUE);
atexit(DeinitSym);
}
ct.ContextFlags=CONTEXT_FULL;
// Use RtlCaptureContext() if we have it as it saves an exception throw
if((VOID (WINAPI *)(PCONTEXT)) -1==RtlCaptureContextAddr)
RtlCaptureContextAddr=(VOID (WINAPI *)(PCONTEXT)) GetProcAddress(GetModuleHandle(L"kernel32"), "RtlCaptureContext");
if(RtlCaptureContextAddr)
RtlCaptureContextAddr(&ct);
else
{ // This is nasty, but it works
__try
{
int *foo=0;
*foo=78;
}
__except (ExceptionFilter(GetExceptionCode(), GetExceptionInformation(), &ct))
{
}
}
sf.AddrPC.Mode=sf.AddrStack.Mode=sf.AddrFrame.Mode=AddrModeFlat;
#if !(defined(_M_AMD64) || defined(_M_X64))
sf.AddrPC.Offset =ct.Eip;
sf.AddrStack.Offset=ct.Esp;
sf.AddrFrame.Offset=ct.Ebp;
#else
sf.AddrPC.Offset =ct.Rip;
sf.AddrStack.Offset=ct.Rsp;
sf.AddrFrame.Offset=ct.Rbp; // maybe Rdi?
#endif
for(i2=0; i2<NEDMALLOC_STACKBACKTRACEDEPTH; i2++)
{
IMAGEHLP_MODULE64 ihm={ sizeof(IMAGEHLP_MODULE64) };
char temp[MAX_PATH+sizeof(IMAGEHLP_SYMBOL64)];
IMAGEHLP_SYMBOL64 *ihs;
IMAGEHLP_LINE64 ihl={ sizeof(IMAGEHLP_LINE64) };
DWORD64 offset;
if(!StackWalk64(
#if !(defined(_M_AMD64) || defined(_M_X64))
IMAGE_FILE_MACHINE_I386,
#else
IMAGE_FILE_MACHINE_AMD64,
#endif
sym_myprocess, mythread, &sf, &ct, NULL, SymFunctionTableAccess64, GetModBase, NULL))
break;
if(0==sf.AddrPC.Offset)
break;
i=i2;
if(i) // Skip first entry relating to this function
{
DWORD lineoffset=0;
p->stack[i-1].pc=(void *)(size_t) sf.AddrPC.Offset;
if(SymGetModuleInfo64(sym_myprocess, sf.AddrPC.Offset, &ihm))
{
char *leaf;
leaf=strrchr(ihm.ImageName, '\\');
if(!leaf) leaf=ihm.ImageName-1;
COPY_STRING(p->stack[i-1].module, leaf+1, sizeof(p->stack[i-1].module));
}
else strcpy(p->stack[i-1].module, "<unknown>");
//fxmessage("WARNING: SymGetModuleInfo64() returned error code %d\n", GetLastError());
memset(temp, 0, MAX_PATH+sizeof(IMAGEHLP_SYMBOL64));
ihs=(IMAGEHLP_SYMBOL64 *) temp;
ihs->SizeOfStruct=sizeof(IMAGEHLP_SYMBOL64);
ihs->Address=sf.AddrPC.Offset;
ihs->MaxNameLength=MAX_PATH;
if(SymGetSymFromAddr64(sym_myprocess, sf.AddrPC.Offset, &offset, ihs))
{
COPY_STRING(p->stack[i-1].functname, ihs->Name, sizeof(p->stack[i-1].functname));
if(strlen(p->stack[i-1].functname)<sizeof(p->stack[i-1].functname)-8)
{
            sprintf(strchr(p->stack[i-1].functname, 0), " +0x%x", (unsigned int) offset); /* offset is DWORD64; cast to match %x */
}
}
else
strcpy(p->stack[i-1].functname, "<unknown>");
if(SymGetLineFromAddr64(sym_myprocess, sf.AddrPC.Offset, &lineoffset, &ihl))
{
char *leaf;
p->stack[i-1].lineno=ihl.LineNumber;
leaf=strrchr(ihl.FileName, '\\');
if(!leaf) leaf=ihl.FileName-1;
COPY_STRING(p->stack[i-1].file, leaf+1, sizeof(p->stack[i-1].file));
}
else
strcpy(p->stack[i-1].file, "<unknown>");
}
}
}
#pragma optimize("g", on)
#else
static void DoStackWalk(logentry *p) THROWSPEC
{
void *backtr[NEDMALLOC_STACKBACKTRACEDEPTH];
size_t size;
char **strings;
size_t i2;
size=backtrace(backtr, NEDMALLOC_STACKBACKTRACEDEPTH);
strings=backtrace_symbols(backtr, size);
for(i2=0; i2<size; i2++)
{ // Format can be <file path>(<mangled symbol>+0x<offset>) [<pc>]
// or can be <file path> [<pc>]
int start=0, end=strlen(strings[i2]), which=0, idx;
for(idx=0; idx<end; idx++)
{
if(0==which && (' '==strings[i2][idx] || '('==strings[i2][idx]))
{
        int len=FXMIN(idx-start, (int) sizeof(p->stack[i2].file)-1); /* leave room for the NUL terminator */
memcpy(p->stack[i2].file, strings[i2]+start, len);
p->stack[i2].file[len]=0;
which=(' '==strings[i2][idx]) ? 2 : 1;
start=idx+1;
}
else if(1==which && ')'==strings[i2][idx])
{
FXString functname(strings[i2]+start, idx-start);
FXint offset=functname.rfind("+0x");
FXString rawsymbol(functname.left(offset));
FXString symbol(rawsymbol.length() ? fxdemanglesymbol(rawsymbol, false) : rawsymbol);
symbol.append(functname.mid(offset));
        int len=FXMIN(symbol.length(), (int) sizeof(p->stack[i2].functname)-1); /* leave room for the NUL terminator */
memcpy(p->stack[i2].functname, symbol.text(), len);
p->stack[i2].functname[len]=0;
which=2;
}
else if(2==which && '['==strings[i2][idx])
{
start=idx+1;
which=3;
}
else if(3==which && ']'==strings[i2][idx])
{
FXString v(strings[i2]+start+2, idx-start-2);
p->stack[i2].pc=(void *)(FXuval)v.toULong(0, 16);
}
}
}
free(strings);
}
#endif
#endif
#endif
static FORCEINLINE logentry *LogOperation(threadcache *tc, nedpool *np, LogEntryType type, int mspace, size_t size, void *mem, size_t alignment, unsigned flags, void *returned) THROWSPEC
{
#if ENABLE_LOGGING
if(tc->logentries && NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned))
{
logentry *le;
if(tc->logentriesptr==tc->logentriesend)
{
mchunkptr cp=mem2chunk(tc->logentries);
size_t logentrieslen=chunksize(cp)-overhead_for(cp);
le=(logentry *) CallRealloc(0, tc->logentries, 0, logentrieslen, (logentrieslen*3)/2, 0, M2_ZERO_MEMORY|M2_ALWAYS_MMAP|M2_RESERVE_MULT(8));
if(!le) return 0;
tc->logentriesptr=le+(tc->logentriesptr-tc->logentries);
tc->logentries=le;
cp=mem2chunk(tc->logentries);
logentrieslen=(chunksize(cp)-overhead_for(cp))/sizeof(logentry);
tc->logentriesend=tc->logentries+logentrieslen;
}
le=tc->logentriesptr++;
assert(le+1<=tc->logentriesend);
le->timestamp=GetTimestamp();
le->np=np;
le->type=type;
le->mspace=mspace;
le->size=size;
le->mem=mem;
le->alignment=alignment;
le->flags=flags;
le->returned=returned;
#if NEDMALLOC_STACKBACKTRACEDEPTH
DoStackWalk(le);
#endif
return le;
}
#endif
return 0;
}
static FORCEINLINE NEDMALLOCNOALIASATTR unsigned int size2binidx(size_t _size) THROWSPEC
{ /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */
unsigned int topbit, size=(unsigned int)(_size>>4);
/* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */
#if defined(__GNUC__)
topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size);
#elif defined(_MSC_VER) && _MSC_VER>=1300
{
unsigned long bsrTopBit;
_BitScanReverse(&bsrTopBit, size);
topbit = bsrTopBit;
}
#else
#if 0
union {
unsigned asInt[2];
double asDouble;
};
int n;
asDouble = (double)size + 0.5;
topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023;
#else
{
unsigned int x=size;
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >>16);
x = ~x;
x = x - ((x >> 1) & 0x55555555);
x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
x = (x + (x >> 4)) & 0x0F0F0F0F;
x = x + (x << 8);
x = x + (x << 16);
topbit=31 - (x >> 24);
}
#endif
#endif
return topbit;
}
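/* Illustrative sketch (kept out of the build): a worked example of the bin
   mapping above, assuming the default THREADCACHEMAX/THREADCACHEMAXBINS and
   replicating the round-up-one-bin step the callers below perform. */
#if 0
static void size2binidx_example(void)
{ /* size 100: 100>>4 = 6 (binary 110), top set bit is position 2, so idx==2
     and the base bin size is 1<<(2+4) == 64; as 100 > 64 the callers bump
     up one bin to idx==3, bestsize==128. */
  unsigned int idx=size2binidx(100);
  unsigned int bestsize=1<<(idx+4);
  if(100>bestsize) { idx++; bestsize<<=1; }
  assert(3==idx && 128==bestsize);
}
#endif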
#ifdef FULLSANITYCHECKS
static void tcsanitycheck(threadcacheblk *RESTRICT *RESTRICT ptr) THROWSPEC
{
assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1]));
if(ptr[0] && ptr[1])
{
assert(ptr[0]->isforeign || nedblkmstate(ptr[0]));
assert(ptr[1]->isforeign || nedblkmstate(ptr[1]));
assert(nedblksize(0, ptr[0], 0)>=sizeof(threadcacheblk));
assert(nedblksize(0, ptr[1], 0)>=sizeof(threadcacheblk));
assert(*(unsigned int *) "NEDN"==ptr[0]->magic);
assert(*(unsigned int *) "NEDN"==ptr[1]->magic);
assert(!ptr[0]->prev);
assert(!ptr[1]->next);
if(ptr[0]==ptr[1])
{
assert(!ptr[0]->next);
assert(!ptr[1]->prev);
}
}
}
static void tcfullsanitycheck(threadcache *tc) THROWSPEC
{
threadcacheblk *RESTRICT *RESTRICT tcbptr=tc->bins;
int n;
for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
{
threadcacheblk *RESTRICT b, *RESTRICT ob=0;
tcsanitycheck(tcbptr);
for(b=tcbptr[0]; b; ob=b, b=b->next)
{
assert(b->isforeign || nedblkmstate(b));
assert(nedblksize(0, b, 0)>=sizeof(threadcacheblk));
assert(*(unsigned int *) "NEDN"==b->magic);
assert(!ob || ob->next==b);
assert(!ob || b->prev==ob);
}
}
}
#endif
static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC;
static NOINLINE void RemoveCacheEntries(nedpool *RESTRICT p, threadcache *RESTRICT tc, unsigned int age) THROWSPEC
{
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
if(tc->freeInCache)
{
threadcacheblk *RESTRICT *RESTRICT tcbptr=tc->bins;
int n;
for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
{
threadcacheblk *RESTRICT *RESTRICT tcb=tcbptr+1; /* come from oldest end of list */
/*tcsanitycheck(tcbptr);*/
for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; )
{
threadcacheblk *RESTRICT f=*tcb;
size_t blksize=f->size; /*nedblksize(f);*/
assert(blksize<=nedblksize(0, f, 0));
assert(blksize);
#ifdef FULLSANITYCHECKS
assert(*(unsigned int *) "NEDN"==(*tcb)->magic);
#endif
*tcb=(*tcb)->prev;
if(*tcb)
(*tcb)->next=0;
else
*tcbptr=0;
tc->freeInCache-=blksize;
assert((long) tc->freeInCache>=0);
CallFree(0, f, f->isforeign);
/*tcsanitycheck(tcbptr);*/
LogOperation(tc, p, LOGENTRY_THREADCACHE_CLEAN, age, blksize, f, 0, 0, 0);
}
}
}
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
}
size_t nedflushlogs(nedpool *p, char *filepath) THROWSPEC
{
size_t count=0;
if(!p)
{
p=&syspool;
if(!syspool.threads) InitPool(&syspool, 0, -1);
}
if(p->caches)
{
threadcache *tc;
int n;
#if ENABLE_LOGGING
int haslogentries=0;
#endif
for(n=0; n<THREADCACHEMAXCACHES; n++)
{
if((tc=p->caches[n]))
{
count+=tc->freeInCache;
tc->frees++;
RemoveCacheEntries(p, tc, 0);
assert(!tc->freeInCache);
#if ENABLE_LOGGING
haslogentries|=!!tc->logentries;
#endif
}
}
#if ENABLE_LOGGING
if(haslogentries)
{
char buffer[MAX_PATH]=NEDMALLOC_LOGFILE;
FILE *oh;
fpos_t pos1, pos2;
if(!filepath) filepath=buffer;
oh=fopen(filepath, "r+");
while(!oh)
{
char *bptr;
if((oh=fopen(filepath, "w"))) break;
if(ENOSPC==errno) break;
        bptr=strrchr(filepath, '.');
        if(!bptr || bptr-filepath>=MAX_PATH-6) abort(); /* no extension to mangle, or no room left */
        memcpy(bptr, "!.csv", 6);
}
if(oh)
{
fgetpos(oh, &pos1);
fseek(oh, 0, SEEK_END);
fgetpos(oh, &pos2);
if(pos1==pos2)
fprintf(oh, "Timestamp, Pool, Operation, MSpace, Size, Block, Alignment, Flags, Returned,\"Stack Backtrace\"\n");
for(n=0; n<THREADCACHEMAXCACHES; n++)
{
if((tc=p->caches[n]) && tc->logentries)
{
logentry *le;
for(le=tc->logentries; le<tc->logentriesptr; le++)
{
const char *LogEntryTypeString=LogEntryTypeStrings[size2binidx(((size_t)le->type)<<4)];
char stackbacktrace[16384]="?";
#if NEDMALLOC_STACKBACKTRACEDEPTH
char *sbtp=stackbacktrace;
int i;
for(i=0; i<NEDMALLOC_STACKBACKTRACEDEPTH && le->stack[i].pc; i++)
{
sbtp+=sprintf(sbtp, "0x%p:%s:%s (%s:%u),",
le->stack[i].pc, le->stack[i].module, le->stack[i].functname, le->stack[i].file, le->stack[i].lineno);
if(sbtp>=stackbacktrace+sizeof(stackbacktrace)) abort();
}
if(NEDMALLOC_STACKBACKTRACEDEPTH==i)
strcpy(sbtp, "<backtrace may continue ...>");
else
strcpy(sbtp, "<backtrace ends>");
if(strchr(sbtp, 0)>=stackbacktrace+sizeof(stackbacktrace)) abort();
#endif
fprintf(oh, "%llu, 0x%p, %s, %d, %Iu, 0x%p, %Iu, 0x%x, 0x%p,\"%s\"\n",
le->timestamp, le->np, LogEntryTypeString, le->mspace, le->size, le->mem, le->alignment, le->flags, le->returned, stackbacktrace);
}
CallFree(0, tc->logentries, 0);
tc->logentries=tc->logentriesptr=tc->logentriesend=0;
}
}
fclose(oh);
}
}
#endif
}
return count;
}
static void DestroyCaches(nedpool *RESTRICT p) THROWSPEC
{
if(p->caches)
{
threadcache *tc;
int n;
nedflushlogs(p, 0);
for(n=0; n<THREADCACHEMAXCACHES; n++)
{
if((tc=p->caches[n]))
{
tc->mymspace=-1;
tc->threadid=0;
CallFree(0, tc, 0);
p->caches[n]=0;
}
}
}
}
static NOINLINE threadcache *AllocCache(nedpool *RESTRICT p) THROWSPEC
{
threadcache *tc=0;
int n, end;
#if USE_LOCKS
ACQUIRE_LOCK(&p->mutex);
#endif
for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++);
if(THREADCACHEMAXCACHES==n)
{ /* List exhausted, so disable for this thread */
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
#endif
return 0;
}
tc=p->caches[n]=(threadcache *) CallMalloc(p->m[0], sizeof(threadcache), 0, M2_ZERO_MEMORY);
if(!tc)
{
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
#endif
return 0;
}
#ifdef FULLSANITYCHECKS
tc->magic1=*(unsigned int *)"NEDMALC1";
tc->magic2=*(unsigned int *)"NEDMALC2";
#endif
tc->threadid=
#if USE_LOCKS
(long)(size_t)CURRENT_THREAD;
#else
1;
#endif
for(end=0; p->m[end]; end++);
tc->mymspace=abs(tc->threadid) % end;
#if ENABLE_LOGGING
{
mchunkptr cp;
size_t logentrieslen=2048/sizeof(logentry); /* One page */
tc->logentries=tc->logentriesptr=(logentry *) CallMalloc(p->m[0], logentrieslen*sizeof(logentry), 0, M2_ZERO_MEMORY|M2_ALWAYS_MMAP|M2_RESERVE_MULT(8));
if(!tc->logentries)
{
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
#endif
return 0;
}
cp=mem2chunk(tc->logentries);
logentrieslen=(chunksize(cp)-overhead_for(cp))/sizeof(logentry);
tc->logentriesend=tc->logentries+logentrieslen;
}
#endif
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
#endif
if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort();
return tc;
}
static void *threadcache_malloc(nedpool *RESTRICT p, threadcache *RESTRICT tc, size_t *RESTRICT _size) THROWSPEC
{
void *RESTRICT ret=0;
size_t size=*_size, blksize=0;
unsigned int bestsize;
unsigned int idx=size2binidx(size);
threadcacheblk *RESTRICT blk, *RESTRICT *RESTRICT binsptr;
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
/* Calculate best fit bin size */
bestsize=1<<(idx+4);
#if 0
/* Finer grained bin fit */
idx<<=1;
if(size>bestsize)
{
idx++;
bestsize+=bestsize>>1;
}
if(size>bestsize)
{
idx++;
bestsize=1<<(4+(idx>>1));
}
#else
if(size>bestsize)
{
idx++;
bestsize<<=1;
}
#endif
assert(bestsize>=size);
if(size<bestsize) size=bestsize;
assert(size<=THREADCACHEMAX);
assert(idx<=THREADCACHEMAXBINS);
binsptr=&tc->bins[idx*2];
/* Try to match close, but move up a bin if necessary */
blk=*binsptr;
if(!blk || blk->size<size)
{ /* Bump it up a bin */
if(idx<THREADCACHEMAXBINS)
{
idx++;
binsptr+=2;
blk=*binsptr;
}
}
if(blk)
{
blksize=blk->size; /*nedblksize(blk);*/
assert(nedblksize(0, blk, 0)>=blksize);
assert(blksize>=size);
if(blk->next)
blk->next->prev=0;
*binsptr=blk->next;
if(!*binsptr)
binsptr[1]=0;
#ifdef FULLSANITYCHECKS
blk->magic=0;
#endif
assert(binsptr[0]!=blk && binsptr[1]!=blk);
assert(nedblksize(0, blk, 0)>=sizeof(threadcacheblk) && nedblksize(0, blk, 0)<=THREADCACHEMAX+CHUNK_OVERHEAD);
/*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) _size);*/
ret=(void *) blk;
}
++tc->mallocs;
if(ret)
{
assert(blksize>=size);
++tc->successes;
tc->freeInCache-=blksize;
assert((long) tc->freeInCache>=0);
}
#if defined(DEBUG) && 0
if(!(tc->mallocs & 0xfff))
{
printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs,
(float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache);
}
#endif
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
*_size=size;
return ret;
}
static NOINLINE void ReleaseFreeInCache(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace) THROWSPEC
{
unsigned int age=THREADCACHEMAXFREESPACE/8192;
#if USE_LOCKS
/*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/
#endif
while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE)
{
RemoveCacheEntries(p, tc, age);
/*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/
age>>=1;
}
#if USE_LOCKS
/*RELEASE_LOCK(&p->m[mymspace]->mutex);*/
#endif
}
static void threadcache_free(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace, void *RESTRICT mem, size_t size, int isforeign) THROWSPEC
{
unsigned int bestsize;
unsigned int idx=size2binidx(size);
threadcacheblk *RESTRICT *RESTRICT binsptr, *RESTRICT tck=(threadcacheblk *RESTRICT) mem;
assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD);
#ifdef DEBUG
/* Make sure this is a valid memory block */
assert(nedblksize(0, mem, 0));
#endif
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
/* Calculate best fit bin size */
bestsize=1<<(idx+4);
#if 0
/* Finer grained bin fit */
idx<<=1;
if(size>bestsize)
{
    unsigned int biggerbestsize=bestsize+(bestsize>>1); /* 1.5x step, matching the finer grained fit in threadcache_malloc() */
if(size>=biggerbestsize)
{
idx++;
bestsize=biggerbestsize;
}
}
#endif
if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */
size=bestsize;
binsptr=&tc->bins[idx*2];
assert(idx<=THREADCACHEMAXBINS);
if(tck==*binsptr)
{
fprintf(stderr, "nedmalloc: Attempt to free already freed memory block %p - aborting!\n", tck);
abort();
}
#ifdef FULLSANITYCHECKS
tck->magic=*(unsigned int *) "NEDN";
#endif
tck->isforeign=isforeign;
tck->lastUsed=++tc->frees;
tck->size=(unsigned int) size;
tck->next=*binsptr;
tck->prev=0;
if(tck->next)
tck->next->prev=tck;
else
binsptr[1]=tck;
assert(!*binsptr || (*binsptr)->size==tck->size);
*binsptr=tck;
assert(tck==tc->bins[idx*2]);
assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck);
/*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/
tc->freeInCache+=size;
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
#if 1
if(tc->freeInCache>=THREADCACHEMAXFREESPACE)
ReleaseFreeInCache(p, tc, mymspace);
#endif
}
static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC
{ /* threads is -1 for system pool */
ensure_initialization();
ACQUIRE_MALLOC_GLOBAL_LOCK();
if(p->threads) goto done;
#if USE_LOCKS
if(INITIAL_LOCK(&p->mutex)) goto err;
#endif
if(TLSALLOC(&p->mycache)) goto err;
#if USE_ALLOCATOR==0
p->m[0]=(mstate) mspacecounter++;
#elif USE_ALLOCATOR==1
if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err;
p->m[0]->extp=p;
#endif
p->threads=(threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : (!threads) ? DEFAULTMAXTHREADSINPOOL : threads;
done:
RELEASE_MALLOC_GLOBAL_LOCK();
return 1;
err:
if(threads<0)
abort(); /* If you can't allocate for system pool, we're screwed */
DestroyCaches(p);
if(p->m[0])
{
#if USE_ALLOCATOR==1
destroy_mspace(p->m[0]);
#endif
p->m[0]=0;
}
if(p->mycache)
{
if(TLSFREE(p->mycache)) abort();
p->mycache=0;
}
RELEASE_MALLOC_GLOBAL_LOCK();
return 0;
}
static NOINLINE mstate FindMSpace(nedpool *RESTRICT p, threadcache *RESTRICT tc, int *RESTRICT lastUsed, size_t size) THROWSPEC
{ /* Gets called when thread's last used mspace is in use. The strategy
is to run through the list of all available mspaces looking for an
unlocked one and if we fail, we create a new one so long as we don't
exceed p->threads */
int n, end;
n=end=*lastUsed+1;
#if USE_LOCKS
for(; p->m[n]; end=++n)
{
if(TRY_LOCK(&p->m[n]->mutex)) goto found;
}
for(n=0; n<*lastUsed && p->m[n]; n++)
{
if(TRY_LOCK(&p->m[n]->mutex)) goto found;
}
if(end<p->threads)
{
mstate temp;
#if USE_ALLOCATOR==0
temp=(mstate) mspacecounter++;
#elif USE_ALLOCATOR==1
if(!(temp=(mstate) create_mspace(size, 1)))
goto badexit;
#endif
/* Now we're ready to modify the lists, we lock */
ACQUIRE_LOCK(&p->mutex);
while(p->m[end] && end<p->threads)
end++;
if(end>=p->threads)
{ /* Drat, must destroy it now */
RELEASE_LOCK(&p->mutex);
#if USE_ALLOCATOR==1
destroy_mspace((mstate) temp);
#endif
goto badexit;
}
/* We really want to make sure this goes into memory now but we
have to be careful of breaking aliasing rules, so write it twice */
{
volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end];
*_m=(p->m[end]=temp);
}
ACQUIRE_LOCK(&p->m[end]->mutex);
/*printf("Created mspace idx %d\n", end);*/
RELEASE_LOCK(&p->mutex);
n=end;
goto found;
}
/* Let it lock on the last one it used */
badexit:
ACQUIRE_LOCK(&p->m[*lastUsed]->mutex);
return p->m[*lastUsed];
#endif
found:
*lastUsed=n;
if(tc)
tc->mymspace=n;
else
{
if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort();
}
return p->m[n];
}
typedef struct PoolList_t
{
size_t size; /* Size of list */
size_t length; /* Actual entries in list */
#ifdef DEBUG
nedpool *list[1]; /* Force testing of list expansion */
#else
nedpool *list[16];
#endif
} PoolList;
#if USE_LOCKS
static MLOCK_T poollistlock;
#endif
static PoolList *poollist;
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC
{
nedpool *ret=0;
if(!poollist)
{
PoolList *newpoollist=0;
if(!(newpoollist=(PoolList *) nedpcalloc(0, 1, sizeof(PoolList)+sizeof(nedpool *)))) return 0;
#if USE_LOCKS
INITIAL_LOCK(&poollistlock);
ACQUIRE_LOCK(&poollistlock);
#endif
poollist=newpoollist;
poollist->size=sizeof(poollist->list)/sizeof(nedpool *);
}
#if USE_LOCKS
else
ACQUIRE_LOCK(&poollistlock);
#endif
if(poollist->length==poollist->size)
{
PoolList *newpoollist=0;
size_t newsize=0;
newsize=sizeof(PoolList)+(poollist->size+1)*sizeof(nedpool *);
if(!(newpoollist=(PoolList *) nedprealloc(0, poollist, newsize))) goto badexit;
poollist=newpoollist;
memset(&poollist->list[poollist->size], 0, newsize-((size_t)&poollist->list[poollist->size]-(size_t)&poollist->list[0]));
poollist->size=((newsize-((char *)&poollist->list[0]-(char *)poollist))/sizeof(nedpool *))-1;
assert(poollist->size>poollist->length);
}
if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) goto badexit;
if(!InitPool(ret, capacity, threads))
{
nedpfree(0, ret);
goto badexit;
}
poollist->list[poollist->length++]=ret;
badexit:
{
#if USE_LOCKS
RELEASE_LOCK(&poollistlock);
#endif
}
return ret;
}
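/* Illustrative sketch (kept out of the build): the pool lifecycle the
   functions above and below are designed around. */
#if 0
static void example_pool_usage(void)
{
  nedpool *pool=nedcreatepool(1024*1024, 2); /* ~1Mb capacity, two threads */
  void *mem=nedpmalloc(pool, 256);
  mem=nedprealloc(pool, mem, 512);
  nedpfree(pool, mem);
  neddestroypool(pool);
}
#endif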
void neddestroypool(nedpool *p) THROWSPEC
{
unsigned int n;
#if USE_LOCKS
ACQUIRE_LOCK(&p->mutex);
#endif
DestroyCaches(p);
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
destroy_mspace(p->m[n]);
#endif
p->m[n]=0;
}
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
DESTROY_LOCK(&p->mutex);
#endif
if(TLSFREE(p->mycache)) abort();
nedpfree(0, p);
#if USE_LOCKS
ACQUIRE_LOCK(&poollistlock);
#endif
assert(poollist);
for(n=0; n<poollist->length && poollist->list[n]!=p; n++);
assert(n!=poollist->length);
memmove(&poollist->list[n], &poollist->list[n+1], (size_t)&poollist->list[poollist->length]-(size_t)&poollist->list[n]);
if(!--poollist->length)
{
assert(!poollist->list[0]);
nedpfree(0, poollist);
poollist=0;
}
#if USE_LOCKS
RELEASE_LOCK(&poollistlock);
#endif
}
void neddestroysyspool() THROWSPEC
{
nedpool *p=&syspool;
int n;
#if USE_LOCKS
ACQUIRE_LOCK(&p->mutex);
#endif
DestroyCaches(p);
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
destroy_mspace(p->m[n]);
#endif
p->m[n]=0;
}
/* Render syspool unusable */
for(n=0; n<THREADCACHEMAXCACHES; n++)
p->caches[n]=(threadcache *)(size_t)(sizeof(size_t)>4 ? 0xdeadbeefdeadbeefULL : 0xdeadbeefUL);
for(n=0; n<MAXTHREADSINPOOL+1; n++)
p->m[n]=(mstate)(size_t)(sizeof(size_t)>4 ? 0xdeadbeefdeadbeefULL : 0xdeadbeefUL);
if(TLSFREE(p->mycache)) abort();
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
DESTROY_LOCK(&p->mutex);
#endif
}
nedpool **nedpoollist() THROWSPEC
{
nedpool **ret=0;
if(poollist)
{
#if USE_LOCKS
ACQUIRE_LOCK(&poollistlock);
#endif
if(!(ret=(nedpool **) nedmalloc((poollist->length+1)*sizeof(nedpool *)))) goto badexit;
memcpy(ret, poollist->list, (poollist->length+1)*sizeof(nedpool *));
badexit:
{
#if USE_LOCKS
RELEASE_LOCK(&poollistlock);
#endif
}
}
return ret;
}
void nedpsetvalue(nedpool *p, void *v) THROWSPEC
{
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
p->uservalue=v;
}
void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
{
nedpool *np=0;
mstate fm=nedblkmstate(mem);
if(!fm || !fm->extp) return 0;
np=(nedpool *) fm->extp;
if(p) *p=np;
return np->uservalue;
}
void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC
{
int mycache;
if(!p)
{
p=&syspool;
if(!syspool.threads) InitPool(&syspool, 0, -1);
}
mycache=(int)(size_t) TLSGET(p->mycache);
if(!mycache)
{ /* Set to mspace 0 */
if(disable && TLSSET(p->mycache, (void *)(size_t)-1)) abort();
}
else if(mycache>0)
{ /* Set to last used mspace */
threadcache *tc=p->caches[mycache-1];
#if defined(DEBUG)
printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n",
100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs);
#endif
if(disable && TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort();
tc->frees++;
RemoveCacheEntries(p, tc, 0);
assert(!tc->freeInCache);
if(disable)
{
tc->mymspace=-1;
tc->threadid=0;
CallFree(0, p->caches[mycache-1], 0);
p->caches[mycache-1]=0;
}
}
}
void neddisablethreadcache(nedpool *p) THROWSPEC
{
nedtrimthreadcache(p, 1);
}
#if USE_LOCKS && USE_ALLOCATOR==1
#define GETMSPACE(m,p,tc,ms,s,action) \
do \
{ \
mstate m = GetMSpace((p),(tc),(ms),(s)); \
action; \
RELEASE_LOCK(&m->mutex); \
} while (0)
#else
#define GETMSPACE(m,p,tc,ms,s,action) \
do \
{ \
mstate m = GetMSpace((p),(tc),(ms),(s)); \
action; \
} while (0)
#endif
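/* GETMSPACE runs `action` with `m` bound to a usable mspace (and, when locks
   are enabled, with that mspace's mutex held for the duration of `action`),
   e.g. as used further below:
     GETMSPACE(m, p, tc, mymspace, size,
               ret=CallMalloc(m, size, alignment, flags));
*/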
static FORCEINLINE mstate GetMSpace(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace, size_t size) THROWSPEC
{ /* Returns a locked and ready for use mspace */
mstate m=p->m[mymspace];
assert(m);
#if USE_LOCKS && USE_ALLOCATOR==1
if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);
/*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/
#endif
return m;
}
static NOINLINE void GetThreadCache_cold1(nedpool *RESTRICT *RESTRICT p) THROWSPEC
{
*p=&syspool;
if(!syspool.threads) InitPool(&syspool, 0, -1);
}
static NOINLINE void GetThreadCache_cold2(nedpool *RESTRICT *RESTRICT p, threadcache *RESTRICT *RESTRICT tc, int *RESTRICT mymspace, int mycache) THROWSPEC
{
if(!mycache)
{ /* Need to allocate a new cache */
*tc=AllocCache(*p);
if(!*tc)
{ /* Disable */
if(TLSSET((*p)->mycache, (void *)(size_t)-1)) abort();
*mymspace=0;
}
else
*mymspace=(*tc)->mymspace;
}
else
{ /* Cache disabled, but we do have an assigned thread pool */
*tc=0;
*mymspace=-mycache-1;
}
}
static FORCEINLINE void GetThreadCache(nedpool *RESTRICT *RESTRICT p, threadcache *RESTRICT *RESTRICT tc, int *RESTRICT mymspace, size_t *RESTRICT size) THROWSPEC
{
int mycache;
#if THREADCACHEMAX
if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk);
#endif
if(!*p)
GetThreadCache_cold1(p);
mycache=(int)(size_t) TLSGET((*p)->mycache);
if(mycache>0)
{ /* Already have a cache */
*tc=(*p)->caches[mycache-1];
*mymspace=(*tc)->mymspace;
}
else GetThreadCache_cold2(p, tc, mymspace, mycache);
assert(*mymspace>=0);
#if USE_LOCKS
assert(!(*tc) || (long)(size_t)CURRENT_THREAD==(*tc)->threadid);
#endif
#ifdef FULLSANITYCHECKS
if(*tc)
{
if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2)
{
abort();
}
}
#endif
}
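/* The TLS encoding decoded above (documented at the nedpool declaration):
   mycache==0 means "unset", mycache>0 means caches[mycache-1] is this
   thread's cache, and mycache<0 means the cache is disabled and mspace
   -mycache-1 should be used directly. */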
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmalloc2(nedpool *p, size_t size, size_t alignment, unsigned flags) THROWSPEC
{
void *ret=0;
threadcache *tc;
int mymspace;
GetThreadCache(&p, &tc, &mymspace, &size);
#if THREADCACHEMAX
if(alignment<=MALLOC_ALIGNMENT && !(flags & NM_FLAGS_MASK) && tc && size<=THREADCACHEMAX)
{ /* Use the thread cache */
if((ret=threadcache_malloc(p, tc, &size)))
{
if((flags & M2_ZERO_MEMORY))
memset(ret, 0, size);
LogOperation(tc, p, LOGENTRY_THREADCACHE_MALLOC, mymspace, size, 0, alignment, flags, ret);
}
}
#endif
if(!ret)
{ /* Use this thread's mspace */
GETMSPACE(m, p, tc, mymspace, size,
ret=CallMalloc(m, size, alignment, flags));
if(ret)
LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, size, 0, alignment, flags, ret);
}
LogOperation(tc, p, LOGENTRY_MALLOC, mymspace, size, 0, alignment, flags, ret);
return ret;
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedprealloc2(nedpool *p, void *mem, size_t size, size_t alignment, unsigned flags) THROWSPEC
{
void *ret=0;
threadcache *tc;
int mymspace, isforeign=1;
size_t memsize;
if(!mem) return nedpmalloc2(p, size, alignment, flags);
#if REALLOC_ZERO_BYTES_FREES
if(!size)
{
nedpfree2(p, mem, flags);
return 0;
}
#endif
memsize=nedblksize(&isforeign, mem, flags);
assert(memsize);
if(!memsize)
{
fprintf(stderr, "nedmalloc: nedprealloc() called with a block not created by nedmalloc!\n");
abort();
}
else if(size<=memsize && memsize-size<
#ifdef DEBUG
32
#else
1024
#endif
    ) /* If the new size is no larger and within 1Kb (32 bytes in debug builds) of the existing size, no-op the realloc */
return mem;
GetThreadCache(&p, &tc, &mymspace, &size);
#if THREADCACHEMAX
if(alignment<=MALLOC_ALIGNMENT && !(flags & NM_FLAGS_MASK) && tc && size && size<=THREADCACHEMAX)
{ /* Use the thread cache */
if((ret=threadcache_malloc(p, tc, &size)))
{
size_t tocopy=memsize<size ? memsize : size;
memcpy(ret, mem, tocopy);
if((flags & M2_ZERO_MEMORY) && size>memsize)
memset((void *)((size_t)ret+memsize), 0, size-memsize);
LogOperation(tc, p, LOGENTRY_THREADCACHE_MALLOC, mymspace, size, mem, alignment, flags, ret);
if(!isforeign && memsize>=sizeof(threadcacheblk) && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD))
{
threadcache_free(p, tc, mymspace, mem, memsize, isforeign);
LogOperation(tc, p, LOGENTRY_THREADCACHE_FREE, mymspace, memsize, mem, 0, 0, 0);
}
else
{
CallFree(0, mem, isforeign);
LogOperation(tc, p, LOGENTRY_POOL_FREE, mymspace, memsize, mem, 0, 0, 0);
}
}
}
#endif
if(!ret)
{ /* Reallocs always happen in the mspace they happened in, so skip
locking the preferred mspace for this thread */
ret=CallRealloc(p->m[mymspace], mem, isforeign, memsize, size, alignment, flags);
if(ret)
LogOperation(tc, p, LOGENTRY_POOL_REALLOC, mymspace, size, mem, alignment, flags, ret);
}
LogOperation(tc, p, LOGENTRY_REALLOC, mymspace, size, mem, alignment, flags, ret);
return ret;
}
NEDMALLOCNOALIASATTR void nedpfree2(nedpool *p, void *mem, unsigned flags) THROWSPEC
{ /* Frees always happen in the mspace they happened in, so skip
locking the preferred mspace for this thread */
threadcache *tc;
int mymspace, isforeign=1;
size_t memsize;
if(!mem)
{ /* If you tried this on FreeBSD you'd be sorry! */
#ifdef DEBUG
fprintf(stderr, "nedmalloc: WARNING nedpfree() called with zero. This is not portable behaviour!\n");
#endif
return;
}
memsize=nedblksize(&isforeign, mem, flags);
assert(memsize);
if(!memsize)
{
fprintf(stderr, "nedmalloc: nedpfree() called with a block not created by nedmalloc!\n");
abort();
}
GetThreadCache(&p, &tc, &mymspace, 0);
#if THREADCACHEMAX
if(mem && tc && !isforeign && memsize>=sizeof(threadcacheblk) && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD))
{
threadcache_free(p, tc, mymspace, mem, memsize, isforeign);
LogOperation(tc, p, LOGENTRY_THREADCACHE_FREE, mymspace, memsize, mem, 0, 0, 0);
}
else
#endif
{
CallFree(0, mem, isforeign);
LogOperation(tc, p, LOGENTRY_POOL_FREE, mymspace, memsize, mem, 0, 0, 0);
}
LogOperation(tc, p, LOGENTRY_FREE, mymspace, memsize, mem, 0, 0, 0);
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC
{
unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, size);
return nedpmalloc2(p, size, 0, flags);
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC
{
  size_t bytes=no*size;
  unsigned flags;
  /* Avoid multiplication overflow. */
  if(size && no!=bytes/size)
    return 0;
  flags=NEDMALLOC_FORCERESERVE(p, 0, bytes);
  return nedpmalloc2(p, bytes, 0, M2_ZERO_MEMORY|flags);
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC
{
unsigned flags=NEDMALLOC_FORCERESERVE(p, mem, size);
#if ENABLE_USERMODEPAGEALLOCATOR
/* If the user mode page allocator is turned on in a 32 bit process,
don't automatically reserve eight times the address space. */
if(8==sizeof(size_t) || !OSHavePhysicalPageSupport())
#endif
  { /* If the caller reallocs even once, it's probably wise to turn on address space reservation.
If the size is larger than mmap_threshold then it'll set the reserve. */
if(!(flags & M2_RESERVE_MASK)) flags=M2_RESERVE_MULT(8);
}
return nedprealloc2(p, mem, size, 0, flags);
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC
{
unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, bytes);
return nedpmalloc2(p, bytes, alignment, flags);
}
NEDMALLOCNOALIASATTR void nedpfree(nedpool *p, void *mem) THROWSPEC
{
nedpfree2(p, mem, 0);
}
struct nedmallinfo nedpmallinfo(nedpool *p) THROWSPEC
{
int n;
struct nedmallinfo ret={0};
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1 && !NO_MALLINFO
struct mallinfo t=mspace_mallinfo(p->m[n]);
ret.arena+=t.arena;
ret.ordblks+=t.ordblks;
ret.hblkhd+=t.hblkhd;
ret.usmblks+=t.usmblks;
ret.uordblks+=t.uordblks;
ret.fordblks+=t.fordblks;
ret.keepcost+=t.keepcost;
#endif
}
return ret;
}
int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC
{
#if USE_ALLOCATOR==1
return mspace_mallopt(parno, value);
#else
return 0;
#endif
}
NEDMALLOCNOALIASATTR void* nedmalloc_internals(size_t *granularity, size_t *magic) THROWSPEC
{
#if USE_ALLOCATOR==1
if(granularity) *granularity=mparams.granularity;
if(magic) *magic=mparams.magic;
return (void *) &syspool;
#else
if(granularity) *granularity=0;
if(magic) *magic=0;
return 0;
#endif
}
int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC
{
int n, ret=0;
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
ret+=mspace_trim(p->m[n], pad);
#endif
}
return ret;
}
void nedpmalloc_stats(nedpool *p) THROWSPEC
{
int n;
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
mspace_malloc_stats(p->m[n]);
#endif
}
}
size_t nedpmalloc_footprint(nedpool *p) THROWSPEC
{
size_t ret=0;
int n;
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
ret+=mspace_footprint(p->m[n]);
#endif
}
return ret;
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC
{
void **ret;
threadcache *tc;
int mymspace;
GetThreadCache(&p, &tc, &mymspace, &elemsize);
#if USE_ALLOCATOR==0
GETMSPACE(m, p, tc, mymspace, elemsno*elemsize,
ret=unsupported_operation("independent_calloc"));
#elif USE_ALLOCATOR==1
GETMSPACE(m, p, tc, mymspace, elemsno*elemsize,
ret=mspace_independent_calloc(m, elemsno, elemsize, chunks));
#endif
#if ENABLE_LOGGING
if(ret && (ENABLE_LOGGING & LOGENTRY_POOL_MALLOC))
{
size_t n;
for(n=0; n<elemsno; n++)
{
LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, elemsize, 0, 0, M2_ZERO_MEMORY, ret[n]);
}
}
#endif
return ret;
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC
{
void **ret;
threadcache *tc;
int mymspace;
size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
if(!adjustedsizes) return 0;
for(i=0; i<elems; i++)
adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
GetThreadCache(&p, &tc, &mymspace, 0);
#if USE_ALLOCATOR==0
GETMSPACE(m, p, tc, mymspace, 0,
ret=unsupported_operation("independent_comalloc"));
#elif USE_ALLOCATOR==1
GETMSPACE(m, p, tc, mymspace, 0,
ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks));
#endif
#if ENABLE_LOGGING
if(ret && (ENABLE_LOGGING & LOGENTRY_POOL_MALLOC))
{
size_t n;
for(n=0; n<elems; n++)
{
LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, sizes[n], 0, 0, 0, ret[n]);
}
}
#endif
return ret;
}
#if defined(__cplusplus)
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
lock contention based on dlmalloc. (C) 2005-2010 Niall Douglas
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
#if 0 /* Effectively makes nedmalloc = dlmalloc */
#define THREADCACHEMAX 0
#define DEFAULT_GRANULARITY 65536
#define DEFAULTMAXTHREADSINPOOL 1
#endif
#ifdef _MSC_VER
/* Enable full aliasing on MSVC */
/*#pragma optimize("a", on)*/
/*#pragma optimize("g", off)*/
#pragma warning(push)
#pragma warning(disable:4100) /* unreferenced formal parameter */
#pragma warning(disable:4127) /* conditional expression is constant */
#pragma warning(disable:4232) /* address of dllimport is not static, identity not guaranteed */
#pragma warning(disable:4706) /* assignment within conditional expression */
#define _CRT_SECURE_NO_WARNINGS 1 /* Don't care about MSVC warnings on POSIX functions */
#include <stdio.h>
#define fopen(f, m) _fsopen((f), (m), 0x40/*_SH_DENYNO*/) /* Have Windows let other programs view the log file as it is written */
#ifndef UNICODE
#define UNICODE /* Turn on windows unicode support */
#endif
#else
#include <stdio.h>
#endif
/*#define NEDMALLOC_DEBUG 1*/
/*#define ENABLE_LOGGING 7
#define NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned) (((type)&ENABLE_LOGGING)&&((size)>16*1024))
#define NEDMALLOC_STACKBACKTRACEDEPTH 16*/
/*#define NEDMALLOC_FORCERESERVE(p, mem, size) (((size)>=(256*1024)) ? M2_RESERVE_MULT(8) : 0)*/
/*#define WIN32_DIRECT_USE_FILE_MAPPINGS 0*/
/*#define NEDMALLOC_DEBUG 1*/
/*#define FULLSANITYCHECKS*/
/* There is only support for the user mode page allocator on Windows at present */
#if !defined(ENABLE_USERMODEPAGEALLOCATOR)
#define ENABLE_USERMODEPAGEALLOCATOR 0
#endif
/* If link time code generation is on, don't force or prevent inlining */
#if defined(_MSC_VER) && defined(NEDMALLOC_DLL_EXPORTS)
#define FORCEINLINE
#define NOINLINE
#endif
#include "nedmalloc.h"
#include <errno.h>
#if defined(WIN32)
#include <malloc.h>
#else
#if defined(__cplusplus)
extern "C"
#else
extern
#endif
#if defined(__linux__) || defined(__FreeBSD__)
/* Sadly we can't include <malloc.h> as it causes a redefinition error */
size_t malloc_usable_size(void *);
#elif defined(__APPLE__)
size_t malloc_size(const void *ptr);
#else
#error Do not know what to do here
#endif
#endif
#if USE_ALLOCATOR==1
#define MSPACES 1
#define ONLY_MSPACES 1
#endif
#define USE_DL_PREFIX 1
#ifndef USE_LOCKS
#define USE_LOCKS 1
#endif
#define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */
#ifndef NEDMALLOC_DEBUG
#if defined(DEBUG) || defined(_DEBUG)
#define NEDMALLOC_DEBUG 1
#else
#define NEDMALLOC_DEBUG 0
#endif
#endif
/* We need to consistently define DEBUG=0|1, _DEBUG and NDEBUG for dlmalloc */
#if !defined(DEBUG) && !defined(NDEBUG)
#ifdef __GNUC__
#warning Neither DEBUG nor NDEBUG is defined, so the allocator will run with assert checking! Define NDEBUG to run at full speed.
#elif defined(_MSC_VER)
#pragma message(__FILE__ ": WARNING: DEBUG may not be defined but without NDEBUG being defined allocator will run with assert checking! Define NDEBUG to run at full speed.")
#endif
#endif
#undef DEBUG
#undef _DEBUG
#if NEDMALLOC_DEBUG
#define _DEBUG
#define DEBUG 1
#else
#define DEBUG 0
#endif
#ifdef NDEBUG /* Disable assert checking on release builds */
#undef DEBUG
#undef _DEBUG
#endif
/* The default of 64Kb means we spend too much time kernel-side */
#ifndef DEFAULT_GRANULARITY
#define DEFAULT_GRANULARITY (1*1024*1024)
#if DEBUG
#define DEFAULT_GRANULARITY_ALIGNED
#endif
#endif
/*#define USE_SPIN_LOCKS 0*/
#if ENABLE_USERMODEPAGEALLOCATOR
extern int OSHavePhysicalPageSupport(void);
extern void *userpage_malloc(size_t toallocate, unsigned flags);
extern int userpage_free(void *mem, size_t size);
extern void *userpage_realloc(void *mem, size_t oldsize, size_t newsize, int flags, unsigned flags2);
#define USERPAGE_TOPDOWN (M2_CUSTOM_FLAGS_BEGIN<<0)
#define USERPAGE_NOCOMMIT (M2_CUSTOM_FLAGS_BEGIN<<1)
/* This can provide a very significant speed boost */
#undef MMAP_CLEARS
#define MMAP_CLEARS 0
#define MUNMAP(h, a, s) (!OSHavePhysicalPageSupport() ? MUNMAP_DEFAULT((h), (a), (s)) : userpage_free((a), (s)))
#define MMAP(s, f) (!OSHavePhysicalPageSupport() ? MMAP_DEFAULT((s)) : userpage_malloc((s), (f)))
#define MREMAP(addr, osz, nsz, mv) (!OSHavePhysicalPageSupport() ? MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) : userpage_realloc((addr), (osz), (nsz), (mv), 0))
#define DIRECT_MMAP(h, s, f) (!OSHavePhysicalPageSupport() ? DIRECT_MMAP_DEFAULT((h), (s), (f)) : userpage_malloc((s), (f)|USERPAGE_TOPDOWN))
#define DIRECT_MREMAP(h, a, os, ns, f, f2) (!OSHavePhysicalPageSupport() ? DIRECT_MREMAP_DEFAULT((h), (a), (os), (ns), (f), (f2)) : userpage_realloc((a), (os), (ns), (f), (f2)|USERPAGE_TOPDOWN))
/*#undef MREMAP
#define MREMAP(addr, osz, nsz, mv) (!OSHavePhysicalPageSupport() ? MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) : MFAIL)*/
/*#undef DIRECT_MREMAP
#define DIRECT_MREMAP(h, a, os, ns, f, f2) (!OSHavePhysicalPageSupport() ? DIRECT_MREMAP_DEFAULT((h), (a), (os), (ns), (f), (f2)) : MFAIL)*/
#endif
#include "malloc.c.h"
#ifdef NDEBUG /* Disable assert checking on release builds */
#undef DEBUG
#endif
/* The default number of threads allowed into a pool at once */
#ifndef DEFAULTMAXTHREADSINPOOL
#define DEFAULTMAXTHREADSINPOOL 4
#endif
/* The maximum size to be allocated from the thread cache */
#ifndef THREADCACHEMAX
#define THREADCACHEMAX 8192
#elif THREADCACHEMAX && !defined(THREADCACHEMAXBINS)
#ifdef __GNUC__
#warning If you are changing THREADCACHEMAX, do you also need to change THREADCACHEMAXBINS=(topbitpos(THREADCACHEMAX)-4)?
#elif defined(_MSC_VER)
#pragma message(__FILE__ ": WARNING: If you are changing THREADCACHEMAX, do you also need to change THREADCACHEMAXBINS=(topbitpos(THREADCACHEMAX)-4)?")
#endif
#endif
/* The maximum concurrent threads in a pool possible */
#ifndef MAXTHREADSINPOOL
#define MAXTHREADSINPOOL 16
#endif
/* The maximum number of threadcaches which can be allocated */
#ifndef THREADCACHEMAXCACHES
#define THREADCACHEMAXCACHES 256
#endif
#ifndef THREADCACHEMAXBINS
#if 0
/* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */
#define THREADCACHEMAXBINS ((13-4)*2)
#else
/* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */
#define THREADCACHEMAXBINS (13-4)
#endif
#endif
/* Point at which the free space in a thread cache is garbage collected */
#ifndef THREADCACHEMAXFREESPACE
#define THREADCACHEMAXFREESPACE (1024*1024)
#endif
/* NEDMALLOC_FORCERESERVE is used to force malloc2 flags for normal malloc, calloc et al */
#ifndef NEDMALLOC_FORCERESERVE
#define NEDMALLOC_FORCERESERVE(p, mem, size) 0
#endif
/* ENABLE_LOGGING is a bitmask of what events to log */
#if ENABLE_LOGGING
#ifndef NEDMALLOC_LOGFILE
#define NEDMALLOC_LOGFILE "nedmalloc.csv"
#endif
#endif
/* NEDMALLOC_TESTLOGENTRY returns non-zero if the entry should be logged */
#ifndef NEDMALLOC_TESTLOGENTRY
#define NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned) ((type)&ENABLE_LOGGING)
#endif
/* NEDMALLOC_STACKBACKTRACEDEPTH has the logger store a stack backtrace per logged item. Slow! */
#ifndef NEDMALLOC_STACKBACKTRACEDEPTH
#define NEDMALLOC_STACKBACKTRACEDEPTH 0
#endif
#if NEDMALLOC_STACKBACKTRACEDEPTH
#include "Dbghelp.h"
#include "psapi.h"
#pragma comment(lib, "dbghelp.lib")
#pragma comment(lib, "psapi.lib")
#endif
#define NM_FLAGS_MASK (M2_FLAGS_MASK&~M2_ZERO_MEMORY)
#if USE_LOCKS
#ifdef WIN32
#define TLSVAR DWORD
#define TLSALLOC(k) (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k))
#define TLSFREE(k) (!TlsFree(k))
#define TLSGET(k) TlsGetValue(k)
#define TLSSET(k, a) (!TlsSetValue(k, a))
#ifdef DEBUG
static LPVOID ChkedTlsGetValue(DWORD idx)
{
LPVOID ret=TlsGetValue(idx);
assert(S_OK==GetLastError());
return ret;
}
#undef TLSGET
#define TLSGET(k) ChkedTlsGetValue(k)
#endif
#else
#define TLSVAR pthread_key_t
#define TLSALLOC(k) pthread_key_create(k, 0)
#define TLSFREE(k) pthread_key_delete(k)
#define TLSGET(k) pthread_getspecific(k)
#define TLSSET(k, a) pthread_setspecific(k, a)
#endif
#else /* Probably if you're not using locks then you don't want ANY pthread stuff at all */
#define TLSVAR void *
#define TLSALLOC(k) (*k=0)
#define TLSFREE(k) (k=0)
#define TLSGET(k) k
#define TLSSET(k, a) (k=a, 0)
#endif
#if ENABLE_USERMODEPAGEALLOCATOR
#include "usermodepageallocator.c"
#endif
#if defined(__cplusplus)
#if !defined(NO_NED_NAMESPACE)
namespace nedalloc {
#else
extern "C" {
#endif
#endif
#if USE_ALLOCATOR==0
static void *unsupported_operation(const char *opname) THROWSPEC
{
fprintf(stderr, "nedmalloc: The operation %s is not supported under this build configuration\n", opname);
abort();
return 0;
}
static size_t mspacecounter=(size_t) 0xdeadbeef;
#endif
#ifndef ENABLE_FAST_HEAP_DETECTION
static void *RESTRICT leastusedaddress;
static size_t largestusedblock;
#endif
/* Used to redirect system allocator ops if needed */
extern void *(*sysmalloc)(size_t);
extern void *(*syscalloc)(size_t, size_t);
extern void *(*sysrealloc)(void *, size_t);
extern void (*sysfree)(void *);
extern size_t (*sysblksize)(void *);
#if !defined(REPLACE_SYSTEM_ALLOCATOR) || (!defined(_MSC_VER) && !defined(__MINGW32__))
void *(*sysmalloc)(size_t)=malloc;
void *(*syscalloc)(size_t, size_t)=calloc;
void *(*sysrealloc)(void *, size_t)=realloc;
void (*sysfree)(void *)=free;
size_t (*sysblksize)(void *)=
#if defined(_MSC_VER) || defined(__MINGW32__)
/* This is the MSVCRT equivalent */
_msize;
#elif defined(__linux__) || defined(__FreeBSD__)
/* This is the glibc/ptmalloc2/dlmalloc/BSD equivalent. */
malloc_usable_size;
#elif defined(__APPLE__)
/* This is the Apple BSD libc equivalent. */
malloc_size;
#else
#error Cannot tolerate the memory allocator of an unknown system!
#endif
#else
/* Remove the MSVCRT dependency on the memory functions */
void *(*sysmalloc)(size_t);
void *(*syscalloc)(size_t, size_t);
void *(*sysrealloc)(void *, size_t);
void (*sysfree)(void *);
size_t (*sysblksize)(void *);
#endif
static FORCEINLINE NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void *CallMalloc(void *RESTRICT mspace, size_t size, size_t alignment, unsigned flags) THROWSPEC
{
void *RESTRICT ret=0;
#if USE_MAGIC_HEADERS
size_t _alignment=alignment;
size_t *_ret=0;
size_t bytes=size+alignment+3*sizeof(size_t);
/* Avoid addition overflow. */
if(bytes<size)
return 0;
size=bytes;
_alignment=0;
#endif
#if USE_ALLOCATOR==0
  ret=(flags & M2_ZERO_MEMORY) ? syscalloc(1, size) : sysmalloc(size); /* magic headers take care of alignment */
#elif USE_ALLOCATOR==1
ret=mspace_malloc2((mstate) mspace, size, alignment, flags);
#ifndef ENABLE_FAST_HEAP_DETECTION
if(ret)
{
mchunkptr p=mem2chunk(ret);
size_t truesize=chunksize(p) - overhead_for(p);
if(!leastusedaddress || (void *)((mstate) mspace)->least_addr<leastusedaddress) leastusedaddress=(void *)((mstate) mspace)->least_addr;
if(!largestusedblock || truesize>largestusedblock) largestusedblock=(truesize+mparams.page_size) & ~(mparams.page_size-1);
}
#endif
#endif
if(!ret) return 0;
#if DEBUG
if(flags & M2_ZERO_MEMORY)
{
const char *RESTRICT n;
for(n=(const char *)ret; n<(const char *)ret+size; n++)
{
assert(!*n);
}
}
#endif
#if USE_MAGIC_HEADERS
_ret=(size_t *) ret;
ret=(void *)(_ret+3);
if(alignment) ret=(void *)(((size_t) ret+alignment-1)&~(alignment-1));
for(; _ret<(size_t *)ret-2; _ret++) *_ret=*(size_t *)"NEDMALOC";
_ret[0]=(size_t) mspace;
_ret[1]=size-3*sizeof(size_t);
#endif
return ret;
}
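/* Sketch of the block layout CallMalloc() produces when USE_MAGIC_HEADERS is
   on (one size_t per cell; alignment padding, if any, holds further magic
   words below the ones shown):
     [ "NEDMALOC" ... ][ "NEDMALOC" ][ mspace ][ usable size ][ user data... ]
                                                               ^ returned ptr
   CallRealloc()/CallFree() below walk these magic words back to recover the
   owning mspace and original allocation start. */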
static FORCEINLINE NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void *CallRealloc(void *RESTRICT mspace, void *RESTRICT mem, int isforeign, size_t oldsize, size_t newsize, size_t alignment, unsigned flags) THROWSPEC
{
void *RESTRICT ret=0;
#if USE_MAGIC_HEADERS
mstate oldmspace=0;
size_t *_ret=0, *_mem=(size_t *) mem-3;
#endif
if(isforeign)
{ /* Transfer */
#if USE_MAGIC_HEADERS
assert(_mem[0]!=*(size_t *) "NEDMALOC");
#endif
if((ret=CallMalloc(mspace, newsize, alignment, flags)))
{
#if defined(DEBUG)
printf("*** nedmalloc frees system allocated block %p\n", mem);
#endif
memcpy(ret, mem, oldsize<newsize ? oldsize : newsize);
sysfree(mem);
}
return ret;
}
#if USE_MAGIC_HEADERS
assert(_mem[0]==*(size_t *) "NEDMALOC");
newsize+=3*sizeof(size_t);
oldmspace=(mstate) _mem[1];
assert(oldsize>=_mem[2]);
for(; *_mem==*(size_t *) "NEDMALOC"; *_mem--=*(size_t *) "nedmaloc");
mem=(void *)(++_mem);
#endif
#if USE_ALLOCATOR==0
ret=sysrealloc(mem, newsize);
#elif USE_ALLOCATOR==1
ret=mspace_realloc2((mstate) mspace, mem, newsize, alignment, flags);
#ifndef ENABLE_FAST_HEAP_DETECTION
if(ret)
{
mchunkptr p=mem2chunk(ret);
size_t truesize=chunksize(p) - overhead_for(p);
if(!leastusedaddress || (void *)((mstate) mspace)->least_addr<leastusedaddress) leastusedaddress=(void *)((mstate) mspace)->least_addr;
if(!largestusedblock || truesize>largestusedblock) largestusedblock=(truesize+mparams.page_size) & ~(mparams.page_size-1);
}
#endif
#endif
if(!ret)
{ /* Put it back the way it was */
#if USE_MAGIC_HEADERS
    for(; *_mem==*(size_t *) "nedmaloc"; *_mem++=*(size_t *) "NEDMALOC"); /* restore the magic words scrubbed above */
#endif
return 0;
}
#if USE_MAGIC_HEADERS
_ret=(size_t *) ret;
ret=(void *)(_ret+3);
for(; _ret<(size_t *)ret-2; _ret++) *_ret=*(size_t *) "NEDMALOC";
_ret[0]=(size_t) mspace;
_ret[1]=newsize-3*sizeof(size_t);
#endif
return ret;
}
static FORCEINLINE void CallFree(void *RESTRICT mspace, void *RESTRICT mem, int isforeign) THROWSPEC
{
#if USE_MAGIC_HEADERS
mstate oldmspace=0;
size_t *_mem=(size_t *) mem-3, oldsize=0;
#endif
if(isforeign)
{
#if USE_MAGIC_HEADERS
assert(_mem[0]!=*(size_t *) "NEDMALOC");
#endif
#if defined(DEBUG)
printf("*** nedmalloc frees system allocated block %p\n", mem);
#endif
sysfree(mem);
return;
}
#if USE_MAGIC_HEADERS
assert(_mem[0]==*(size_t *) "NEDMALOC");
oldmspace=(mstate) _mem[1];
oldsize=_mem[2];
for(; *_mem==*(size_t *) "NEDMALOC"; *_mem--=*(size_t *) "nedmaloc");
mem=(void *)(++_mem);
#endif
#if USE_ALLOCATOR==0
sysfree(mem);
#elif USE_ALLOCATOR==1
mspace_free((mstate) mspace, mem);
#endif
}
static NEDMALLOCNOALIASATTR mstate nedblkmstate(void *RESTRICT mem) THROWSPEC
{
if(mem)
{
#if USE_MAGIC_HEADERS
size_t *_mem=(size_t *) mem-3;
if(_mem[0]==*(size_t *) "NEDMALOC")
{
return (mstate) _mem[1];
}
else return 0;
#else
#if USE_ALLOCATOR==0
/* Fail everything */
return 0;
#elif USE_ALLOCATOR==1
#ifdef ENABLE_FAST_HEAP_DETECTION
#ifdef WIN32
/* On Windows, for RELEASE builds on both x86 and x64, the NT heap precedes each block with an eight byte header
which looks like:
normal: 4 bytes of size, 4 bytes of [char < 64, char < 64, char < 64 bit 0 always set, char random ]
mmaped: 4 bytes of size, 4 bytes of [zero, zero, 0xb, zero ]
On Windows, for DEBUG builds on both x86 and x64, the preceding four bytes are always 0xfdfdfdfd (no man's land).
*/
#pragma pack(push, 1)
struct _HEAP_ENTRY
{
USHORT Size;
USHORT PreviousSize;
UCHAR Cookie; /* SegmentIndex */
UCHAR Flags; /* always bit 0 (HEAP_ENTRY_BUSY). bit 1=(HEAP_ENTRY_EXTRA_PRESENT), bit 2=normal block (HEAP_ENTRY_FILL_PATTERN), bit 3=mmap block (HEAP_ENTRY_VIRTUAL_ALLOC). Bit 4 (HEAP_ENTRY_LAST_ENTRY) could be set */
UCHAR UnusedBytes;
UCHAR SmallTagIndex; /* fastbin index. Always one of 0x02, 0x03, 0x04 < 0x80 */
} *RESTRICT he=((struct _HEAP_ENTRY *) mem)-1;
#pragma pack(pop)
unsigned int header=((unsigned int *)mem)[-1], mask1=0x8080E100, result1, mask2=0xFFFFFF06, result2;
result1=header & mask1; /* Positive testing for NT heap */
result2=header & mask2; /* Positive testing for dlmalloc */
if(result1==0x00000100 && result2!=0x00000102)
{ /* This is likely a NT heap block */
return 0;
}
#endif
#ifdef __linux__
/* On Linux glibc uses ptmalloc2 (really dlmalloc) just as we do, but prev_foot contains rubbish
when the preceding block is allocated because ptmalloc2 finds the local mstate by rounding the ptr
down to the nearest megabyte. It's like dlmalloc with FOOTERS disabled. */
mchunkptr p=mem2chunk(mem);
mstate fm=get_mstate_for(p);
/* If it's a ptmalloc2 block, fm is likely to be some crazy value */
if(!is_aligned(fm)) return 0;
if((size_t)mem-(size_t)fm>=(size_t)1<<(SIZE_T_BITSIZE-1)) return 0;
if(ok_magic(fm))
return fm;
else
return 0;
if(1) { }
#endif
else
{
mchunkptr p=mem2chunk(mem);
mstate fm=get_mstate_for(p);
assert(ok_magic(fm)); /* If this fails, someone tried to free a block twice */
if(ok_magic(fm))
return fm;
}
#else
#ifdef WIN32
#ifdef _MSC_VER
__try
#elif defined(__MINGW32__)
__try1
#endif
#endif
{
/* We try to return zero here if it isn't one of our own blocks; however,
the current block annotation scheme used by dlmalloc makes it impossible
to be absolutely sure of avoiding a segfault.
mchunkptr->prev_foot = mem-(2*size_t) = mstate ^ mparams.magic for PRECEDING block;
mchunkptr->head = mem-(1*size_t) = 8 multiple size of this block with bottom three bits = FLAG_BITS
FLAG_BITS = bit 0 is CINUSE (currently in use unless is mmap), bit 1 is PINUSE (previous block currently
in use unless mmap), bit 2 is UNUSED and currently is always zero.
*/
register void *RESTRICT leastusedaddress_=leastusedaddress; /* Cache these to avoid register reloading */
register size_t largestusedblock_=largestusedblock;
if(!is_aligned(mem)) return 0; /* Would fail very rarely as all allocators return aligned blocks */
if(mem<leastusedaddress_) return 0; /* Simple but effective */
{
mchunkptr p=mem2chunk(mem);
mstate fm=0;
int ismmapped=is_mmapped(p);
if((!ismmapped && !is_inuse(p)) || (p->head & FLAG4_BIT)) return 0;
/* Reduced uncertainty by 0.5^2 = 25.0% */
/* size should never exceed largestusedblock */
if(chunksize(p)-overhead_for(p)>largestusedblock_) return 0;
/* Reduced uncertainty by a minimum of 0.5^3 = 12.5%, maximum 0.5^16 = 0.0015% */
/* Having sanity checked prev_foot and head, check next block */
if(!ismmapped && (!next_pinuse(p) || (next_chunk(p)->head & FLAG4_BIT))) return 0;
/* Reduced uncertainty by 0.5^5 = 3.13% or 0.5^18 = 0.00038% */
#if 0
/* If previous block is free, check that its next block pointer equals us */
if(!ismmapped && !pinuse(p))
if(next_chunk(prev_chunk(p))!=p) return 0;
/* We could start comparing prev_foot's for similarity but it starts getting slow. */
#endif
fm = get_mstate_for(p);
if(!is_aligned(fm) || (void *)fm<leastusedaddress_) return 0;
#if 0
/* See if the mstate fm sits below mem and within half the address space of it */
if((size_t)mem-(size_t)fm>=(size_t)1<<(SIZE_T_BITSIZE-1)) return 0;
#endif
assert(ok_magic(fm)); /* If this fails, someone tried to free a block twice */
if(ok_magic(fm))
return fm;
}
}
#ifdef WIN32
#ifdef _MSC_VER
__except(1) { }
#elif defined(__MINGW32__)
__except1(1) { }
#endif
#endif
#endif
#endif
#endif
}
return 0;
}
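/* Note (editorial): without USE_MAGIC_HEADERS the identification above is
heuristic. The fast-detection paths match known heap header bit patterns,
while the slow path merely stacks sanity checks to bound the odds of
claiming a foreign block as ours, as the inline percentage comments
quantify. */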
NEDMALLOCNOALIASATTR size_t nedblksize(int *RESTRICT isforeign, void *RESTRICT mem, unsigned flags) THROWSPEC
{
if(mem)
{
if(isforeign) *isforeign=1;
#if USE_MAGIC_HEADERS
{
size_t *_mem=(size_t *) mem-3;
if(_mem[0]==*(size_t *) "NEDMALOC")
{
mstate mspace=(mstate) _mem[1];
size_t size=_mem[2];
if(isforeign) *isforeign=0;
return size;
}
}
#elif USE_ALLOCATOR==1
if((flags & NM_SKIP_TOLERANCE_CHECKS) || nedblkmstate(mem))
{
mchunkptr p=mem2chunk(mem);
if(isforeign) *isforeign=0;
return chunksize(p)-overhead_for(p);
}
#ifdef DEBUG
else
{
int a=1; /* Set breakpoints here if needed */
}
#endif
#endif
#if defined(ENABLE_TOLERANT_NEDMALLOC) || USE_ALLOCATOR==0
return sysblksize(mem);
#endif
}
return 0;
}
NEDMALLOCNOALIASATTR size_t nedmemsize(void *RESTRICT mem) THROWSPEC { return nedblksize(0, mem, 0); }
NEDMALLOCNOALIASATTR void nedsetvalue(void *v) THROWSPEC { nedpsetvalue((nedpool *) 0, v); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc((nedpool *) 0, size); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc((nedpool *) 0, no, size); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc((nedpool *) 0, mem, size); }
NEDMALLOCNOALIASATTR void nedfree(void *mem) THROWSPEC { nedpfree((nedpool *) 0, mem); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign((nedpool *) 0, alignment, bytes); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedmalloc2(size_t size, size_t alignment, unsigned flags) THROWSPEC { return nedpmalloc2((nedpool *) 0, size, alignment, flags); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedrealloc2(void *mem, size_t size, size_t alignment, unsigned flags) THROWSPEC { return nedprealloc2((nedpool *) 0, mem, size, alignment, flags); }
NEDMALLOCNOALIASATTR void nedfree2(void *mem, unsigned flags) THROWSPEC { nedpfree2((nedpool *) 0, mem, flags); }
NEDMALLOCNOALIASATTR struct nedmallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo((nedpool *) 0); }
NEDMALLOCNOALIASATTR int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt((nedpool *) 0, parno, value); }
NEDMALLOCNOALIASATTR int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim((nedpool *) 0, pad); }
void nedmalloc_stats() THROWSPEC { nedpmalloc_stats((nedpool *) 0); }
NEDMALLOCNOALIASATTR size_t nedmalloc_footprint() THROWSPEC { return nedpmalloc_footprint((nedpool *) 0); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc((nedpool *) 0, elemsno, elemsize, chunks); }
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc((nedpool *) 0, elems, sizes, chunks); }
#ifdef WIN32
typedef unsigned __int64 timeCount;
static timeCount GetTimestamp()
{
static LARGE_INTEGER ticksPerSec;
static double scalefactor;
static timeCount baseCount;
LARGE_INTEGER val;
timeCount ret;
if(!scalefactor)
{
if(QueryPerformanceFrequency(&ticksPerSec))
scalefactor=ticksPerSec.QuadPart/1000000000000.0;
else
scalefactor=1;
}
if(!QueryPerformanceCounter(&val))
return (timeCount) GetTickCount() * 1000000000;
ret=(timeCount) (val.QuadPart/scalefactor);
if(!baseCount) baseCount=ret;
return ret-baseCount;
}
#else
#include <sys/time.h>
typedef unsigned long long timeCount;
static timeCount GetTimestamp()
{
static timeCount baseCount;
timeCount ret;
#ifdef CLOCK_MONOTONIC
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
ret=((timeCount) ts.tv_sec*1000000000000LL)+ts.tv_nsec*1000LL;
#else
struct timeval tv;
gettimeofday(&tv, 0);
ret=((timeCount) tv.tv_sec*1000000000000LL)+tv.tv_usec*1000000LL;
#endif
if(!baseCount) baseCount=ret;
return ret-baseCount;
}
#endif
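/* Note: both implementations above return a count in picoseconds relative to
the first call (baseCount), which is why seconds are scaled by 1e12. */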
/* Set ENABLE_LOGGING to an AND mask of which of these you want to
log, so set it to 0xffffffff for everything */
typedef enum LogEntryType_t
{
LOGENTRY_MALLOC =(1<<0),
LOGENTRY_REALLOC =(1<<1),
LOGENTRY_FREE =(1<<2),
LOGENTRY_THREADCACHE_MALLOC =(1<<3),
LOGENTRY_THREADCACHE_FREE =(1<<4),
LOGENTRY_THREADCACHE_CLEAN =(1<<5),
LOGENTRY_POOL_MALLOC =(1<<6),
LOGENTRY_POOL_REALLOC =(1<<7),
LOGENTRY_POOL_FREE =(1<<8)
} LogEntryType;
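/* For example (illustrative only), a build wanting just the user-facing calls
logged might define:

    #define ENABLE_LOGGING (LOGENTRY_MALLOC|LOGENTRY_REALLOC|LOGENTRY_FREE)

while 0xffffffff logs everything, as noted above. */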
#if NEDMALLOC_STACKBACKTRACEDEPTH
typedef struct StackFrameType_t
{
void *pc;
char module[64];
char functname[256];
char file[96];
int lineno;
} StackFrameType;
#endif
typedef struct logentry_t
{
timeCount timestamp;
nedpool *np;
LogEntryType type;
int mspace;
size_t size;
void *mem;
size_t alignment;
unsigned flags;
void *returned;
#if NEDMALLOC_STACKBACKTRACEDEPTH
StackFrameType stack[NEDMALLOC_STACKBACKTRACEDEPTH];
#endif
} logentry;
static const char *LogEntryTypeStrings[]={
"LOGENTRY_MALLOC",
"LOGENTRY_REALLOC",
"LOGENTRY_FREE",
"LOGENTRY_THREADCACHE_MALLOC",
"LOGENTRY_THREADCACHE_FREE",
"LOGENTRY_THREADCACHE_CLEAN",
"LOGENTRY_POOL_MALLOC",
"LOGENTRY_POOL_REALLOC",
"LOGENTRY_POOL_FREE",
"******************"
};
struct threadcacheblk_t;
typedef struct threadcacheblk_t threadcacheblk;
struct threadcacheblk_t
{ /* Keep less than 32 bytes as sizeof(threadcacheblk) is the minimum allocation size */
#ifdef FULLSANITYCHECKS
unsigned int magic;
#endif
int isforeign;
unsigned int lastUsed;
size_t size;
threadcacheblk *RESTRICT next, *RESTRICT prev;
};
typedef struct threadcache_t
{
#ifdef FULLSANITYCHECKS
unsigned int magic1;
#endif
int mymspace; /* Last mspace entry this thread used */
long threadid;
unsigned int mallocs, frees, successes;
#if ENABLE_LOGGING
logentry *logentries, *logentriesptr, *logentriesend;
#endif
size_t freeInCache; /* How much free space is stored in this cache */
threadcacheblk *RESTRICT bins[(THREADCACHEMAXBINS+1)*2];
#ifdef FULLSANITYCHECKS
unsigned int magic2;
#endif
} threadcache;
struct nedpool_t
{
#if USE_LOCKS
MLOCK_T mutex;
#endif
void *uservalue;
int threads; /* Max entries in m to use */
threadcache *RESTRICT caches[THREADCACHEMAXCACHES];
TLSVAR mycache; /* Thread cache for this thread. 0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */
mstate m[MAXTHREADSINPOOL+1]; /* mspace entries for this pool */
};
static nedpool syspool;
#if ENABLE_LOGGING
#if NEDMALLOC_STACKBACKTRACEDEPTH
#if defined(WIN32) && defined(_MSC_VER)
#define COPY_STRING(d, s, maxlen) { size_t len=strlen(s); len=(len>=maxlen) ? maxlen-1 : len; memcpy(d, s, len); d[len]=0; } /* truncate so the terminating NUL always fits */
#pragma optimize("g", off)
static int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep, CONTEXT *ct) THROWSPEC
{
*ct=*ep->ContextRecord;
return EXCEPTION_EXECUTE_HANDLER;
}
static DWORD64 __stdcall GetModBase(HANDLE hProcess, DWORD64 dwAddr) THROWSPEC
{
DWORD64 modulebase;
// Try to get the module base if already loaded, otherwise load the module
modulebase=SymGetModuleBase64(hProcess, dwAddr);
if(modulebase)
return modulebase;
else
{
MEMORY_BASIC_INFORMATION stMBI ;
if ( 0 != VirtualQueryEx ( hProcess, (LPCVOID)(size_t)dwAddr, &stMBI, sizeof(stMBI)))
{
int n;
DWORD dwPathLen=0, dwNameLen=0 ;
TCHAR szFile[ MAX_PATH ], szModuleName[ MAX_PATH ] ;
MODULEINFO mi={0};
dwPathLen = GetModuleFileName ( (HMODULE) stMBI.AllocationBase , szFile, MAX_PATH );
dwNameLen = GetModuleBaseName (hProcess, (HMODULE) stMBI.AllocationBase , szModuleName, MAX_PATH );
for(n=dwNameLen; n>0; n--)
{
if(szModuleName[n]=='.')
{
szModuleName[n]=0;
break;
}
}
if(!GetModuleInformation(hProcess, (HMODULE) stMBI.AllocationBase, &mi, sizeof(mi)))
{
//fxmessage("WARNING: GetModuleInformation() returned error code %d\n", GetLastError());
}
if(!SymLoadModule64 ( hProcess, NULL, (PSTR)( (dwPathLen) ? szFile : 0), (PSTR)( (dwNameLen) ? szModuleName : 0),
(DWORD64) mi.lpBaseOfDll, mi.SizeOfImage))
{
//fxmessage("WARNING: SymLoadModule64() returned error code %d\n", GetLastError());
}
//fxmessage("%s, %p, %x, %x\n", szFile, mi.lpBaseOfDll, mi.SizeOfImage, (DWORD) mi.lpBaseOfDll+mi.SizeOfImage);
modulebase=SymGetModuleBase64(hProcess, dwAddr);
return modulebase;
}
}
return 0;
}
extern HANDLE sym_myprocess;
extern VOID (WINAPI *RtlCaptureContextAddr)(PCONTEXT);
extern void DeinitSym(void) THROWSPEC;
static void DoStackWalk(logentry *p) THROWSPEC
{
int i,i2;
HANDLE mythread=(HANDLE) GetCurrentThread();
STACKFRAME64 sf={ 0 };
CONTEXT ct={ 0 };
if(!sym_myprocess)
{
DWORD symopts;
DuplicateHandle(GetCurrentProcess(), GetCurrentProcess(), GetCurrentProcess(), &sym_myprocess, 0, FALSE, DUPLICATE_SAME_ACCESS);
symopts=SymGetOptions();
SymSetOptions(symopts /*| SYMOPT_DEFERRED_LOADS*/ | SYMOPT_LOAD_LINES);
SymInitialize(sym_myprocess, NULL, TRUE);
atexit(DeinitSym);
}
ct.ContextFlags=CONTEXT_FULL;
// Use RtlCaptureContext() if we have it as it saves an exception throw
if((VOID (WINAPI *)(PCONTEXT)) -1==RtlCaptureContextAddr)
RtlCaptureContextAddr=(VOID (WINAPI *)(PCONTEXT)) GetProcAddress(GetModuleHandle(L"kernel32"), "RtlCaptureContext");
if(RtlCaptureContextAddr)
RtlCaptureContextAddr(&ct);
else
{ // This is nasty, but it works
__try
{
int *foo=0;
*foo=78;
}
__except (ExceptionFilter(GetExceptionCode(), GetExceptionInformation(), &ct))
{
}
}
sf.AddrPC.Mode=sf.AddrStack.Mode=sf.AddrFrame.Mode=AddrModeFlat;
#if !(defined(_M_AMD64) || defined(_M_X64))
sf.AddrPC.Offset =ct.Eip;
sf.AddrStack.Offset=ct.Esp;
sf.AddrFrame.Offset=ct.Ebp;
#else
sf.AddrPC.Offset =ct.Rip;
sf.AddrStack.Offset=ct.Rsp;
sf.AddrFrame.Offset=ct.Rbp; // maybe Rdi?
#endif
for(i2=0; i2<NEDMALLOC_STACKBACKTRACEDEPTH; i2++)
{
IMAGEHLP_MODULE64 ihm={ sizeof(IMAGEHLP_MODULE64) };
char temp[MAX_PATH+sizeof(IMAGEHLP_SYMBOL64)];
IMAGEHLP_SYMBOL64 *ihs;
IMAGEHLP_LINE64 ihl={ sizeof(IMAGEHLP_LINE64) };
DWORD64 offset;
if(!StackWalk64(
#if !(defined(_M_AMD64) || defined(_M_X64))
IMAGE_FILE_MACHINE_I386,
#else
IMAGE_FILE_MACHINE_AMD64,
#endif
sym_myprocess, mythread, &sf, &ct, NULL, SymFunctionTableAccess64, GetModBase, NULL))
break;
if(0==sf.AddrPC.Offset)
break;
i=i2;
if(i) // Skip first entry relating to this function
{
DWORD lineoffset=0;
p->stack[i-1].pc=(void *)(size_t) sf.AddrPC.Offset;
if(SymGetModuleInfo64(sym_myprocess, sf.AddrPC.Offset, &ihm))
{
char *leaf;
leaf=strrchr(ihm.ImageName, '\\');
if(!leaf) leaf=ihm.ImageName-1;
COPY_STRING(p->stack[i-1].module, leaf+1, sizeof(p->stack[i-1].module));
}
else strcpy(p->stack[i-1].module, "<unknown>");
//fxmessage("WARNING: SymGetModuleInfo64() returned error code %d\n", GetLastError());
memset(temp, 0, MAX_PATH+sizeof(IMAGEHLP_SYMBOL64));
ihs=(IMAGEHLP_SYMBOL64 *) temp;
ihs->SizeOfStruct=sizeof(IMAGEHLP_SYMBOL64);
ihs->Address=sf.AddrPC.Offset;
ihs->MaxNameLength=MAX_PATH;
if(SymGetSymFromAddr64(sym_myprocess, sf.AddrPC.Offset, &offset, ihs))
{
COPY_STRING(p->stack[i-1].functname, ihs->Name, sizeof(p->stack[i-1].functname));
if(strlen(p->stack[i-1].functname)<sizeof(p->stack[i-1].functname)-8)
{
sprintf(strchr(p->stack[i-1].functname, 0), " +0x%llx", (unsigned long long) offset);
}
}
else
strcpy(p->stack[i-1].functname, "<unknown>");
if(SymGetLineFromAddr64(sym_myprocess, sf.AddrPC.Offset, &lineoffset, &ihl))
{
char *leaf;
p->stack[i-1].lineno=ihl.LineNumber;
leaf=strrchr(ihl.FileName, '\\');
if(!leaf) leaf=ihl.FileName-1;
COPY_STRING(p->stack[i-1].file, leaf+1, sizeof(p->stack[i-1].file));
}
else
strcpy(p->stack[i-1].file, "<unknown>");
}
}
}
#pragma optimize("g", on)
#else
static void DoStackWalk(logentry *p) THROWSPEC
{
void *backtr[NEDMALLOC_STACKBACKTRACEDEPTH];
size_t size;
char **strings;
size_t i2;
size=backtrace(backtr, NEDMALLOC_STACKBACKTRACEDEPTH);
strings=backtrace_symbols(backtr, size);
for(i2=0; i2<size; i2++)
{ // Format can be <file path>(<mangled symbol>+0x<offset>) [<pc>]
// or can be <file path> [<pc>]
int start=0, end=strlen(strings[i2]), which=0, idx;
for(idx=0; idx<end; idx++)
{
if(0==which && (' '==strings[i2][idx] || '('==strings[i2][idx]))
{
int len=FXMIN(idx-start, (int) sizeof(p->stack[i2].file)-1);
memcpy(p->stack[i2].file, strings[i2]+start, len);
p->stack[i2].file[len]=0;
which=(' '==strings[i2][idx]) ? 2 : 1;
start=idx+1;
}
else if(1==which && ')'==strings[i2][idx])
{
FXString functname(strings[i2]+start, idx-start);
FXint offset=functname.rfind("+0x");
FXString rawsymbol(functname.left(offset));
FXString symbol(rawsymbol.length() ? fxdemanglesymbol(rawsymbol, false) : rawsymbol);
symbol.append(functname.mid(offset));
int len=FXMIN(symbol.length(), (int) sizeof(p->stack[i2].functname)-1);
memcpy(p->stack[i2].functname, symbol.text(), len);
p->stack[i2].functname[len]=0;
which=2;
}
else if(2==which && '['==strings[i2][idx])
{
start=idx+1;
which=3;
}
else if(3==which && ']'==strings[i2][idx])
{
FXString v(strings[i2]+start+2, idx-start-2);
p->stack[i2].pc=(void *)(FXuval)v.toULong(0, 16);
}
}
}
free(strings);
}
#endif
#endif
#endif
static FORCEINLINE logentry *LogOperation(threadcache *tc, nedpool *np, LogEntryType type, int mspace, size_t size, void *mem, size_t alignment, unsigned flags, void *returned) THROWSPEC
{
#if ENABLE_LOGGING
if(tc->logentries && NEDMALLOC_TESTLOGENTRY(tc, np, type, mspace, size, mem, alignment, flags, returned))
{
logentry *le;
if(tc->logentriesptr==tc->logentriesend)
{
mchunkptr cp=mem2chunk(tc->logentries);
size_t logentrieslen=chunksize(cp)-overhead_for(cp);
le=(logentry *) CallRealloc(0, tc->logentries, 0, logentrieslen, (logentrieslen*3)/2, 0, M2_ZERO_MEMORY|M2_ALWAYS_MMAP|M2_RESERVE_MULT(8));
if(!le) return 0;
tc->logentriesptr=le+(tc->logentriesptr-tc->logentries);
tc->logentries=le;
cp=mem2chunk(tc->logentries);
logentrieslen=(chunksize(cp)-overhead_for(cp))/sizeof(logentry);
tc->logentriesend=tc->logentries+logentrieslen;
}
le=tc->logentriesptr++;
assert(le+1<=tc->logentriesend);
le->timestamp=GetTimestamp();
le->np=np;
le->type=type;
le->mspace=mspace;
le->size=size;
le->mem=mem;
le->alignment=alignment;
le->flags=flags;
le->returned=returned;
#if NEDMALLOC_STACKBACKTRACEDEPTH
DoStackWalk(le);
#endif
return le;
}
#endif
return 0;
}
static FORCEINLINE NEDMALLOCNOALIASATTR unsigned int size2binidx(size_t _size) THROWSPEC
{ /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */
unsigned int topbit, size=(unsigned int)(_size>>4);
/* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */
#if defined(__GNUC__)
topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size);
#elif defined(_MSC_VER) && _MSC_VER>=1300
{
unsigned long bsrTopBit;
_BitScanReverse(&bsrTopBit, size);
topbit = bsrTopBit;
}
#else
#if 0
union {
unsigned asInt[2];
double asDouble;
};
int n;
asDouble = (double)size + 0.5;
topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023;
#else
{
unsigned int x=size;
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >>16);
x = ~x;
x = x - ((x >> 1) & 0x55555555);
x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
x = (x + (x >> 4)) & 0x0F0F0F0F;
x = x + (x << 8);
x = x + (x << 16);
topbit=31 - (x >> 24);
}
#endif
#endif
return topbit;
}
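/* Worked example (illustrative): size2binidx(48) computes 48>>4 = 3
(binary 11), whose highest set bit is bit 1, so it returns 1.
threadcache_malloc() below then forms bestsize = 1<<(1+4) = 32 and,
since 48 > 32, bumps up to idx 2 with a 64 byte bin. */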
#ifdef FULLSANITYCHECKS
static void tcsanitycheck(threadcacheblk *RESTRICT *RESTRICT ptr) THROWSPEC
{
assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1]));
if(ptr[0] && ptr[1])
{
assert(ptr[0]->isforeign || nedblkmstate(ptr[0]));
assert(ptr[1]->isforeign || nedblkmstate(ptr[1]));
assert(nedblksize(0, ptr[0], 0)>=sizeof(threadcacheblk));
assert(nedblksize(0, ptr[1], 0)>=sizeof(threadcacheblk));
assert(*(unsigned int *) "NEDN"==ptr[0]->magic);
assert(*(unsigned int *) "NEDN"==ptr[1]->magic);
assert(!ptr[0]->prev);
assert(!ptr[1]->next);
if(ptr[0]==ptr[1])
{
assert(!ptr[0]->next);
assert(!ptr[1]->prev);
}
}
}
static void tcfullsanitycheck(threadcache *tc) THROWSPEC
{
threadcacheblk *RESTRICT *RESTRICT tcbptr=tc->bins;
int n;
for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
{
threadcacheblk *RESTRICT b, *RESTRICT ob=0;
tcsanitycheck(tcbptr);
for(b=tcbptr[0]; b; ob=b, b=b->next)
{
assert(b->isforeign || nedblkmstate(b));
assert(nedblksize(0, b, 0)>=sizeof(threadcacheblk));
assert(*(unsigned int *) "NEDN"==b->magic);
assert(!ob || ob->next==b);
assert(!ob || b->prev==ob);
}
}
}
#endif
static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC;
static NOINLINE void RemoveCacheEntries(nedpool *RESTRICT p, threadcache *RESTRICT tc, unsigned int age) THROWSPEC
{
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
if(tc->freeInCache)
{
threadcacheblk *RESTRICT *RESTRICT tcbptr=tc->bins;
int n;
for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
{
threadcacheblk *RESTRICT *RESTRICT tcb=tcbptr+1; /* come from oldest end of list */
/*tcsanitycheck(tcbptr);*/
for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; )
{
threadcacheblk *RESTRICT f=*tcb;
size_t blksize=f->size; /*nedblksize(f);*/
assert(blksize<=nedblksize(0, f, 0));
assert(blksize);
#ifdef FULLSANITYCHECKS
assert(*(unsigned int *) "NEDN"==(*tcb)->magic);
#endif
*tcb=(*tcb)->prev;
if(*tcb)
(*tcb)->next=0;
else
*tcbptr=0;
tc->freeInCache-=blksize;
assert((long) tc->freeInCache>=0);
CallFree(0, f, f->isforeign);
/*tcsanitycheck(tcbptr);*/
LogOperation(tc, p, LOGENTRY_THREADCACHE_CLEAN, age, blksize, f, 0, 0, 0);
}
}
}
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
}
size_t nedflushlogs(nedpool *p, char *filepath) THROWSPEC
{
size_t count=0;
if(!p)
{
p=&syspool;
if(!syspool.threads) InitPool(&syspool, 0, -1);
}
if(p->caches)
{
threadcache *tc;
int n;
#if ENABLE_LOGGING
int haslogentries=0;
#endif
for(n=0; n<THREADCACHEMAXCACHES; n++)
{
if((tc=p->caches[n]))
{
count+=tc->freeInCache;
tc->frees++;
RemoveCacheEntries(p, tc, 0);
assert(!tc->freeInCache);
#if ENABLE_LOGGING
haslogentries|=!!tc->logentries;
#endif
}
}
#if ENABLE_LOGGING
if(haslogentries)
{
char buffer[MAX_PATH]=NEDMALLOC_LOGFILE;
FILE *oh;
fpos_t pos1, pos2;
if(!filepath) filepath=buffer;
oh=fopen(filepath, "r+");
while(!oh)
{
char *bptr;
if((oh=fopen(filepath, "w"))) break;
if(ENOSPC==errno) break;
bptr=strrchr(filepath, '.');
if(!bptr || bptr-filepath>=MAX_PATH-6) abort();
memcpy(bptr, "!.csv", 6);
}
if(oh)
{
fgetpos(oh, &pos1);
fseek(oh, 0, SEEK_END);
fgetpos(oh, &pos2);
if(pos1==pos2)
fprintf(oh, "Timestamp, Pool, Operation, MSpace, Size, Block, Alignment, Flags, Returned,\"Stack Backtrace\"\n");
for(n=0; n<THREADCACHEMAXCACHES; n++)
{
if((tc=p->caches[n]) && tc->logentries)
{
logentry *le;
for(le=tc->logentries; le<tc->logentriesptr; le++)
{
const char *LogEntryTypeString=LogEntryTypeStrings[size2binidx(((size_t)le->type)<<4)];
char stackbacktrace[16384]="?";
#if NEDMALLOC_STACKBACKTRACEDEPTH
char *sbtp=stackbacktrace;
int i;
for(i=0; i<NEDMALLOC_STACKBACKTRACEDEPTH && le->stack[i].pc; i++)
{
sbtp+=sprintf(sbtp, "0x%p:%s:%s (%s:%u),",
le->stack[i].pc, le->stack[i].module, le->stack[i].functname, le->stack[i].file, le->stack[i].lineno);
if(sbtp>=stackbacktrace+sizeof(stackbacktrace)) abort();
}
if(NEDMALLOC_STACKBACKTRACEDEPTH==i)
strcpy(sbtp, "<backtrace may continue ...>");
else
strcpy(sbtp, "<backtrace ends>");
if(strchr(sbtp, 0)>=stackbacktrace+sizeof(stackbacktrace)) abort();
#endif
fprintf(oh, "%llu, 0x%p, %s, %d, %Iu, 0x%p, %Iu, 0x%x, 0x%p,\"%s\"\n",
le->timestamp, le->np, LogEntryTypeString, le->mspace, le->size, le->mem, le->alignment, le->flags, le->returned, stackbacktrace);
}
CallFree(0, tc->logentries, 0);
tc->logentries=tc->logentriesptr=tc->logentriesend=0;
}
}
fclose(oh);
}
}
#endif
}
return count;
}
static void DestroyCaches(nedpool *RESTRICT p) THROWSPEC
{
if(p->caches)
{
threadcache *tc;
int n;
nedflushlogs(p, 0);
for(n=0; n<THREADCACHEMAXCACHES; n++)
{
if((tc=p->caches[n]))
{
tc->mymspace=-1;
tc->threadid=0;
CallFree(0, tc, 0);
p->caches[n]=0;
}
}
}
}
static NOINLINE threadcache *AllocCache(nedpool *RESTRICT p) THROWSPEC
{
threadcache *tc=0;
int n, end;
#if USE_LOCKS
ACQUIRE_LOCK(&p->mutex);
#endif
for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++);
if(THREADCACHEMAXCACHES==n)
{ /* List exhausted, so disable for this thread */
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
#endif
return 0;
}
tc=p->caches[n]=(threadcache *) CallMalloc(p->m[0], sizeof(threadcache), 0, M2_ZERO_MEMORY);
if(!tc)
{
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
#endif
return 0;
}
#ifdef FULLSANITYCHECKS
tc->magic1=*(unsigned int *)"NEDMALC1";
tc->magic2=*(unsigned int *)"NEDMALC2";
#endif
tc->threadid=
#if USE_LOCKS
(long)(size_t)CURRENT_THREAD;
#else
1;
#endif
for(end=0; p->m[end]; end++);
tc->mymspace=abs(tc->threadid) % end;
#if ENABLE_LOGGING
{
mchunkptr cp;
size_t logentrieslen=2048/sizeof(logentry); /* One page */
tc->logentries=tc->logentriesptr=(logentry *) CallMalloc(p->m[0], logentrieslen*sizeof(logentry), 0, M2_ZERO_MEMORY|M2_ALWAYS_MMAP|M2_RESERVE_MULT(8));
if(!tc->logentries)
{
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
#endif
return 0;
}
cp=mem2chunk(tc->logentries);
logentrieslen=(chunksize(cp)-overhead_for(cp))/sizeof(logentry);
tc->logentriesend=tc->logentries+logentrieslen;
}
#endif
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
#endif
if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort();
return tc;
}
static void *threadcache_malloc(nedpool *RESTRICT p, threadcache *RESTRICT tc, size_t *RESTRICT _size) THROWSPEC
{
void *RESTRICT ret=0;
size_t size=*_size, blksize=0;
unsigned int bestsize;
unsigned int idx=size2binidx(size);
threadcacheblk *RESTRICT blk, *RESTRICT *RESTRICT binsptr;
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
/* Calculate best fit bin size */
bestsize=1<<(idx+4);
#if 0
/* Finer grained bin fit */
idx<<=1;
if(size>bestsize)
{
idx++;
bestsize+=bestsize>>1;
}
if(size>bestsize)
{
idx++;
bestsize=1<<(4+(idx>>1));
}
#else
if(size>bestsize)
{
idx++;
bestsize<<=1;
}
#endif
assert(bestsize>=size);
if(size<bestsize) size=bestsize;
assert(size<=THREADCACHEMAX);
assert(idx<=THREADCACHEMAXBINS);
binsptr=&tc->bins[idx*2];
/* Try to match close, but move up a bin if necessary */
blk=*binsptr;
if(!blk || blk->size<size)
{ /* Bump it up a bin */
if(idx<THREADCACHEMAXBINS)
{
idx++;
binsptr+=2;
blk=*binsptr;
}
}
if(blk)
{
blksize=blk->size; /*nedblksize(blk);*/
assert(nedblksize(0, blk, 0)>=blksize);
assert(blksize>=size);
if(blk->next)
blk->next->prev=0;
*binsptr=blk->next;
if(!*binsptr)
binsptr[1]=0;
#ifdef FULLSANITYCHECKS
blk->magic=0;
#endif
assert(binsptr[0]!=blk && binsptr[1]!=blk);
assert(nedblksize(0, blk, 0)>=sizeof(threadcacheblk) && nedblksize(0, blk, 0)<=THREADCACHEMAX+CHUNK_OVERHEAD);
/*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) _size);*/
ret=(void *) blk;
}
++tc->mallocs;
if(ret)
{
assert(blksize>=size);
++tc->successes;
tc->freeInCache-=blksize;
assert((long) tc->freeInCache>=0);
}
#if defined(DEBUG) && 0
if(!(tc->mallocs & 0xfff))
{
printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs,
(float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache);
}
#endif
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
*_size=size;
return ret;
}
static NOINLINE void ReleaseFreeInCache(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace) THROWSPEC
{
unsigned int age=THREADCACHEMAXFREESPACE/8192;
#if USE_LOCKS
/*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/
#endif
while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE)
{
RemoveCacheEntries(p, tc, age);
/*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/
age>>=1;
}
#if USE_LOCKS
/*RELEASE_LOCK(&p->m[mymspace]->mutex);*/
#endif
}
static void threadcache_free(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace, void *RESTRICT mem, size_t size, int isforeign) THROWSPEC
{
unsigned int bestsize;
unsigned int idx=size2binidx(size);
threadcacheblk *RESTRICT *RESTRICT binsptr, *RESTRICT tck=(threadcacheblk *RESTRICT) mem;
assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD);
#ifdef DEBUG
/* Make sure this is a valid memory block */
assert(nedblksize(0, mem, 0));
#endif
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
/* Calculate best fit bin size */
bestsize=1<<(idx+4);
#if 0
/* Finer grained bin fit */
idx<<=1;
if(size>bestsize)
{
unsigned int biggerbestsize=bestsize+(bestsize>>1); /* 1.5x, matching the finer-grained fit on the malloc side */
if(size>=biggerbestsize)
{
idx++;
bestsize=biggerbestsize;
}
}
#endif
if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */
size=bestsize;
binsptr=&tc->bins[idx*2];
assert(idx<=THREADCACHEMAXBINS);
if(tck==*binsptr)
{
fprintf(stderr, "nedmalloc: Attempt to free already freed memory block %p - aborting!\n", tck);
abort();
}
#ifdef FULLSANITYCHECKS
tck->magic=*(unsigned int *) "NEDN";
#endif
tck->isforeign=isforeign;
tck->lastUsed=++tc->frees;
tck->size=(unsigned int) size;
tck->next=*binsptr;
tck->prev=0;
if(tck->next)
tck->next->prev=tck;
else
binsptr[1]=tck;
assert(!*binsptr || (*binsptr)->size==tck->size);
*binsptr=tck;
assert(tck==tc->bins[idx*2]);
assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck);
/*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/
tc->freeInCache+=size;
#ifdef FULLSANITYCHECKS
tcfullsanitycheck(tc);
#endif
#if 1
if(tc->freeInCache>=THREADCACHEMAXFREESPACE)
ReleaseFreeInCache(p, tc, mymspace);
#endif
}
static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC
{ /* threads is -1 for system pool */
ensure_initialization();
ACQUIRE_MALLOC_GLOBAL_LOCK();
if(p->threads) goto done;
#if USE_LOCKS
if(INITIAL_LOCK(&p->mutex)) goto err;
#endif
if(TLSALLOC(&p->mycache)) goto err;
#if USE_ALLOCATOR==0
p->m[0]=(mstate) mspacecounter++;
#elif USE_ALLOCATOR==1
if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err;
p->m[0]->extp=p;
#endif
p->threads=(threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : (!threads) ? DEFAULTMAXTHREADSINPOOL : threads;
done:
RELEASE_MALLOC_GLOBAL_LOCK();
return 1;
err:
if(threads<0)
abort(); /* If you can't allocate for system pool, we're screwed */
DestroyCaches(p);
if(p->m[0])
{
#if USE_ALLOCATOR==1
destroy_mspace(p->m[0]);
#endif
p->m[0]=0;
}
if(p->mycache)
{
if(TLSFREE(p->mycache)) abort();
p->mycache=0;
}
RELEASE_MALLOC_GLOBAL_LOCK();
return 0;
}
static NOINLINE mstate FindMSpace(nedpool *RESTRICT p, threadcache *RESTRICT tc, int *RESTRICT lastUsed, size_t size) THROWSPEC
{ /* Gets called when thread's last used mspace is in use. The strategy
is to run through the list of all available mspaces looking for an
unlocked one and if we fail, we create a new one so long as we don't
exceed p->threads */
int n, end;
n=end=*lastUsed+1;
#if USE_LOCKS
for(; p->m[n]; end=++n)
{
if(TRY_LOCK(&p->m[n]->mutex)) goto found;
}
for(n=0; n<*lastUsed && p->m[n]; n++)
{
if(TRY_LOCK(&p->m[n]->mutex)) goto found;
}
if(end<p->threads)
{
mstate temp;
#if USE_ALLOCATOR==0
temp=(mstate) mspacecounter++;
#elif USE_ALLOCATOR==1
if(!(temp=(mstate) create_mspace(size, 1)))
goto badexit;
#endif
/* Now we're ready to modify the lists, we lock */
ACQUIRE_LOCK(&p->mutex);
while(p->m[end] && end<p->threads)
end++;
if(end>=p->threads)
{ /* Drat, must destroy it now */
RELEASE_LOCK(&p->mutex);
#if USE_ALLOCATOR==1
destroy_mspace((mstate) temp);
#endif
goto badexit;
}
/* We really want to make sure this goes into memory now but we
have to be careful of breaking aliasing rules, so write it twice */
{
volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end];
*_m=(p->m[end]=temp);
}
ACQUIRE_LOCK(&p->m[end]->mutex);
/*printf("Created mspace idx %d\n", end);*/
RELEASE_LOCK(&p->mutex);
n=end;
goto found;
}
/* Let it lock on the last one it used */
badexit:
ACQUIRE_LOCK(&p->m[*lastUsed]->mutex);
return p->m[*lastUsed];
#endif
found:
*lastUsed=n;
if(tc)
tc->mymspace=n;
else
{
if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort();
}
return p->m[n];
}
typedef struct PoolList_t
{
size_t size; /* Size of list */
size_t length; /* Actual entries in list */
#ifdef DEBUG
nedpool *list[1]; /* Force testing of list expansion */
#else
nedpool *list[16];
#endif
} PoolList;
#if USE_LOCKS
static MLOCK_T poollistlock;
#endif
static PoolList *poollist;
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC
{
nedpool *ret=0;
if(!poollist)
{
PoolList *newpoollist=0;
if(!(newpoollist=(PoolList *) nedpcalloc(0, 1, sizeof(PoolList)+sizeof(nedpool *)))) return 0;
#if USE_LOCKS
INITIAL_LOCK(&poollistlock);
ACQUIRE_LOCK(&poollistlock);
#endif
poollist=newpoollist;
poollist->size=sizeof(poollist->list)/sizeof(nedpool *);
}
#if USE_LOCKS
else
ACQUIRE_LOCK(&poollistlock);
#endif
if(poollist->length==poollist->size)
{
PoolList *newpoollist=0;
size_t newsize=0;
newsize=sizeof(PoolList)+(poollist->size+1)*sizeof(nedpool *);
if(!(newpoollist=(PoolList *) nedprealloc(0, poollist, newsize))) goto badexit;
poollist=newpoollist;
memset(&poollist->list[poollist->size], 0, newsize-((size_t)&poollist->list[poollist->size]-(size_t)&poollist->list[0]));
poollist->size=((newsize-((char *)&poollist->list[0]-(char *)poollist))/sizeof(nedpool *))-1;
assert(poollist->size>poollist->length);
}
if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) goto badexit;
if(!InitPool(ret, capacity, threads))
{
nedpfree(0, ret);
goto badexit;
}
poollist->list[poollist->length++]=ret;
badexit:
{
#if USE_LOCKS
RELEASE_LOCK(&poollistlock);
#endif
}
return ret;
}
void neddestroypool(nedpool *p) THROWSPEC
{
unsigned int n;
#if USE_LOCKS
ACQUIRE_LOCK(&p->mutex);
#endif
DestroyCaches(p);
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
destroy_mspace(p->m[n]);
#endif
p->m[n]=0;
}
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
DESTROY_LOCK(&p->mutex);
#endif
if(TLSFREE(p->mycache)) abort();
nedpfree(0, p);
#if USE_LOCKS
ACQUIRE_LOCK(&poollistlock);
#endif
assert(poollist);
for(n=0; n<poollist->length && poollist->list[n]!=p; n++);
assert(n!=poollist->length);
memmove(&poollist->list[n], &poollist->list[n+1], (size_t)&poollist->list[poollist->length]-(size_t)&poollist->list[n]);
if(!--poollist->length)
{
assert(!poollist->list[0]);
nedpfree(0, poollist);
poollist=0;
}
#if USE_LOCKS
RELEASE_LOCK(&poollistlock);
#endif
}
void neddestroysyspool() THROWSPEC
{
nedpool *p=&syspool;
int n;
#if USE_LOCKS
ACQUIRE_LOCK(&p->mutex);
#endif
DestroyCaches(p);
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
destroy_mspace(p->m[n]);
#endif
p->m[n]=0;
}
/* Render syspool unusable */
for(n=0; n<THREADCACHEMAXCACHES; n++)
p->caches[n]=(threadcache *)(size_t)(sizeof(size_t)>4 ? 0xdeadbeefdeadbeefULL : 0xdeadbeefUL);
for(n=0; n<MAXTHREADSINPOOL+1; n++)
p->m[n]=(mstate)(size_t)(sizeof(size_t)>4 ? 0xdeadbeefdeadbeefULL : 0xdeadbeefUL);
if(TLSFREE(p->mycache)) abort();
#if USE_LOCKS
RELEASE_LOCK(&p->mutex);
DESTROY_LOCK(&p->mutex);
#endif
}
nedpool **nedpoollist() THROWSPEC
{
nedpool **ret=0;
if(poollist)
{
#if USE_LOCKS
ACQUIRE_LOCK(&poollistlock);
#endif
if(!(ret=(nedpool **) nedmalloc((poollist->length+1)*sizeof(nedpool *)))) goto badexit;
memcpy(ret, poollist->list, (poollist->length+1)*sizeof(nedpool *));
badexit:
{
#if USE_LOCKS
RELEASE_LOCK(&poollistlock);
#endif
}
}
return ret;
}
void nedpsetvalue(nedpool *p, void *v) THROWSPEC
{
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
p->uservalue=v;
}
void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
{
nedpool *np=0;
mstate fm=nedblkmstate(mem);
if(!fm || !fm->extp) return 0;
np=(nedpool *) fm->extp;
if(p) *p=np;
return np->uservalue;
}
void nedtrimthreadcache(nedpool *p, int disable) THROWSPEC
{
int mycache;
if(!p)
{
p=&syspool;
if(!syspool.threads) InitPool(&syspool, 0, -1);
}
mycache=(int)(size_t) TLSGET(p->mycache);
if(!mycache)
{ /* Set to mspace 0 */
if(disable && TLSSET(p->mycache, (void *)(size_t)-1)) abort();
}
else if(mycache>0)
{ /* Set to last used mspace */
threadcache *tc=p->caches[mycache-1];
#if defined(DEBUG)
printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n",
100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs);
#endif
if(disable && TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort();
tc->frees++;
RemoveCacheEntries(p, tc, 0);
assert(!tc->freeInCache);
if(disable)
{
tc->mymspace=-1;
tc->threadid=0;
CallFree(0, p->caches[mycache-1], 0);
p->caches[mycache-1]=0;
}
}
}
void neddisablethreadcache(nedpool *p) THROWSPEC
{
nedtrimthreadcache(p, 1);
}
#if USE_LOCKS && USE_ALLOCATOR==1
#define GETMSPACE(m,p,tc,ms,s,action) \
do \
{ \
mstate m = GetMSpace((p),(tc),(ms),(s)); \
action; \
RELEASE_LOCK(&m->mutex); \
} while (0)
#else
#define GETMSPACE(m,p,tc,ms,s,action) \
do \
{ \
mstate m = GetMSpace((p),(tc),(ms),(s)); \
action; \
} while (0)
#endif
static FORCEINLINE mstate GetMSpace(nedpool *RESTRICT p, threadcache *RESTRICT tc, int mymspace, size_t size) THROWSPEC
{ /* Returns a locked and ready for use mspace */
mstate m=p->m[mymspace];
assert(m);
#if USE_LOCKS && USE_ALLOCATOR==1
if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);
/*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/
#endif
return m;
}
static NOINLINE void GetThreadCache_cold1(nedpool *RESTRICT *RESTRICT p) THROWSPEC
{
*p=&syspool;
if(!syspool.threads) InitPool(&syspool, 0, -1);
}
static NOINLINE void GetThreadCache_cold2(nedpool *RESTRICT *RESTRICT p, threadcache *RESTRICT *RESTRICT tc, int *RESTRICT mymspace, int mycache) THROWSPEC
{
if(!mycache)
{ /* Need to allocate a new cache */
*tc=AllocCache(*p);
if(!*tc)
{ /* Disable */
if(TLSSET((*p)->mycache, (void *)(size_t)-1)) abort();
*mymspace=0;
}
else
*mymspace=(*tc)->mymspace;
}
else
{ /* Cache disabled, but we do have an assigned thread pool */
*tc=0;
*mymspace=-mycache-1;
}
}
static FORCEINLINE void GetThreadCache(nedpool *RESTRICT *RESTRICT p, threadcache *RESTRICT *RESTRICT tc, int *RESTRICT mymspace, size_t *RESTRICT size) THROWSPEC
{
int mycache;
#if THREADCACHEMAX
if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk);
#endif
if(!*p)
GetThreadCache_cold1(p);
mycache=(int)(size_t) TLSGET((*p)->mycache);
if(mycache>0)
{ /* Already have a cache */
*tc=(*p)->caches[mycache-1];
*mymspace=(*tc)->mymspace;
}
else GetThreadCache_cold2(p, tc, mymspace, mycache);
assert(*mymspace>=0);
#if USE_LOCKS
assert(!(*tc) || (long)(size_t)CURRENT_THREAD==(*tc)->threadid);
#endif
#ifdef FULLSANITYCHECKS
if(*tc)
{
if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2)
{
abort();
}
}
#endif
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmalloc2(nedpool *p, size_t size, size_t alignment, unsigned flags) THROWSPEC
{
void *ret=0;
threadcache *tc;
int mymspace;
GetThreadCache(&p, &tc, &mymspace, &size);
#if THREADCACHEMAX
if(alignment<=MALLOC_ALIGNMENT && !(flags & NM_FLAGS_MASK) && tc && size<=THREADCACHEMAX)
{ /* Use the thread cache */
if((ret=threadcache_malloc(p, tc, &size)))
{
if((flags & M2_ZERO_MEMORY))
memset(ret, 0, size);
LogOperation(tc, p, LOGENTRY_THREADCACHE_MALLOC, mymspace, size, 0, alignment, flags, ret);
}
}
#endif
if(!ret)
{ /* Use this thread's mspace */
GETMSPACE(m, p, tc, mymspace, size,
ret=CallMalloc(m, size, alignment, flags));
if(ret)
LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, size, 0, alignment, flags, ret);
}
LogOperation(tc, p, LOGENTRY_MALLOC, mymspace, size, 0, alignment, flags, ret);
return ret;
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedprealloc2(nedpool *p, void *mem, size_t size, size_t alignment, unsigned flags) THROWSPEC
{
void *ret=0;
threadcache *tc;
int mymspace, isforeign=1;
size_t memsize;
if(!mem) return nedpmalloc2(p, size, alignment, flags);
#if REALLOC_ZERO_BYTES_FREES
if(!size)
{
nedpfree2(p, mem, flags);
return 0;
}
#endif
memsize=nedblksize(&isforeign, mem, flags);
assert(memsize);
if(!memsize)
{
fprintf(stderr, "nedmalloc: nedprealloc() called with a block not created by nedmalloc!\n");
abort();
}
else if(size<=memsize && memsize-size<
#ifdef DEBUG
32
#else
1024
#endif
) /* If the new size is equal or up to 1KB (32 bytes in DEBUG) smaller than the existing block, no-op it */
return mem;
GetThreadCache(&p, &tc, &mymspace, &size);
#if THREADCACHEMAX
if(alignment<=MALLOC_ALIGNMENT && !(flags & NM_FLAGS_MASK) && tc && size && size<=THREADCACHEMAX)
{ /* Use the thread cache */
if((ret=threadcache_malloc(p, tc, &size)))
{
size_t tocopy=memsize<size ? memsize : size;
memcpy(ret, mem, tocopy);
if((flags & M2_ZERO_MEMORY) && size>memsize)
memset((void *)((size_t)ret+memsize), 0, size-memsize);
LogOperation(tc, p, LOGENTRY_THREADCACHE_MALLOC, mymspace, size, mem, alignment, flags, ret);
if(!isforeign && memsize>=sizeof(threadcacheblk) && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD))
{
threadcache_free(p, tc, mymspace, mem, memsize, isforeign);
LogOperation(tc, p, LOGENTRY_THREADCACHE_FREE, mymspace, memsize, mem, 0, 0, 0);
}
else
{
CallFree(0, mem, isforeign);
LogOperation(tc, p, LOGENTRY_POOL_FREE, mymspace, memsize, mem, 0, 0, 0);
}
}
}
#endif
if(!ret)
{ /* Reallocs always happen in the mspace the original allocation came from,
so skip locking the preferred mspace for this thread */
ret=CallRealloc(p->m[mymspace], mem, isforeign, memsize, size, alignment, flags);
if(ret)
LogOperation(tc, p, LOGENTRY_POOL_REALLOC, mymspace, size, mem, alignment, flags, ret);
}
LogOperation(tc, p, LOGENTRY_REALLOC, mymspace, size, mem, alignment, flags, ret);
return ret;
}
NEDMALLOCNOALIASATTR void nedpfree2(nedpool *p, void *mem, unsigned flags) THROWSPEC
{ /* Frees always happen in the mspace the allocation came from, so skip
locking the preferred mspace for this thread */
threadcache *tc;
int mymspace, isforeign=1;
size_t memsize;
if(!mem)
{ /* If you tried this on FreeBSD you'd be sorry! */
#ifdef DEBUG
fprintf(stderr, "nedmalloc: WARNING nedpfree() called with zero. This is not portable behaviour!\n");
#endif
return;
}
memsize=nedblksize(&isforeign, mem, flags);
assert(memsize);
if(!memsize)
{
fprintf(stderr, "nedmalloc: nedpfree() called with a block not created by nedmalloc!\n");
abort();
}
GetThreadCache(&p, &tc, &mymspace, 0);
#if THREADCACHEMAX
if(mem && tc && !isforeign && memsize>=sizeof(threadcacheblk) && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD))
{
threadcache_free(p, tc, mymspace, mem, memsize, isforeign);
LogOperation(tc, p, LOGENTRY_THREADCACHE_FREE, mymspace, memsize, mem, 0, 0, 0);
}
else
#endif
{
CallFree(0, mem, isforeign);
LogOperation(tc, p, LOGENTRY_POOL_FREE, mymspace, memsize, mem, 0, 0, 0);
}
LogOperation(tc, p, LOGENTRY_FREE, mymspace, memsize, mem, 0, 0, 0);
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC
{
unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, size);
return nedpmalloc2(p, size, 0, flags);
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC
{
size_t bytes=no*size;
unsigned flags;
/* Avoid multiplication overflow. */
if(size && no!=bytes/size)
return 0;
flags=NEDMALLOC_FORCERESERVE(p, 0, bytes);
return nedpmalloc2(p, bytes, 0, M2_ZERO_MEMORY|flags);
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC
{
unsigned flags=NEDMALLOC_FORCERESERVE(p, mem, size);
#if ENABLE_USERMODEPAGEALLOCATOR
/* If the user mode page allocator is turned on in a 32 bit process,
don't automatically reserve eight times the address space. */
if(8==sizeof(size_t) || !OSHavePhysicalPageSupport())
#endif
{ /* If the caller reallocs even once, it's probably wise to turn on address space reservation.
If the size is larger than mmap_threshold then it'll set the reserve. */
if(!(flags & M2_RESERVE_MASK)) flags=M2_RESERVE_MULT(8);
}
return nedprealloc2(p, mem, size, 0, flags);
}
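/* Illustrative note: M2_RESERVE_MULT(8) asks for eight times the requested
size in reserved address space, so a block that keeps growing can expand in
place instead of being copied on every realloc. */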
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC
{
unsigned flags=NEDMALLOC_FORCERESERVE(p, 0, bytes);
return nedpmalloc2(p, bytes, alignment, flags);
}
NEDMALLOCNOALIASATTR void nedpfree(nedpool *p, void *mem) THROWSPEC
{
nedpfree2(p, mem, 0);
}
struct nedmallinfo nedpmallinfo(nedpool *p) THROWSPEC
{
int n;
struct nedmallinfo ret={0};
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1 && !NO_MALLINFO
struct mallinfo t=mspace_mallinfo(p->m[n]);
ret.arena+=t.arena;
ret.ordblks+=t.ordblks;
ret.hblkhd+=t.hblkhd;
ret.usmblks+=t.usmblks;
ret.uordblks+=t.uordblks;
ret.fordblks+=t.fordblks;
ret.keepcost+=t.keepcost;
#endif
}
return ret;
}
int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC
{
#if USE_ALLOCATOR==1
return mspace_mallopt(parno, value);
#else
return 0;
#endif
}
NEDMALLOCNOALIASATTR void* nedmalloc_internals(size_t *granularity, size_t *magic) THROWSPEC
{
#if USE_ALLOCATOR==1
if(granularity) *granularity=mparams.granularity;
if(magic) *magic=mparams.magic;
return (void *) &syspool;
#else
if(granularity) *granularity=0;
if(magic) *magic=0;
return 0;
#endif
}
int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC
{
int n, ret=0;
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
ret+=mspace_trim(p->m[n], pad);
#endif
}
return ret;
}
void nedpmalloc_stats(nedpool *p) THROWSPEC
{
int n;
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
mspace_malloc_stats(p->m[n]);
#endif
}
}
size_t nedpmalloc_footprint(nedpool *p) THROWSPEC
{
size_t ret=0;
int n;
if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
for(n=0; p->m[n]; n++)
{
#if USE_ALLOCATOR==1
ret+=mspace_footprint(p->m[n]);
#endif
}
return ret;
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC
{
void **ret;
threadcache *tc;
int mymspace;
GetThreadCache(&p, &tc, &mymspace, &elemsize);
#if USE_ALLOCATOR==0
GETMSPACE(m, p, tc, mymspace, elemsno*elemsize,
ret=unsupported_operation("independent_calloc"));
#elif USE_ALLOCATOR==1
GETMSPACE(m, p, tc, mymspace, elemsno*elemsize,
ret=mspace_independent_calloc(m, elemsno, elemsize, chunks));
#endif
#if ENABLE_LOGGING
if(ret && (ENABLE_LOGGING & LOGENTRY_POOL_MALLOC))
{
size_t n;
for(n=0; n<elemsno; n++)
{
LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, elemsize, 0, 0, M2_ZERO_MEMORY, ret[n]);
}
}
#endif
return ret;
}
NEDMALLOCNOALIASATTR NEDMALLOCPTRATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC
{
void **ret;
threadcache *tc;
int mymspace;
size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
if(!adjustedsizes) return 0;
for(i=0; i<elems; i++)
adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
GetThreadCache(&p, &tc, &mymspace, 0);
#if USE_ALLOCATOR==0
GETMSPACE(m, p, tc, mymspace, 0,
ret=unsupported_operation("independent_comalloc"));
#elif USE_ALLOCATOR==1
GETMSPACE(m, p, tc, mymspace, 0,
ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks));
#endif
#if ENABLE_LOGGING
if(ret && (ENABLE_LOGGING & LOGENTRY_POOL_MALLOC))
{
size_t n;
for(n=0; n<elems; n++)
{
LogOperation(tc, p, LOGENTRY_POOL_MALLOC, mymspace, sizes[n], 0, 0, 0, ret[n]);
}
}
#endif
return ret;
}
#if defined(__cplusplus)
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3668_0 |
crossvul-cpp_data_bad_2020_2 | /*
* contrib/hstore/hstore_op.c
*/
#include "postgres.h"
#include "access/hash.h"
#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "utils/builtins.h"
#include "hstore.h"
/* old names for C functions */
HSTORE_POLLUTE(hstore_fetchval, fetchval);
HSTORE_POLLUTE(hstore_exists, exists);
HSTORE_POLLUTE(hstore_defined, defined);
HSTORE_POLLUTE(hstore_delete, delete);
HSTORE_POLLUTE(hstore_concat, hs_concat);
HSTORE_POLLUTE(hstore_contains, hs_contains);
HSTORE_POLLUTE(hstore_contained, hs_contained);
HSTORE_POLLUTE(hstore_akeys, akeys);
HSTORE_POLLUTE(hstore_avals, avals);
HSTORE_POLLUTE(hstore_skeys, skeys);
HSTORE_POLLUTE(hstore_svals, svals);
HSTORE_POLLUTE(hstore_each, each);
/*
* We're often finding a sequence of keys in ascending order. The
* "lowbound" parameter is used to cache lower bounds of searches
* between calls, based on this assumption. Pass NULL for it for
* one-off or unordered searches.
*/
int
hstoreFindKey(HStore *hs, int *lowbound, char *key, int keylen)
{
HEntry *entries = ARRPTR(hs);
int stopLow = lowbound ? *lowbound : 0;
int stopHigh = HS_COUNT(hs);
int stopMiddle;
char *base = STRPTR(hs);
while (stopLow < stopHigh)
{
int difference;
stopMiddle = stopLow + (stopHigh - stopLow) / 2;
if (HS_KEYLEN(entries, stopMiddle) == keylen)
difference = memcmp(HS_KEY(entries, base, stopMiddle), key, keylen);
else
difference = (HS_KEYLEN(entries, stopMiddle) > keylen) ? 1 : -1;
if (difference == 0)
{
if (lowbound)
*lowbound = stopMiddle + 1;
return stopMiddle;
}
else if (difference < 0)
stopLow = stopMiddle + 1;
else
stopHigh = stopMiddle;
}
if (lowbound)
*lowbound = stopLow;
return -1;
}
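/*
 * Usage sketch (illustrative): callers probing several keys already sorted
 * in ascending (keylen,key) order share one cursor across calls:
 *
 *		int lowbound = 0;
 *		for (i = 0; i < nkeys; i++)
 *			idx = hstoreFindKey(hs, &lowbound,
 *								pairs[i].key, pairs[i].keylen);
 *
 * Each binary search then starts at the previous lower bound, exactly as
 * hstore_exists_any/_all below do.
 */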
Pairs *
hstoreArrayToPairs(ArrayType *a, int *npairs)
{
Datum *key_datums;
bool *key_nulls;
int key_count;
Pairs *key_pairs;
int bufsiz;
int i,
j;
deconstruct_array(a,
TEXTOID, -1, false, 'i',
&key_datums, &key_nulls, &key_count);
if (key_count == 0)
{
*npairs = 0;
return NULL;
}
key_pairs = palloc(sizeof(Pairs) * key_count);
for (i = 0, j = 0; i < key_count; i++)
{
if (!key_nulls[i])
{
key_pairs[j].key = VARDATA(key_datums[i]);
key_pairs[j].keylen = VARSIZE(key_datums[i]) - VARHDRSZ;
key_pairs[j].val = NULL;
key_pairs[j].vallen = 0;
key_pairs[j].needfree = 0;
key_pairs[j].isnull = 1;
j++;
}
}
*npairs = hstoreUniquePairs(key_pairs, j, &bufsiz);
return key_pairs;
}
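/*
 * Sketch (illustrative): given ARRAY['b','a','a'], deconstruct_array yields
 * three text datums; hstoreUniquePairs() then sorts and de-duplicates them,
 * so *npairs comes back as 2 with keys ordered by (keylen,key). Null array
 * elements are skipped entirely.
 */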
PG_FUNCTION_INFO_V1(hstore_fetchval);
Datum hstore_fetchval(PG_FUNCTION_ARGS);
Datum
hstore_fetchval(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
HEntry *entries = ARRPTR(hs);
text *out;
int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
if (idx < 0 || HS_VALISNULL(entries, idx))
PG_RETURN_NULL();
out = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), idx),
HS_VALLEN(entries, idx));
PG_RETURN_TEXT_P(out);
}
PG_FUNCTION_INFO_V1(hstore_exists);
Datum hstore_exists(PG_FUNCTION_ARGS);
Datum
hstore_exists(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
PG_RETURN_BOOL(idx >= 0);
}
PG_FUNCTION_INFO_V1(hstore_exists_any);
Datum hstore_exists_any(PG_FUNCTION_ARGS);
Datum
hstore_exists_any(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *keys = PG_GETARG_ARRAYTYPE_P(1);
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys);
int i;
int lowbound = 0;
bool res = false;
/*
* we exploit the fact that the pairs list is already sorted into strictly
* increasing order to narrow the hstoreFindKey search; each search can
* start one entry past the previous "found" entry, or at the lower bound
* of the last search.
*/
for (i = 0; i < nkeys; i++)
{
int idx = hstoreFindKey(hs, &lowbound,
key_pairs[i].key, key_pairs[i].keylen);
if (idx >= 0)
{
res = true;
break;
}
}
PG_RETURN_BOOL(res);
}
PG_FUNCTION_INFO_V1(hstore_exists_all);
Datum hstore_exists_all(PG_FUNCTION_ARGS);
Datum
hstore_exists_all(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *keys = PG_GETARG_ARRAYTYPE_P(1);
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys);
int i;
int lowbound = 0;
bool res = true;
/*
* we exploit the fact that the pairs list is already sorted into strictly
* increasing order to narrow the hstoreFindKey search; each search can
* start one entry past the previous "found" entry, or at the lower bound
* of the last search.
*/
for (i = 0; i < nkeys; i++)
{
int idx = hstoreFindKey(hs, &lowbound,
key_pairs[i].key, key_pairs[i].keylen);
if (idx < 0)
{
res = false;
break;
}
}
PG_RETURN_BOOL(res);
}
PG_FUNCTION_INFO_V1(hstore_defined);
Datum hstore_defined(PG_FUNCTION_ARGS);
Datum
hstore_defined(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
HEntry *entries = ARRPTR(hs);
int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
bool res = (idx >= 0 && !HS_VALISNULL(entries, idx));
PG_RETURN_BOOL(res);
}
PG_FUNCTION_INFO_V1(hstore_delete);
Datum hstore_delete(PG_FUNCTION_ARGS);
Datum
hstore_delete(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
char *keyptr = VARDATA_ANY(key);
int keylen = VARSIZE_ANY_EXHDR(key);
HStore *out = palloc(VARSIZE(hs));
char *bufs,
*bufd,
*ptrd;
HEntry *es,
*ed;
int i;
int count = HS_COUNT(hs);
int outcount = 0;
SET_VARSIZE(out, VARSIZE(hs));
HS_SETCOUNT(out, count); /* temporary! */
bufs = STRPTR(hs);
es = ARRPTR(hs);
bufd = ptrd = STRPTR(out);
ed = ARRPTR(out);
for (i = 0; i < count; ++i)
{
int len = HS_KEYLEN(es, i);
char *ptrs = HS_KEY(es, bufs, i);
if (!(len == keylen && memcmp(ptrs, keyptr, keylen) == 0))
{
int vallen = HS_VALLEN(es, i);
HS_COPYITEM(ed, bufd, ptrd, ptrs, len, vallen, HS_VALISNULL(es, i));
++outcount;
}
}
HS_FINALIZE(out, outcount, bufd, ptrd);
PG_RETURN_POINTER(out);
}
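/*
 * Example of the operator backed by this function (illustrative):
 *
 *		SELECT 'a=>1, b=>2'::hstore - 'b'::text;	-- "a"=>"1"
 */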
PG_FUNCTION_INFO_V1(hstore_delete_array);
Datum hstore_delete_array(PG_FUNCTION_ARGS);
Datum
hstore_delete_array(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HStore *out = palloc(VARSIZE(hs));
int hs_count = HS_COUNT(hs);
char *ps,
*bufd,
*pd;
HEntry *es,
*ed;
int i,
j;
int outcount = 0;
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
SET_VARSIZE(out, VARSIZE(hs));
HS_SETCOUNT(out, hs_count); /* temporary! */
ps = STRPTR(hs);
es = ARRPTR(hs);
bufd = pd = STRPTR(out);
ed = ARRPTR(out);
if (nkeys == 0)
{
/* return a copy of the input, unchanged */
memcpy(out, hs, VARSIZE(hs));
HS_FIXSIZE(out, hs_count);
HS_SETCOUNT(out, hs_count);
PG_RETURN_POINTER(out);
}
/*
* this is in effect a merge between hs and key_pairs, both of which are
* already sorted by (keylen,key); we take keys from hs only
*/
for (i = j = 0; i < hs_count;)
{
int difference;
if (j >= nkeys)
difference = -1;
else
{
int skeylen = HS_KEYLEN(es, i);
if (skeylen == key_pairs[j].keylen)
difference = memcmp(HS_KEY(es, ps, i),
key_pairs[j].key,
key_pairs[j].keylen);
else
difference = (skeylen > key_pairs[j].keylen) ? 1 : -1;
}
if (difference > 0)
++j;
else if (difference == 0)
++i, ++j;
else
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es, ps, i), HS_KEYLEN(es, i),
HS_VALLEN(es, i), HS_VALISNULL(es, i));
++outcount;
++i;
}
}
HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
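/*
 * Example (illustrative): deleting several keys at once:
 *
 *		SELECT 'a=>1, b=>2, c=>3'::hstore - ARRAY['a','c'];		-- "b"=>"2"
 */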
PG_FUNCTION_INFO_V1(hstore_delete_hstore);
Datum hstore_delete_hstore(PG_FUNCTION_ARGS);
Datum
hstore_delete_hstore(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HStore *hs2 = PG_GETARG_HS(1);
HStore *out = palloc(VARSIZE(hs));
int hs_count = HS_COUNT(hs);
int hs2_count = HS_COUNT(hs2);
char *ps,
*ps2,
*bufd,
*pd;
HEntry *es,
*es2,
*ed;
int i,
j;
int outcount = 0;
SET_VARSIZE(out, VARSIZE(hs));
HS_SETCOUNT(out, hs_count); /* temporary! */
ps = STRPTR(hs);
es = ARRPTR(hs);
ps2 = STRPTR(hs2);
es2 = ARRPTR(hs2);
bufd = pd = STRPTR(out);
ed = ARRPTR(out);
if (hs2_count == 0)
{
/* return a copy of the input, unchanged */
memcpy(out, hs, VARSIZE(hs));
HS_FIXSIZE(out, hs_count);
HS_SETCOUNT(out, hs_count);
PG_RETURN_POINTER(out);
}
/*
* this is in effect a merge between hs and hs2, both of which are already
* sorted by (keylen,key); we take keys from hs only; for equal keys, we
* take the value from hs unless the values are equal
*/
for (i = j = 0; i < hs_count;)
{
int difference;
if (j >= hs2_count)
difference = -1;
else
{
int skeylen = HS_KEYLEN(es, i);
int s2keylen = HS_KEYLEN(es2, j);
if (skeylen == s2keylen)
difference = memcmp(HS_KEY(es, ps, i),
HS_KEY(es2, ps2, j),
skeylen);
else
difference = (skeylen > s2keylen) ? 1 : -1;
}
if (difference > 0)
++j;
else if (difference == 0)
{
int svallen = HS_VALLEN(es, i);
int snullval = HS_VALISNULL(es, i);
if (snullval != HS_VALISNULL(es2, j)
|| (!snullval
&& (svallen != HS_VALLEN(es2, j)
|| memcmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0)))
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es, ps, i), HS_KEYLEN(es, i),
svallen, snullval);
++outcount;
}
++i, ++j;
}
else
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es, ps, i), HS_KEYLEN(es, i),
HS_VALLEN(es, i), HS_VALISNULL(es, i));
++outcount;
++i;
}
}
HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
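/*
 * Example (illustrative): only pairs matching in both key and value are
 * removed:
 *
 *		SELECT 'a=>1, b=>2'::hstore - 'a=>1, b=>9'::hstore;		-- "b"=>"2"
 */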
PG_FUNCTION_INFO_V1(hstore_concat);
Datum hstore_concat(PG_FUNCTION_ARGS);
Datum
hstore_concat(PG_FUNCTION_ARGS)
{
HStore *s1 = PG_GETARG_HS(0);
HStore *s2 = PG_GETARG_HS(1);
HStore *out = palloc(VARSIZE(s1) + VARSIZE(s2));
char *ps1,
*ps2,
*bufd,
*pd;
HEntry *es1,
*es2,
*ed;
int s1idx;
int s2idx;
int s1count = HS_COUNT(s1);
int s2count = HS_COUNT(s2);
int outcount = 0;
SET_VARSIZE(out, VARSIZE(s1) + VARSIZE(s2) - HSHRDSIZE);
HS_SETCOUNT(out, s1count + s2count);
if (s1count == 0)
{
/* return a copy of the input, unchanged */
memcpy(out, s2, VARSIZE(s2));
HS_FIXSIZE(out, s2count);
HS_SETCOUNT(out, s2count);
PG_RETURN_POINTER(out);
}
if (s2count == 0)
{
/* return a copy of the input, unchanged */
memcpy(out, s1, VARSIZE(s1));
HS_FIXSIZE(out, s1count);
HS_SETCOUNT(out, s1count);
PG_RETURN_POINTER(out);
}
ps1 = STRPTR(s1);
ps2 = STRPTR(s2);
bufd = pd = STRPTR(out);
es1 = ARRPTR(s1);
es2 = ARRPTR(s2);
ed = ARRPTR(out);
/*
* this is in effect a merge between s1 and s2, both of which are already
* sorted by (keylen,key); we take s2 for equal keys
*/
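	/*
	 * For illustration (hypothetical values): 'a=>1, b=>2' || 'b=>3, c=>4'
	 * yields 'a=>1, b=>3, c=>4'; the right-hand operand wins on key 'b'.
	 */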
for (s1idx = s2idx = 0; s1idx < s1count || s2idx < s2count; ++outcount)
{
int difference;
if (s1idx >= s1count)
difference = 1;
else if (s2idx >= s2count)
difference = -1;
else
{
int s1keylen = HS_KEYLEN(es1, s1idx);
int s2keylen = HS_KEYLEN(es2, s2idx);
if (s1keylen == s2keylen)
difference = memcmp(HS_KEY(es1, ps1, s1idx),
HS_KEY(es2, ps2, s2idx),
s1keylen);
else
difference = (s1keylen > s2keylen) ? 1 : -1;
}
if (difference >= 0)
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es2, ps2, s2idx), HS_KEYLEN(es2, s2idx),
HS_VALLEN(es2, s2idx), HS_VALISNULL(es2, s2idx));
++s2idx;
if (difference == 0)
++s1idx;
}
else
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es1, ps1, s1idx), HS_KEYLEN(es1, s1idx),
HS_VALLEN(es1, s1idx), HS_VALISNULL(es1, s1idx));
++s1idx;
}
}
HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_slice_to_array);
Datum hstore_slice_to_array(PG_FUNCTION_ARGS);
Datum
hstore_slice_to_array(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HEntry *entries = ARRPTR(hs);
char *ptr = STRPTR(hs);
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
ArrayType *aout;
Datum *key_datums;
bool *key_nulls;
Datum *out_datums;
bool *out_nulls;
int key_count;
int i;
deconstruct_array(key_array,
TEXTOID, -1, false, 'i',
&key_datums, &key_nulls, &key_count);
if (key_count == 0)
{
aout = construct_empty_array(TEXTOID);
PG_RETURN_POINTER(aout);
}
out_datums = palloc(sizeof(Datum) * key_count);
out_nulls = palloc(sizeof(bool) * key_count);
for (i = 0; i < key_count; ++i)
{
text *key = (text *) DatumGetPointer(key_datums[i]);
int idx;
if (key_nulls[i])
idx = -1;
else
idx = hstoreFindKey(hs, NULL, VARDATA(key), VARSIZE(key) - VARHDRSZ);
if (idx < 0 || HS_VALISNULL(entries, idx))
{
out_nulls[i] = true;
out_datums[i] = (Datum) 0;
}
else
{
out_datums[i] = PointerGetDatum(
cstring_to_text_with_len(HS_VAL(entries, ptr, idx),
HS_VALLEN(entries, idx)));
out_nulls[i] = false;
}
}
aout = construct_md_array(out_datums, out_nulls,
ARR_NDIM(key_array),
ARR_DIMS(key_array),
ARR_LBOUND(key_array),
TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(aout);
}
PG_FUNCTION_INFO_V1(hstore_slice_to_hstore);
Datum hstore_slice_to_hstore(PG_FUNCTION_ARGS);
Datum
hstore_slice_to_hstore(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HEntry *entries = ARRPTR(hs);
char *ptr = STRPTR(hs);
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
HStore *out;
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
Pairs *out_pairs;
int bufsiz;
int lastidx = 0;
int i;
int out_count = 0;
if (nkeys == 0)
{
out = hstorePairs(NULL, 0, 0);
PG_RETURN_POINTER(out);
}
out_pairs = palloc(sizeof(Pairs) * nkeys);
bufsiz = 0;
/*
* we exploit the fact that the pairs list is already sorted into strictly
* increasing order to narrow the hstoreFindKey search; each search can
* start one entry past the previous "found" entry, or at the lower bound
* of the last search.
*/
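	/*
	 * Sketch (assuming lastidx acts as a lower bound that hstoreFindKey
	 * advances on a hit): probing for sorted keys k1 < k2 < k3 restarts
	 * each binary search just past the previous match instead of at 0.
	 */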
for (i = 0; i < nkeys; ++i)
{
int idx = hstoreFindKey(hs, &lastidx,
key_pairs[i].key, key_pairs[i].keylen);
if (idx >= 0)
{
out_pairs[out_count].key = key_pairs[i].key;
bufsiz += (out_pairs[out_count].keylen = key_pairs[i].keylen);
out_pairs[out_count].val = HS_VAL(entries, ptr, idx);
bufsiz += (out_pairs[out_count].vallen = HS_VALLEN(entries, idx));
out_pairs[out_count].isnull = HS_VALISNULL(entries, idx);
out_pairs[out_count].needfree = false;
++out_count;
}
}
/*
* we don't use uniquePairs here because we know that the pairs list is
* already sorted and uniq'ed.
*/
out = hstorePairs(out_pairs, out_count, bufsiz);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_akeys);
Datum hstore_akeys(PG_FUNCTION_ARGS);
Datum
hstore_akeys(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
Datum *d;
ArrayType *a;
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
int count = HS_COUNT(hs);
int i;
if (count == 0)
{
a = construct_empty_array(TEXTOID);
PG_RETURN_POINTER(a);
}
d = (Datum *) palloc(sizeof(Datum) * count);
for (i = 0; i < count; ++i)
{
text *item = cstring_to_text_with_len(HS_KEY(entries, base, i),
HS_KEYLEN(entries, i));
d[i] = PointerGetDatum(item);
}
a = construct_array(d, count,
TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(a);
}
PG_FUNCTION_INFO_V1(hstore_avals);
Datum hstore_avals(PG_FUNCTION_ARGS);
Datum
hstore_avals(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
Datum *d;
bool *nulls;
ArrayType *a;
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
int count = HS_COUNT(hs);
int lb = 1;
int i;
if (count == 0)
{
a = construct_empty_array(TEXTOID);
PG_RETURN_POINTER(a);
}
d = (Datum *) palloc(sizeof(Datum) * count);
nulls = (bool *) palloc(sizeof(bool) * count);
for (i = 0; i < count; ++i)
{
if (HS_VALISNULL(entries, i))
{
d[i] = (Datum) 0;
nulls[i] = true;
}
else
{
text *item = cstring_to_text_with_len(HS_VAL(entries, base, i),
HS_VALLEN(entries, i));
d[i] = PointerGetDatum(item);
nulls[i] = false;
}
}
a = construct_md_array(d, nulls, 1, &count, &lb,
TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(a);
}
static ArrayType *
hstore_to_array_internal(HStore *hs, int ndims)
{
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
int count = HS_COUNT(hs);
int out_size[2] = {0, 2};
int lb[2] = {1, 1};
Datum *out_datums;
bool *out_nulls;
int i;
Assert(ndims < 3);
if (count == 0 || ndims == 0)
return construct_empty_array(TEXTOID);
out_size[0] = count * 2 / ndims;
out_datums = palloc(sizeof(Datum) * count * 2);
out_nulls = palloc(sizeof(bool) * count * 2);
for (i = 0; i < count; ++i)
{
text *key = cstring_to_text_with_len(HS_KEY(entries, base, i),
HS_KEYLEN(entries, i));
out_datums[i * 2] = PointerGetDatum(key);
out_nulls[i * 2] = false;
if (HS_VALISNULL(entries, i))
{
out_datums[i * 2 + 1] = (Datum) 0;
out_nulls[i * 2 + 1] = true;
}
else
{
text *item = cstring_to_text_with_len(HS_VAL(entries, base, i),
HS_VALLEN(entries, i));
out_datums[i * 2 + 1] = PointerGetDatum(item);
out_nulls[i * 2 + 1] = false;
}
}
return construct_md_array(out_datums, out_nulls,
ndims, out_size, lb,
TEXTOID, -1, false, 'i');
}
PG_FUNCTION_INFO_V1(hstore_to_array);
Datum hstore_to_array(PG_FUNCTION_ARGS);
Datum
hstore_to_array(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *out = hstore_to_array_internal(hs, 1);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_to_matrix);
Datum hstore_to_matrix(PG_FUNCTION_ARGS);
Datum
hstore_to_matrix(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *out = hstore_to_array_internal(hs, 2);
PG_RETURN_POINTER(out);
}
/*
* Common initialization function for the various set-returning
* funcs. fcinfo is only passed if the function is to return a
* composite; it will be used to look up the return tupledesc.
* we stash a copy of the hstore in the multi-call context in
* case it was originally toasted. (At least I assume that's why;
* there was no explanatory comment in the original code. --AG)
*/
static void
setup_firstcall(FuncCallContext *funcctx, HStore *hs,
FunctionCallInfoData *fcinfo)
{
MemoryContext oldcontext;
HStore *st;
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
st = (HStore *) palloc(VARSIZE(hs));
memcpy(st, hs, VARSIZE(hs));
funcctx->user_fctx = (void *) st;
if (fcinfo)
{
TupleDesc tupdesc;
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
}
MemoryContextSwitchTo(oldcontext);
}
PG_FUNCTION_INFO_V1(hstore_skeys);
Datum hstore_skeys(PG_FUNCTION_ARGS);
Datum
hstore_skeys(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
HStore *hs;
int i;
if (SRF_IS_FIRSTCALL())
{
hs = PG_GETARG_HS(0);
funcctx = SRF_FIRSTCALL_INIT();
setup_firstcall(funcctx, hs, NULL);
}
funcctx = SRF_PERCALL_SETUP();
hs = (HStore *) funcctx->user_fctx;
i = funcctx->call_cntr;
if (i < HS_COUNT(hs))
{
HEntry *entries = ARRPTR(hs);
text *item;
item = cstring_to_text_with_len(HS_KEY(entries, STRPTR(hs), i),
HS_KEYLEN(entries, i));
SRF_RETURN_NEXT(funcctx, PointerGetDatum(item));
}
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(hstore_svals);
Datum hstore_svals(PG_FUNCTION_ARGS);
Datum
hstore_svals(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
HStore *hs;
int i;
if (SRF_IS_FIRSTCALL())
{
hs = PG_GETARG_HS(0);
funcctx = SRF_FIRSTCALL_INIT();
setup_firstcall(funcctx, hs, NULL);
}
funcctx = SRF_PERCALL_SETUP();
hs = (HStore *) funcctx->user_fctx;
i = funcctx->call_cntr;
if (i < HS_COUNT(hs))
{
HEntry *entries = ARRPTR(hs);
if (HS_VALISNULL(entries, i))
{
ReturnSetInfo *rsi;
			/* ugly: there is no SRF_RETURN_NEXT_NULL macro, so advance the SRF state by hand */
(funcctx)->call_cntr++;
rsi = (ReturnSetInfo *) fcinfo->resultinfo;
rsi->isDone = ExprMultipleResult;
PG_RETURN_NULL();
}
else
{
text *item;
item = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), i),
HS_VALLEN(entries, i));
SRF_RETURN_NEXT(funcctx, PointerGetDatum(item));
}
}
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(hstore_contains);
Datum hstore_contains(PG_FUNCTION_ARGS);
Datum
hstore_contains(PG_FUNCTION_ARGS)
{
HStore *val = PG_GETARG_HS(0);
HStore *tmpl = PG_GETARG_HS(1);
bool res = true;
HEntry *te = ARRPTR(tmpl);
char *tstr = STRPTR(tmpl);
HEntry *ve = ARRPTR(val);
char *vstr = STRPTR(val);
int tcount = HS_COUNT(tmpl);
int lastidx = 0;
int i;
/*
* we exploit the fact that keys in "tmpl" are in strictly increasing
* order to narrow the hstoreFindKey search; each search can start one
	 * entry past the previous "found" entry, or at the lower bound of the
	 * last search
*/
for (i = 0; res && i < tcount; ++i)
{
int idx = hstoreFindKey(val, &lastidx,
HS_KEY(te, tstr, i), HS_KEYLEN(te, i));
if (idx >= 0)
{
bool nullval = HS_VALISNULL(te, i);
int vallen = HS_VALLEN(te, i);
if (nullval != HS_VALISNULL(ve, idx)
|| (!nullval
&& (vallen != HS_VALLEN(ve, idx)
|| memcmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen))))
res = false;
}
else
res = false;
}
PG_RETURN_BOOL(res);
}
PG_FUNCTION_INFO_V1(hstore_contained);
Datum hstore_contained(PG_FUNCTION_ARGS);
Datum
hstore_contained(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(hstore_contains,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
));
}
PG_FUNCTION_INFO_V1(hstore_each);
Datum hstore_each(PG_FUNCTION_ARGS);
Datum
hstore_each(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
HStore *hs;
int i;
if (SRF_IS_FIRSTCALL())
{
hs = PG_GETARG_HS(0);
funcctx = SRF_FIRSTCALL_INIT();
setup_firstcall(funcctx, hs, fcinfo);
}
funcctx = SRF_PERCALL_SETUP();
hs = (HStore *) funcctx->user_fctx;
i = funcctx->call_cntr;
if (i < HS_COUNT(hs))
{
HEntry *entries = ARRPTR(hs);
char *ptr = STRPTR(hs);
Datum res,
dvalues[2];
bool nulls[2] = {false, false};
text *item;
HeapTuple tuple;
item = cstring_to_text_with_len(HS_KEY(entries, ptr, i),
HS_KEYLEN(entries, i));
dvalues[0] = PointerGetDatum(item);
if (HS_VALISNULL(entries, i))
{
dvalues[1] = (Datum) 0;
nulls[1] = true;
}
else
{
item = cstring_to_text_with_len(HS_VAL(entries, ptr, i),
HS_VALLEN(entries, i));
dvalues[1] = PointerGetDatum(item);
}
tuple = heap_form_tuple(funcctx->tuple_desc, dvalues, nulls);
res = HeapTupleGetDatum(tuple);
SRF_RETURN_NEXT(funcctx, PointerGetDatum(res));
}
SRF_RETURN_DONE(funcctx);
}
/*
* btree sort order for hstores isn't intended to be useful; we really only
* care about equality versus non-equality. we compare the entire string
* buffer first, then the entry pos array.
*/
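/*
 * In particular the order is not lexicographic by key: it is merely some
 * consistent total order, which is all that equality lookups require.
 */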
PG_FUNCTION_INFO_V1(hstore_cmp);
Datum hstore_cmp(PG_FUNCTION_ARGS);
Datum
hstore_cmp(PG_FUNCTION_ARGS)
{
HStore *hs1 = PG_GETARG_HS(0);
HStore *hs2 = PG_GETARG_HS(1);
int hcount1 = HS_COUNT(hs1);
int hcount2 = HS_COUNT(hs2);
int res = 0;
if (hcount1 == 0 || hcount2 == 0)
{
/*
* if either operand is empty, and the other is nonempty, the nonempty
* one is larger. If both are empty they are equal.
*/
if (hcount1 > 0)
res = 1;
else if (hcount2 > 0)
res = -1;
}
else
{
/* here we know both operands are nonempty */
char *str1 = STRPTR(hs1);
char *str2 = STRPTR(hs2);
HEntry *ent1 = ARRPTR(hs1);
HEntry *ent2 = ARRPTR(hs2);
size_t len1 = HSE_ENDPOS(ent1[2 * hcount1 - 1]);
size_t len2 = HSE_ENDPOS(ent2[2 * hcount2 - 1]);
res = memcmp(str1, str2, Min(len1, len2));
if (res == 0)
{
if (len1 > len2)
res = 1;
else if (len1 < len2)
res = -1;
else if (hcount1 > hcount2)
res = 1;
else if (hcount2 > hcount1)
res = -1;
else
{
int count = hcount1 * 2;
int i;
for (i = 0; i < count; ++i)
if (HSE_ENDPOS(ent1[i]) != HSE_ENDPOS(ent2[i]) ||
HSE_ISNULL(ent1[i]) != HSE_ISNULL(ent2[i]))
break;
if (i < count)
{
if (HSE_ENDPOS(ent1[i]) < HSE_ENDPOS(ent2[i]))
res = -1;
else if (HSE_ENDPOS(ent1[i]) > HSE_ENDPOS(ent2[i]))
res = 1;
else if (HSE_ISNULL(ent1[i]))
res = 1;
else if (HSE_ISNULL(ent2[i]))
res = -1;
}
}
}
else
{
res = (res > 0) ? 1 : -1;
}
}
/*
* this is a btree support function; this is one of the few places where
* memory needs to be explicitly freed.
*/
PG_FREE_IF_COPY(hs1, 0);
PG_FREE_IF_COPY(hs2, 1);
PG_RETURN_INT32(res);
}
PG_FUNCTION_INFO_V1(hstore_eq);
Datum hstore_eq(PG_FUNCTION_ARGS);
Datum
hstore_eq(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res == 0);
}
PG_FUNCTION_INFO_V1(hstore_ne);
Datum hstore_ne(PG_FUNCTION_ARGS);
Datum
hstore_ne(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res != 0);
}
PG_FUNCTION_INFO_V1(hstore_gt);
Datum hstore_gt(PG_FUNCTION_ARGS);
Datum
hstore_gt(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res > 0);
}
PG_FUNCTION_INFO_V1(hstore_ge);
Datum hstore_ge(PG_FUNCTION_ARGS);
Datum
hstore_ge(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res >= 0);
}
PG_FUNCTION_INFO_V1(hstore_lt);
Datum hstore_lt(PG_FUNCTION_ARGS);
Datum
hstore_lt(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res < 0);
}
PG_FUNCTION_INFO_V1(hstore_le);
Datum hstore_le(PG_FUNCTION_ARGS);
Datum
hstore_le(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res <= 0);
}
PG_FUNCTION_INFO_V1(hstore_hash);
Datum hstore_hash(PG_FUNCTION_ARGS);
Datum
hstore_hash(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
Datum hval = hash_any((unsigned char *) VARDATA(hs),
VARSIZE(hs) - VARHDRSZ);
/*
* this is the only place in the code that cares whether the overall
* varlena size exactly matches the true data size; this assertion should
* be maintained by all the other code, but we make it explicit here.
*/
Assert(VARSIZE(hs) ==
(HS_COUNT(hs) != 0 ?
CALCDATASIZE(HS_COUNT(hs),
HSE_ENDPOS(ARRPTR(hs)[2 * HS_COUNT(hs) - 1])) :
HSHRDSIZE));
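	/*
	 * i.e. the datum may contain no trailing slack: hash_any() covers the
	 * whole varlena, so padding after the last value would make otherwise
	 * equal hstores hash differently.
	 */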
PG_FREE_IF_COPY(hs, 0);
PG_RETURN_DATUM(hval);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2020_2 |
crossvul-cpp_data_bad_2155_1 |
/*-
* Copyright (c) 2008 Christos Zoulas
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Parse Composite Document Files, the format used in Microsoft Office
* document files before they switched to zipped XML.
* Info from: http://sc.openoffice.org/compdocfileformat.pdf
*
* N.B. This is the "Composite Document File" format, and not the
* "Compound Document Format", nor the "Channel Definition Format".
*/
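/*
 * Simplified layout sketch (per the spec referenced above): a 512-byte
 * header holding the first 109 master SAT entries, followed by fixed-size
 * sectors that carry the SAT, the directory chain and the stream data;
 * streams smaller than a threshold live in short sectors addressed via a
 * separate short sector allocation table.
 */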
#include "file.h"
#ifndef lint
FILE_RCSID("@(#)$File: cdf.c,v 1.53 2013/02/26 16:20:42 christos Exp $")
#endif
#include <assert.h>
#ifdef CDF_DEBUG
#include <err.h>
#endif
#include <stdlib.h>
#ifdef PHP_WIN32
#include "win32/unistd.h"
#else
#include <unistd.h>
#endif
#ifndef UINT32_MAX
# define UINT32_MAX (0xffffffff)
#endif
#include <string.h>
#include <time.h>
#include <ctype.h>
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef EFTYPE
#define EFTYPE EINVAL
#endif
#include "cdf.h"
#ifdef CDF_DEBUG
#define DPRINTF(a) printf a, fflush(stdout)
#else
#define DPRINTF(a)
#endif
static union {
char s[4];
uint32_t u;
} cdf_bo;
#define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304)
#define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x)))
#define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x)))
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
/*
* swap a short
*/
static uint16_t
_cdf_tole2(uint16_t sv)
{
uint16_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[1];
d[1] = s[0];
return rv;
}
/*
* swap an int
*/
static uint32_t
_cdf_tole4(uint32_t sv)
{
uint32_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[3];
d[1] = s[2];
d[2] = s[1];
d[3] = s[0];
return rv;
}
/*
* swap a quad
*/
static uint64_t
_cdf_tole8(uint64_t sv)
{
uint64_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[7];
d[1] = s[6];
d[2] = s[5];
d[3] = s[4];
d[4] = s[3];
d[5] = s[2];
d[6] = s[1];
d[7] = s[0];
return rv;
}
/*
* grab a uint32_t from a possibly unaligned address, and return it in
* the native host order.
*/
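/*
 * memcpy (rather than dereferencing a cast pointer) keeps this safe on
 * strict-alignment architectures and under strict-aliasing rules.
 */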
static uint32_t
cdf_getuint32(const uint8_t *p, size_t offs)
{
uint32_t rv;
(void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv));
return CDF_TOLE4(rv);
}
#define CDF_UNPACK(a) \
(void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a)
#define CDF_UNPACKA(a) \
(void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a)
uint16_t
cdf_tole2(uint16_t sv)
{
return CDF_TOLE2(sv);
}
uint32_t
cdf_tole4(uint32_t sv)
{
return CDF_TOLE4(sv);
}
uint64_t
cdf_tole8(uint64_t sv)
{
return CDF_TOLE8(sv);
}
void
cdf_swap_header(cdf_header_t *h)
{
size_t i;
h->h_magic = CDF_TOLE8(h->h_magic);
h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]);
h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]);
h->h_revision = CDF_TOLE2(h->h_revision);
h->h_version = CDF_TOLE2(h->h_version);
h->h_byte_order = CDF_TOLE2(h->h_byte_order);
h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2);
h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2);
h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat);
h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory);
h->h_min_size_standard_stream =
CDF_TOLE4(h->h_min_size_standard_stream);
h->h_secid_first_sector_in_short_sat =
CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat);
h->h_num_sectors_in_short_sat =
CDF_TOLE4(h->h_num_sectors_in_short_sat);
h->h_secid_first_sector_in_master_sat =
CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat);
h->h_num_sectors_in_master_sat =
CDF_TOLE4(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]);
}
void
cdf_unpack_header(cdf_header_t *h, char *buf)
{
size_t i;
size_t len = 0;
CDF_UNPACK(h->h_magic);
CDF_UNPACKA(h->h_uuid);
CDF_UNPACK(h->h_revision);
CDF_UNPACK(h->h_version);
CDF_UNPACK(h->h_byte_order);
CDF_UNPACK(h->h_sec_size_p2);
CDF_UNPACK(h->h_short_sec_size_p2);
CDF_UNPACKA(h->h_unused0);
CDF_UNPACK(h->h_num_sectors_in_sat);
CDF_UNPACK(h->h_secid_first_directory);
CDF_UNPACKA(h->h_unused1);
CDF_UNPACK(h->h_min_size_standard_stream);
CDF_UNPACK(h->h_secid_first_sector_in_short_sat);
CDF_UNPACK(h->h_num_sectors_in_short_sat);
CDF_UNPACK(h->h_secid_first_sector_in_master_sat);
CDF_UNPACK(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
CDF_UNPACK(h->h_master_sat[i]);
}
void
cdf_swap_dir(cdf_directory_t *d)
{
d->d_namelen = CDF_TOLE2(d->d_namelen);
d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child);
d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child);
d->d_storage = CDF_TOLE4((uint32_t)d->d_storage);
d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]);
d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]);
d->d_flags = CDF_TOLE4(d->d_flags);
d->d_created = CDF_TOLE8((uint64_t)d->d_created);
d->d_modified = CDF_TOLE8((uint64_t)d->d_modified);
d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector);
d->d_size = CDF_TOLE4(d->d_size);
}
void
cdf_swap_class(cdf_classid_t *d)
{
d->cl_dword = CDF_TOLE4(d->cl_dword);
d->cl_word[0] = CDF_TOLE2(d->cl_word[0]);
d->cl_word[1] = CDF_TOLE2(d->cl_word[1]);
}
void
cdf_unpack_dir(cdf_directory_t *d, char *buf)
{
size_t len = 0;
CDF_UNPACKA(d->d_name);
CDF_UNPACK(d->d_namelen);
CDF_UNPACK(d->d_type);
CDF_UNPACK(d->d_color);
CDF_UNPACK(d->d_left_child);
CDF_UNPACK(d->d_right_child);
CDF_UNPACK(d->d_storage);
CDF_UNPACKA(d->d_storage_uuid);
CDF_UNPACK(d->d_flags);
CDF_UNPACK(d->d_created);
CDF_UNPACK(d->d_modified);
CDF_UNPACK(d->d_stream_first_sector);
CDF_UNPACK(d->d_size);
CDF_UNPACK(d->d_unused0);
}
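/*
 * Check that the tail bytes following p still lie inside sst's buffer
 * (sector size times sector count); returns 0 if so, otherwise sets
 * errno to EFTYPE and returns -1.
 */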
static int
cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
const void *p, size_t tail, int line)
{
const char *b = (const char *)sst->sst_tab;
const char *e = ((const char *)p) + tail;
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
(void)&line;
if (e >= b && (size_t)(e - b) <= ss * sst->sst_len)
return 0;
DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
" > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
ss * sst->sst_len, ss, sst->sst_len));
errno = EFTYPE;
return -1;
}
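/*
 * Read len bytes at off: served from the caller-supplied buffer when one
 * is present and large enough, otherwise via seek+read on the descriptor;
 * requests whose end offset would overflow are rejected.
 */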
static ssize_t
cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len)
{
size_t siz = (size_t)off + len;
if ((off_t)(off + len) != (off_t)siz) {
errno = EINVAL;
return -1;
}
if (info->i_buf != NULL && info->i_len >= siz) {
(void)memcpy(buf, &info->i_buf[off], len);
return (ssize_t)len;
}
if (info->i_fd == -1)
return -1;
if (FINFO_LSEEK_FUNC(info->i_fd, off, SEEK_SET) == (off_t)-1)
return -1;
if (FINFO_READ_FUNC(info->i_fd, buf, len) != (ssize_t)len)
return -1;
return (ssize_t)len;
}
int
cdf_read_header(const cdf_info_t *info, cdf_header_t *h)
{
char buf[512];
(void)memcpy(cdf_bo.s, "\01\02\03\04", 4);
if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1)
return -1;
cdf_unpack_header(h, buf);
cdf_swap_header(h);
if (h->h_magic != CDF_MAGIC) {
DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%"
INT64_T_FORMAT "x\n",
(unsigned long long)h->h_magic,
(unsigned long long)CDF_MAGIC));
goto out;
}
if (h->h_sec_size_p2 > 20) {
DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2));
goto out;
}
if (h->h_short_sec_size_p2 > 20) {
DPRINTF(("Bad short sector size 0x%u\n",
h->h_short_sec_size_p2));
goto out;
}
return 0;
out:
errno = EFTYPE;
return -1;
}
ssize_t
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SEC_SIZE(h);
size_t pos = CDF_SEC_POS(h, id);
assert(ss == len);
return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);
}
ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
pos + len, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
}
/*
* Read the sector allocation table.
*/
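/*
 * The first 109 SAT sector ids sit in the header's master SAT array; any
 * further ids come from chained master SAT sectors, each holding
 * (sector size / 4) - 1 ids plus the id of the next master sector --
 * hence nsatpersec below.
 */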
int
cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat)
{
size_t i, j, k;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t *msa, mid, sec;
size_t nsatpersec = (ss / sizeof(mid)) - 1;
for (i = 0; i < __arraycount(h->h_master_sat); i++)
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
#define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss))
if ((nsatpersec > 0 &&
h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) ||
i > CDF_SEC_LIMIT) {
DPRINTF(("Number of sectors in master SAT too big %u %"
SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i));
errno = EFTYPE;
return -1;
}
sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i;
DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n",
sat->sat_len, ss));
if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss)))
== NULL)
return -1;
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] < 0)
break;
if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
h->h_master_sat[i]) != (ssize_t)ss) {
DPRINTF(("Reading sector %d", h->h_master_sat[i]));
goto out1;
}
}
if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL)
goto out1;
mid = h->h_secid_first_sector_in_master_sat;
for (j = 0; j < h->h_num_sectors_in_master_sat; j++) {
if (mid < 0)
goto out;
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Reading master sector loop limit"));
errno = EFTYPE;
goto out2;
}
if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) {
DPRINTF(("Reading master sector %d", mid));
goto out2;
}
for (k = 0; k < nsatpersec; k++, i++) {
sec = CDF_TOLE4((uint32_t)msa[k]);
if (sec < 0)
goto out;
if (i >= sat->sat_len) {
DPRINTF(("Out of bounds reading MSA %" SIZE_T_FORMAT
"u >= %" SIZE_T_FORMAT "u", i, sat->sat_len));
errno = EFTYPE;
goto out2;
}
if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
sec) != (ssize_t)ss) {
DPRINTF(("Reading sector %d",
CDF_TOLE4(msa[k])));
goto out2;
}
}
mid = CDF_TOLE4((uint32_t)msa[nsatpersec]);
}
out:
sat->sat_len = i;
free(msa);
return 0;
out2:
free(msa);
out1:
free(sat->sat_tab);
return -1;
}
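/*
 * Walk the allocation chain starting at sid and return its length,
 * guarding against reference loops (CDF_LOOP_LIMIT) and out-of-range
 * sector ids.
 */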
size_t
cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
{
size_t i, j;
cdf_secid_t maxsector = (cdf_secid_t)((sat->sat_len * size)
/ sizeof(maxsector));
DPRINTF(("Chain:"));
for (j = i = 0; sid >= 0; i++, j++) {
DPRINTF((" %d", sid));
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Counting chain loop limit"));
errno = EFTYPE;
return (size_t)-1;
}
if (sid >= maxsector) {
DPRINTF(("Sector %d >= %d\n", sid, maxsector));
errno = EFTYPE;
return (size_t)-1;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
DPRINTF(("\n"));
return i;
}
int
cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
size_t ss = CDF_SEC_SIZE(h), i, j;
ssize_t nr;
scn->sst_len = cdf_count_chain(sat, sid, ss);
scn->sst_dirlen = len;
if (scn->sst_len == (size_t)-1)
return -1;
scn->sst_tab = calloc(scn->sst_len, ss);
if (scn->sst_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read long sector chain loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= scn->sst_len) {
DPRINTF(("Out of bounds reading long sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
scn->sst_len));
errno = EFTYPE;
goto out;
}
if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h,
sid)) != (ssize_t)ss) {
if (i == scn->sst_len - 1 && nr > 0) {
/* Last sector might be truncated */
return 0;
}
DPRINTF(("Reading long sector chain %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(scn->sst_tab);
return -1;
}
int
cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1)
return -1;
scn->sst_tab = calloc(scn->sst_len, ss);
if (scn->sst_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sector chain loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= scn->sst_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n",
i, scn->sst_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h,
sid) != (ssize_t)ss) {
DPRINTF(("Reading short sector chain %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]);
}
return 0;
out:
free(scn->sst_tab);
return -1;
}
int
cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL)
return cdf_read_short_sector_chain(h, ssat, sst, sid, len,
scn);
else
return cdf_read_long_sector_chain(info, h, sat, sid, len, scn);
}
int
cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_dir_t *dir)
{
	size_t i, j, k;
size_t ss = CDF_SEC_SIZE(h), ns, nd;
char *buf;
cdf_secid_t sid = h->h_secid_first_directory;
ns = cdf_count_chain(sat, sid, ss);
if (ns == (size_t)-1)
return -1;
nd = ss / CDF_DIRECTORY_SIZE;
dir->dir_len = ns * nd;
dir->dir_tab = CAST(cdf_directory_t *,
calloc(dir->dir_len, sizeof(dir->dir_tab[0])));
if (dir->dir_tab == NULL)
return -1;
if ((buf = CAST(char *, malloc(ss))) == NULL) {
free(dir->dir_tab);
return -1;
}
for (j = i = 0; i < ns; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read dir loop limit"));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) {
DPRINTF(("Reading directory sector %d", sid));
goto out;
}
for (j = 0; j < nd; j++) {
cdf_unpack_dir(&dir->dir_tab[i * nd + j],
&buf[j * CDF_DIRECTORY_SIZE]);
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (NEED_SWAP)
for (i = 0; i < dir->dir_len; i++)
cdf_swap_dir(&dir->dir_tab[i]);
free(buf);
return 0;
out:
free(dir->dir_tab);
free(buf);
return -1;
}
int
cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_sat_t *ssat)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t sid = h->h_secid_first_sector_in_short_sat;
ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h));
if (ssat->sat_len == (size_t)-1)
return -1;
ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss));
if (ssat->sat_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sat sector loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= ssat->sat_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
ssat->sat_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) !=
(ssize_t)ss) {
DPRINTF(("Reading short sat sector %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(ssat->sat_tab);
return -1;
}
int
cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn)
{
size_t i;
const cdf_directory_t *d;
for (i = 0; i < dir->dir_len; i++)
if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE)
break;
	/* If it is not there, just fake it; some docs don't have it */
if (i == dir->dir_len)
goto out;
d = &dir->dir_tab[i];
	/* If it is not there, just fake it; some docs don't have it */
if (d->d_stream_first_sector < 0)
goto out;
return cdf_read_long_sector_chain(info, h, sat,
d->d_stream_first_sector, d->d_size, scn);
out:
scn->sst_tab = NULL;
scn->sst_len = 0;
scn->sst_dirlen = 0;
return 0;
}
static int
cdf_namecmp(const char *d, const uint16_t *s, size_t l)
{
for (; l--; d++, s++)
if (*d != CDF_TOLE2(*s))
return (unsigned char)*d - CDF_TOLE2(*s);
return 0;
}
int
cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, cdf_stream_t *scn)
{
size_t i;
const cdf_directory_t *d;
static const char name[] = "\05SummaryInformation";
for (i = dir->dir_len; i > 0; i--)
if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM &&
cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name))
== 0)
break;
if (i == 0) {
DPRINTF(("Cannot find summary information section\n"));
errno = ESRCH;
return -1;
}
d = &dir->dir_tab[i - 1];
return cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, scn);
}
int
cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs, tail = (i << 1) + 1;
if (cdf_check_stream_offset(sst, h, p, tail * sizeof(uint32_t),
__LINE__) == -1)
goto out;
ofs = CDF_GETUINT32(p, tail);
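		/*
		 * Assumed layout, matching the reads below: property offsets
		 * are relative to the section start (shp), while p already
		 * points sizeof(sh) = 8 bytes past it, hence the
		 * 2 * sizeof(uint32_t) correction on q.
		 */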
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
int
cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE4(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info,
count, &maxcount) == -1)
return -1;
return 0;
}
int
cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id)
{
return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-"
"%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0],
id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0],
id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4],
id->cl_six[5]);
}
static const struct {
uint32_t v;
const char *n;
} vn[] = {
{ CDF_PROPERTY_CODE_PAGE, "Code page" },
{ CDF_PROPERTY_TITLE, "Title" },
{ CDF_PROPERTY_SUBJECT, "Subject" },
{ CDF_PROPERTY_AUTHOR, "Author" },
{ CDF_PROPERTY_KEYWORDS, "Keywords" },
{ CDF_PROPERTY_COMMENTS, "Comments" },
{ CDF_PROPERTY_TEMPLATE, "Template" },
{ CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" },
{ CDF_PROPERTY_REVISION_NUMBER, "Revision Number" },
{ CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" },
{ CDF_PROPERTY_LAST_PRINTED, "Last Printed" },
{ CDF_PROPERTY_CREATE_TIME, "Create Time/Date" },
{ CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" },
{ CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" },
{ CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" },
{ CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" },
{ CDF_PROPERTY_THUMBNAIL, "Thumbnail" },
{ CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" },
{ CDF_PROPERTY_SECURITY, "Security" },
{ CDF_PROPERTY_LOCALE_ID, "Locale ID" },
};
int
cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p)
{
size_t i;
for (i = 0; i < __arraycount(vn); i++)
if (vn[i].v == p)
return snprintf(buf, bufsiz, "%s", vn[i].n);
return snprintf(buf, bufsiz, "0x%x", p);
}
int
cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts)
{
int len = 0;
int days, hours, mins, secs;
ts /= CDF_TIME_PREC;
secs = (int)(ts % 60);
ts /= 60;
mins = (int)(ts % 60);
ts /= 60;
hours = (int)(ts % 24);
ts /= 24;
days = (int)ts;
if (days) {
len += snprintf(buf + len, bufsiz - len, "%dd+", days);
if ((size_t)len >= bufsiz)
return len;
}
if (days || hours) {
len += snprintf(buf + len, bufsiz - len, "%.2d:", hours);
if ((size_t)len >= bufsiz)
return len;
}
len += snprintf(buf + len, bufsiz - len, "%.2d:", mins);
if ((size_t)len >= bufsiz)
return len;
len += snprintf(buf + len, bufsiz - len, "%.2d", secs);
return len;
}
#ifdef CDF_DEBUG
void
cdf_dump_header(const cdf_header_t *h)
{
size_t i;
#define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b)
#define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \
h->h_ ## b, 1 << h->h_ ## b)
DUMP("%d", revision);
DUMP("%d", version);
DUMP("0x%x", byte_order);
DUMP2("%d", sec_size_p2);
DUMP2("%d", short_sec_size_p2);
DUMP("%d", num_sectors_in_sat);
DUMP("%d", secid_first_directory);
DUMP("%d", min_size_standard_stream);
DUMP("%d", secid_first_sector_in_short_sat);
DUMP("%d", num_sectors_in_short_sat);
DUMP("%d", secid_first_sector_in_master_sat);
DUMP("%d", num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
(void)fprintf(stderr, "%35.35s[%.3zu] = %d\n",
"master_sat", i, h->h_master_sat[i]);
}
}
void
cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size)
{
size_t i, j, s = size / sizeof(cdf_secid_t);
for (i = 0; i < sat->sat_len; i++) {
(void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6"
SIZE_T_FORMAT "u: ", prefix, i, i * s);
for (j = 0; j < s; j++) {
(void)fprintf(stderr, "%5d, ",
CDF_TOLE4(sat->sat_tab[s * i + j]));
if ((j + 1) % 10 == 0)
(void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT
"u: ", i * s + j + 1);
}
(void)fprintf(stderr, "\n");
}
}
void
cdf_dump(void *v, size_t len)
{
size_t i, j;
unsigned char *p = v;
char abuf[16];
(void)fprintf(stderr, "%.4x: ", 0);
for (i = 0, j = 0; i < len; i++, p++) {
(void)fprintf(stderr, "%.2x ", *p);
abuf[j++] = isprint(*p) ? *p : '.';
if (j == 16) {
j = 0;
abuf[15] = '\0';
(void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ",
abuf, i + 1);
}
}
(void)fprintf(stderr, "\n");
}
void
cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst)
{
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
cdf_dump(sst->sst_tab, ss * sst->sst_len);
}
void
cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir)
{
size_t i, j;
cdf_directory_t *d;
char name[__arraycount(d->d_name)];
cdf_stream_t scn;
struct timeval ts;
static const char *types[] = { "empty", "user storage",
"user stream", "lockbytes", "property", "root storage" };
for (i = 0; i < dir->dir_len; i++) {
d = &dir->dir_tab[i];
for (j = 0; j < sizeof(name); j++)
name[j] = (char)CDF_TOLE2(d->d_name[j]);
(void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n",
i, name);
if (d->d_type < __arraycount(types))
(void)fprintf(stderr, "Type: %s\n", types[d->d_type]);
else
(void)fprintf(stderr, "Type: %d\n", d->d_type);
(void)fprintf(stderr, "Color: %s\n",
d->d_color ? "black" : "red");
(void)fprintf(stderr, "Left child: %d\n", d->d_left_child);
(void)fprintf(stderr, "Right child: %d\n", d->d_right_child);
(void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags);
cdf_timestamp_to_timespec(&ts, d->d_created);
(void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec));
cdf_timestamp_to_timespec(&ts, d->d_modified);
(void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec));
(void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector);
(void)fprintf(stderr, "Size %d\n", d->d_size);
switch (d->d_type) {
case CDF_DIR_TYPE_USER_STORAGE:
(void)fprintf(stderr, "Storage: %d\n", d->d_storage);
break;
case CDF_DIR_TYPE_USER_STREAM:
if (sst == NULL)
break;
if (cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, &scn) == -1) {
warn("Can't read stream for %s at %d len %d",
name, d->d_stream_first_sector, d->d_size);
break;
}
cdf_dump_stream(h, &scn);
free(scn.sst_tab);
break;
default:
break;
}
}
}
void
cdf_dump_property_info(const cdf_property_info_t *info, size_t count)
{
cdf_timestamp_t tp;
struct timeval ts;
char buf[64];
size_t i, j;
for (i = 0; i < count; i++) {
cdf_print_property_name(buf, sizeof(buf), info[i].pi_id);
(void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf);
switch (info[i].pi_type) {
case CDF_NULL:
break;
case CDF_SIGNED16:
(void)fprintf(stderr, "signed 16 [%hd]\n",
info[i].pi_s16);
break;
case CDF_SIGNED32:
(void)fprintf(stderr, "signed 32 [%d]\n",
info[i].pi_s32);
break;
case CDF_UNSIGNED32:
(void)fprintf(stderr, "unsigned 32 [%u]\n",
info[i].pi_u32);
break;
case CDF_FLOAT:
(void)fprintf(stderr, "float [%g]\n",
info[i].pi_f);
break;
case CDF_DOUBLE:
(void)fprintf(stderr, "double [%g]\n",
info[i].pi_d);
break;
case CDF_LENGTH32_STRING:
(void)fprintf(stderr, "string %u [%.*s]\n",
info[i].pi_str.s_len,
info[i].pi_str.s_len, info[i].pi_str.s_buf);
break;
case CDF_LENGTH32_WSTRING:
(void)fprintf(stderr, "string %u [",
info[i].pi_str.s_len);
for (j = 0; j < info[i].pi_str.s_len - 1; j++)
(void)fputc(info[i].pi_str.s_buf[j << 1], stderr);
(void)fprintf(stderr, "]\n");
break;
case CDF_FILETIME:
tp = info[i].pi_tp;
#if defined(PHP_WIN32) && _MSC_VER <= 1500
if (tp < 1000000000000000i64) {
#else
if (tp < 1000000000000000LL) {
#endif
cdf_print_elapsed_time(buf, sizeof(buf), tp);
(void)fprintf(stderr, "timestamp %s\n", buf);
} else {
cdf_timestamp_to_timespec(&ts, tp);
(void)fprintf(stderr, "timestamp %s",
cdf_ctime(&ts.tv_sec));
}
break;
case CDF_CLIPBOARD:
(void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32);
break;
default:
DPRINTF(("Don't know how to deal with %x\n",
info[i].pi_type));
break;
}
}
}
void
cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst)
{
char buf[128];
cdf_summary_info_header_t ssi;
cdf_property_info_t *info;
size_t count;
(void)&h;
if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1)
return;
(void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order);
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff,
ssi.si_os_version >> 8);
(void)fprintf(stderr, "Os %d\n", ssi.si_os);
cdf_print_classid(buf, sizeof(buf), &ssi.si_class);
(void)fprintf(stderr, "Class %s\n", buf);
(void)fprintf(stderr, "Count %d\n", ssi.si_count);
cdf_dump_property_info(info, count);
free(info);
}
#endif
#ifdef TEST
int
main(int argc, char *argv[])
{
int i;
cdf_header_t h;
cdf_sat_t sat, ssat;
cdf_stream_t sst, scn;
cdf_dir_t dir;
cdf_info_t info;
if (argc < 2) {
(void)fprintf(stderr, "Usage: %s <filename>\n", getprogname());
return -1;
}
info.i_buf = NULL;
info.i_len = 0;
for (i = 1; i < argc; i++) {
		if ((info.i_fd = open(argv[i], O_RDONLY)) == -1)
			err(1, "Cannot open `%s'", argv[i]);
if (cdf_read_header(&info, &h) == -1)
err(1, "Cannot read header");
#ifdef CDF_DEBUG
cdf_dump_header(&h);
#endif
if (cdf_read_sat(&info, &h, &sat) == -1)
err(1, "Cannot read sat");
#ifdef CDF_DEBUG
cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h));
#endif
if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1)
err(1, "Cannot read ssat");
#ifdef CDF_DEBUG
cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h));
#endif
if (cdf_read_dir(&info, &h, &sat, &dir) == -1)
err(1, "Cannot read dir");
if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1)
err(1, "Cannot read short stream");
#ifdef CDF_DEBUG
cdf_dump_stream(&h, &sst);
#endif
#ifdef CDF_DEBUG
cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir);
#endif
if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir,
&scn) == -1)
err(1, "Cannot read summary info");
#ifdef CDF_DEBUG
cdf_dump_summary_info(&h, &scn);
#endif
(void)close(info.i_fd);
}
return 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2155_1 |
crossvul-cpp_data_good_5000_0 |
/*
* x_tables core - Backend for {ip,ip6,arp}_tables
*
* Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
* Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
*
* Based on existing ip_tables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
struct compat_delta {
unsigned int offset; /* offset in kernel */
int delta; /* delta in 32bit user land */
};
struct xt_af {
struct mutex mutex;
struct list_head match;
struct list_head target;
#ifdef CONFIG_COMPAT
struct mutex compat_mutex;
struct compat_delta *compat_tab;
unsigned int number; /* number of slots in compat_tab[] */
unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
static struct xt_af *xt;
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_UNSPEC] = "x",
[NFPROTO_IPV4] = "ip",
[NFPROTO_ARP] = "arp",
[NFPROTO_BRIDGE] = "eb",
[NFPROTO_IPV6] = "ip6",
};
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
u_int8_t af = target->family;
mutex_lock(&xt[af].mutex);
list_add(&target->list, &xt[af].target);
mutex_unlock(&xt[af].mutex);
return 0;
}
EXPORT_SYMBOL(xt_register_target);
void
xt_unregister_target(struct xt_target *target)
{
u_int8_t af = target->family;
mutex_lock(&xt[af].mutex);
list_del(&target->list);
mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);
int
xt_register_targets(struct xt_target *target, unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = xt_register_target(&target[i]);
if (err)
goto err;
}
return err;
err:
if (i > 0)
xt_unregister_targets(target, i);
return err;
}
EXPORT_SYMBOL(xt_register_targets);
void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
while (n-- > 0)
xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
int xt_register_match(struct xt_match *match)
{
u_int8_t af = match->family;
mutex_lock(&xt[af].mutex);
list_add(&match->list, &xt[af].match);
mutex_unlock(&xt[af].mutex);
return 0;
}
EXPORT_SYMBOL(xt_register_match);
void
xt_unregister_match(struct xt_match *match)
{
u_int8_t af = match->family;
mutex_lock(&xt[af].mutex);
list_del(&match->list);
mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);
int
xt_register_matches(struct xt_match *match, unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = xt_register_match(&match[i]);
if (err)
goto err;
}
return err;
err:
if (i > 0)
xt_unregister_matches(match, i);
return err;
}
EXPORT_SYMBOL(xt_register_matches);
void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
while (n-- > 0)
xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);
/*
 * These are weird: module loading must not be done while holding the
 * mutex (the freshly loaded module will try to register itself and
 * take it too), yet we still want a single lookup function to use.
 */
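/*
 * Hence the xt_request_find_{match,target} pattern below: look up (taking
 * and dropping the mutex internally), request_module() with no lock held,
 * then look up again.
 */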
/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
struct xt_match *m;
int err = -ENOENT;
mutex_lock(&xt[af].mutex);
list_for_each_entry(m, &xt[af].match, list) {
if (strcmp(m->name, name) == 0) {
if (m->revision == revision) {
if (try_module_get(m->me)) {
mutex_unlock(&xt[af].mutex);
return m;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC)
/* Try searching again in the family-independent list */
return xt_find_match(NFPROTO_UNSPEC, name, revision);
return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
struct xt_match *match;
match = xt_find_match(nfproto, name, revision);
if (IS_ERR(match)) {
request_module("%st_%s", xt_prefix[nfproto], name);
match = xt_find_match(nfproto, name, revision);
}
return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
struct xt_target *t;
int err = -ENOENT;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision == revision) {
if (try_module_get(t->me)) {
mutex_unlock(&xt[af].mutex);
return t;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
mutex_unlock(&xt[af].mutex);
if (af != NFPROTO_UNSPEC)
/* Try searching again in the family-independent list */
return xt_find_target(NFPROTO_UNSPEC, name, revision);
return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
struct xt_target *target;
target = xt_find_target(af, name, revision);
if (IS_ERR(target)) {
request_module("%st_%s", xt_prefix[af], name);
target = xt_find_target(af, name, revision);
}
return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
const struct xt_match *m;
int have_rev = 0;
list_for_each_entry(m, &xt[af].match, list) {
if (strcmp(m->name, name) == 0) {
if (m->revision > *bestp)
*bestp = m->revision;
if (m->revision == revision)
have_rev = 1;
}
}
if (af != NFPROTO_UNSPEC && !have_rev)
return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
return have_rev;
}
static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
const struct xt_target *t;
int have_rev = 0;
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision > *bestp)
*bestp = t->revision;
if (t->revision == revision)
have_rev = 1;
}
}
if (af != NFPROTO_UNSPEC && !have_rev)
return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
return have_rev;
}
/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
int *err)
{
int have_rev, best = -1;
mutex_lock(&xt[af].mutex);
if (target == 1)
have_rev = target_revfn(af, name, revision, &best);
else
have_rev = match_revfn(af, name, revision, &best);
mutex_unlock(&xt[af].mutex);
/* Nothing at all? Return 0 to try loading module. */
if (best == -1) {
*err = -ENOENT;
return 0;
}
*err = best;
if (!have_rev)
*err = -EPROTONOSUPPORT;
return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
static const char *const inetbr_names[] = {
"PREROUTING", "INPUT", "FORWARD",
"OUTPUT", "POSTROUTING", "BROUTING",
};
static const char *const arp_names[] = {
"INPUT", "FORWARD", "OUTPUT",
};
const char *const *names;
unsigned int i, max;
char *p = buf;
bool np = false;
int res;
names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
ARRAY_SIZE(inetbr_names);
*p = '\0';
for (i = 0; i < max; ++i) {
if (!(mask & (1 << i)))
continue;
res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
if (res > 0) {
size -= res;
p += res;
}
np = true;
}
return buf;
}
int xt_check_match(struct xt_mtchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->match->matchsize) != size &&
par->match->matchsize != -1) {
/*
* ebt_among is exempt from centralized matchsize checking
* because it uses a dynamic-size data set.
*/
pr_err("%s_tables: %s.%u match: invalid size "
"%u (kernel) != (user) %u\n",
xt_prefix[par->family], par->match->name,
par->match->revision,
XT_ALIGN(par->match->matchsize), size);
return -EINVAL;
}
if (par->match->table != NULL &&
strcmp(par->match->table, par->table) != 0) {
pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
xt_prefix[par->family], par->match->name,
par->match->table, par->table);
return -EINVAL;
}
if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
char used[64], allow[64];
pr_err("%s_tables: %s match: used from hooks %s, but only "
"valid from %s\n",
xt_prefix[par->family], par->match->name,
textify_hooks(used, sizeof(used), par->hook_mask,
par->family),
textify_hooks(allow, sizeof(allow), par->match->hooks,
par->family));
return -EINVAL;
}
if (par->match->proto && (par->match->proto != proto || inv_proto)) {
pr_err("%s_tables: %s match: only valid for protocol %u\n",
xt_prefix[par->family], par->match->name,
par->match->proto);
return -EINVAL;
}
if (par->match->checkentry != NULL) {
ret = par->match->checkentry(par);
if (ret < 0)
return ret;
else if (ret > 0)
/* Flag up potential errors. */
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
struct xt_af *xp = &xt[af];
if (!xp->compat_tab) {
if (!xp->number)
return -EINVAL;
xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
if (!xp->compat_tab)
return -ENOMEM;
xp->cur = 0;
}
if (xp->cur >= xp->number)
return -EINVAL;
if (xp->cur)
delta += xp->compat_tab[xp->cur - 1].delta;
xp->compat_tab[xp->cur].offset = offset;
xp->compat_tab[xp->cur].delta = delta;
xp->cur++;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
void xt_compat_flush_offsets(u_int8_t af)
{
if (xt[af].compat_tab) {
vfree(xt[af].compat_tab);
xt[af].compat_tab = NULL;
xt[af].number = 0;
xt[af].cur = 0;
}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
struct compat_delta *tmp = xt[af].compat_tab;
int mid, left = 0, right = xt[af].cur - 1;
while (left <= right) {
mid = (left + right) >> 1;
if (offset > tmp[mid].offset)
left = mid + 1;
else if (offset < tmp[mid].offset)
right = mid - 1;
else
return mid ? tmp[mid - 1].delta : 0;
}
return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
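/*
 * Worked example (illustrative values): after xt_compat_add_offset()
 * has recorded { .offset = 100, .delta = 8 } and the cumulative
 * { .offset = 200, .delta = 20 }, looking up offset 200 hits index 1
 * and returns tmp[0].delta = 8 (the shift accumulated before that
 * entry), while a miss such as offset 150 leaves the loop with
 * left == 1 and likewise returns 8; offset 100 returns 0.
 */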
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
xt[af].number = number;
xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
int xt_compat_match_offset(const struct xt_match *match)
{
u_int16_t csize = match->compatsize ? : match->matchsize;
return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
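/*
 * Example arithmetic (assuming native and compat alignments of 8 and
 * 4 bytes, as on x86-64): a match with matchsize 12 and no compatsize
 * gives XT_ALIGN(12) = 16 and COMPAT_XT_ALIGN(12) = 12, so each entry
 * grows by 4 bytes when translated from the compat to the native layout.
 */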
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
unsigned int *size)
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
int pad, off = xt_compat_match_offset(match);
u_int16_t msize = cm->u.user.match_size;
m = *dstptr;
memcpy(m, cm, sizeof(*cm));
if (match->compat_from_user)
match->compat_from_user(m->data, cm->data);
else
memcpy(m->data, cm->data, msize - sizeof(*cm));
pad = XT_ALIGN(match->matchsize) - match->matchsize;
if (pad > 0)
memset(m->data + match->matchsize, 0, pad);
msize += off;
m->u.user.match_size = msize;
*size += off;
*dstptr += msize;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size)
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match __user *cm = *dstptr;
int off = xt_compat_match_offset(match);
u_int16_t msize = m->u.user.match_size - off;
if (copy_to_user(cm, m, sizeof(*cm)) ||
put_user(msize, &cm->u.user.match_size) ||
copy_to_user(cm->u.user.name, m->u.kernel.match->name,
strlen(m->u.kernel.match->name) + 1))
return -EFAULT;
if (match->compat_to_user) {
if (match->compat_to_user((void __user *)cm->data, m->data))
return -EFAULT;
} else {
if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
return -EFAULT;
}
*size -= off;
*dstptr += msize;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */
int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
int ret;
if (XT_ALIGN(par->target->targetsize) != size) {
pr_err("%s_tables: %s.%u target: invalid size "
"%u (kernel) != (user) %u\n",
xt_prefix[par->family], par->target->name,
par->target->revision,
XT_ALIGN(par->target->targetsize), size);
return -EINVAL;
}
if (par->target->table != NULL &&
strcmp(par->target->table, par->table) != 0) {
pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
xt_prefix[par->family], par->target->name,
par->target->table, par->table);
return -EINVAL;
}
if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
char used[64], allow[64];
pr_err("%s_tables: %s target: used from hooks %s, but only "
"usable from %s\n",
xt_prefix[par->family], par->target->name,
textify_hooks(used, sizeof(used), par->hook_mask,
par->family),
textify_hooks(allow, sizeof(allow), par->target->hooks,
par->family));
return -EINVAL;
}
if (par->target->proto && (par->target->proto != proto || inv_proto)) {
pr_err("%s_tables: %s target: only valid for protocol %u\n",
xt_prefix[par->family], par->target->name,
par->target->proto);
return -EINVAL;
}
if (par->target->checkentry != NULL) {
ret = par->target->checkentry(par);
if (ret < 0)
return ret;
else if (ret > 0)
/* Flag up potential errors. */
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
u_int16_t csize = target->compatsize ? : target->targetsize;
return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
unsigned int *size)
{
const struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
int pad, off = xt_compat_target_offset(target);
u_int16_t tsize = ct->u.user.target_size;
t = *dstptr;
memcpy(t, ct, sizeof(*ct));
if (target->compat_from_user)
target->compat_from_user(t->data, ct->data);
else
memcpy(t->data, ct->data, tsize - sizeof(*ct));
pad = XT_ALIGN(target->targetsize) - target->targetsize;
if (pad > 0)
memset(t->data + target->targetsize, 0, pad);
tsize += off;
t->u.user.target_size = tsize;
*size += off;
*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size)
{
const struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target __user *ct = *dstptr;
int off = xt_compat_target_offset(target);
u_int16_t tsize = t->u.user.target_size - off;
if (copy_to_user(ct, t, sizeof(*ct)) ||
put_user(tsize, &ct->u.user.target_size) ||
copy_to_user(ct->u.user.name, t->u.kernel.target->name,
strlen(t->u.kernel.target->name) + 1))
return -EFAULT;
if (target->compat_to_user) {
if (target->compat_to_user((void __user *)ct->data, t->data))
return -EFAULT;
} else {
if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
return -EFAULT;
}
*size -= off;
*dstptr += tsize;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
struct xt_table_info *info = NULL;
size_t sz = sizeof(*info) + size;
if (sz < sizeof(*info))
return NULL;
/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;
if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (!info) {
info = vmalloc(sz);
if (!info)
return NULL;
}
memset(info, 0, sizeof(*info));
info->size = size;
return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
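/*
 * Illustrative pairing (sizes and field usage assumed): every
 * xt_alloc_table_info() must eventually be balanced by
 * xt_free_table_info(), including on error paths after the blob has
 * been partially filled in.
 */
#if 0
static struct xt_table_info *example_alloc_blob(unsigned int size)
{
	struct xt_table_info *newinfo = xt_alloc_table_info(size);

	if (!newinfo)
		return NULL;
	/* ... copy the rule blob in, set newinfo->number and hook
	 * offsets, then on any validation failure: ... */
	if (0 /* some validation failed */) {
		xt_free_table_info(newinfo);
		return NULL;
	}
	return newinfo;
}
#endif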
void xt_free_table_info(struct xt_table_info *info)
{
int cpu;
if (info->jumpstack != NULL) {
for_each_possible_cpu(cpu)
kvfree(info->jumpstack[cpu]);
kvfree(info->jumpstack);
}
kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
/* Find table by name, grabs mutex & ref. Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name)
{
struct xt_table *t, *found = NULL;
mutex_lock(&xt[af].mutex);
list_for_each_entry(t, &net->xt.tables[af], list)
if (strcmp(t->name, name) == 0 && try_module_get(t->me))
return t;
if (net == &init_net)
goto out;
/* Table doesn't exist in this netns, re-try init */
list_for_each_entry(t, &init_net.xt.tables[af], list) {
if (strcmp(t->name, name))
continue;
		if (!try_module_get(t->me))
			goto out;
mutex_unlock(&xt[af].mutex);
if (t->table_init(net) != 0) {
module_put(t->me);
return NULL;
}
found = t;
mutex_lock(&xt[af].mutex);
break;
}
if (!found)
goto out;
/* and once again: */
list_for_each_entry(t, &net->xt.tables[af], list)
if (strcmp(t->name, name) == 0)
return t;
module_put(found->me);
out:
mutex_unlock(&xt[af].mutex);
return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
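/*
 * Illustrative caller (the "filter" name is assumed): on success the
 * table is returned with xt[af].mutex held and a module reference
 * taken, so both must be released when the caller is done.
 */
#if 0
static void example_peek_table(struct net *net)
{
	struct xt_table *t = xt_find_table_lock(net, NFPROTO_IPV4, "filter");

	if (!t)
		return;
	/* ... inspect t->private while the mutex is held ... */
	xt_table_unlock(t);
	module_put(t->me);
}
#endif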
void xt_table_unlock(struct xt_table *table)
{
mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);
void xt_compat_unlock(u_int8_t af)
{
mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
unsigned int size;
int cpu;
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
i->jumpstack = vzalloc(size);
else
i->jumpstack = kzalloc(size, GFP_KERNEL);
if (i->jumpstack == NULL)
return -ENOMEM;
/* ruleset without jumps -- no stack needed */
if (i->stacksize == 0)
return 0;
/* Jumpstack needs to be able to record two full callchains, one
* from the first rule set traversal, plus one table reentrancy
* via -j TEE without clobbering the callchain that brought us to
 * the TEE target.
*
* This is done by allocating two jumpstacks per cpu, on reentry
* the upper half of the stack is used.
*
* see the jumpstack setup in ipt_do_table() for more details.
*/
size = sizeof(void *) * i->stacksize * 2u;
for_each_possible_cpu(cpu) {
if (size > PAGE_SIZE)
i->jumpstack[cpu] = vmalloc_node(size,
cpu_to_node(cpu));
else
i->jumpstack[cpu] = kmalloc_node(size,
GFP_KERNEL, cpu_to_node(cpu));
if (i->jumpstack[cpu] == NULL)
/*
* Freeing will be done later on by the callers. The
* chain is: xt_replace_table -> __do_replace ->
* do_replace -> xt_free_table_info.
*/
return -ENOMEM;
}
return 0;
}
struct xt_table_info *
xt_replace_table(struct xt_table *table,
unsigned int num_counters,
struct xt_table_info *newinfo,
int *error)
{
struct xt_table_info *private;
int ret;
ret = xt_jumpstack_alloc(newinfo);
if (ret < 0) {
*error = ret;
return NULL;
}
/* Do the substitution. */
local_bh_disable();
private = table->private;
/* Check inside lock: is the old number correct? */
if (num_counters != private->number) {
pr_debug("num_counters != table->private->number (%u/%u)\n",
num_counters, private->number);
local_bh_enable();
*error = -EAGAIN;
return NULL;
}
newinfo->initial_entries = private->initial_entries;
/*
* Ensure contents of newinfo are visible before assigning to
* private.
*/
smp_wmb();
table->private = newinfo;
/*
 * Even though table entries have now been swapped, other CPUs
* may still be using the old entries. This is okay, because
* resynchronization happens because of the locking done
* during the get_counters() routine.
*/
local_bh_enable();
#ifdef CONFIG_AUDIT
if (audit_enabled) {
struct audit_buffer *ab;
ab = audit_log_start(current->audit_context, GFP_KERNEL,
AUDIT_NETFILTER_CFG);
if (ab) {
audit_log_format(ab, "table=%s family=%u entries=%u",
table->name, table->af,
private->number);
audit_log_end(ab);
}
}
#endif
return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
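/*
 * Illustrative replace sequence (error handling trimmed): swap in a new
 * blob, harvest counters from the returned old one, then free it.
 */
#if 0
static int example_replace(struct xt_table *table,
			   struct xt_table_info *newinfo,
			   unsigned int num_counters)
{
	struct xt_table_info *old;
	int ret;

	old = xt_replace_table(table, num_counters, newinfo, &ret);
	if (!old)
		return ret;	/* e.g. -ENOMEM or -EAGAIN */
	/* ... copy counters out of `old` for userspace here ... */
	xt_free_table_info(old);
	return 0;
}
#endif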
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *input_table,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo)
{
int ret;
struct xt_table_info *private;
struct xt_table *t, *table;
/* Don't add one object to multiple lists. */
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
goto out;
}
mutex_lock(&xt[table->af].mutex);
/* Don't autoload: we'd eat our tail... */
list_for_each_entry(t, &net->xt.tables[table->af], list) {
if (strcmp(t->name, table->name) == 0) {
ret = -EEXIST;
goto unlock;
}
}
/* Simplifies replace_table code. */
table->private = bootstrap;
if (!xt_replace_table(table, 0, newinfo, &ret))
goto unlock;
private = table->private;
pr_debug("table->private->number = %u\n", private->number);
/* save number of initial entries */
private->initial_entries = private->number;
list_add(&table->list, &net->xt.tables[table->af]);
mutex_unlock(&xt[table->af].mutex);
return table;
unlock:
mutex_unlock(&xt[table->af].mutex);
kfree(table);
out:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
void *xt_unregister_table(struct xt_table *table)
{
struct xt_table_info *private;
mutex_lock(&xt[table->af].mutex);
private = table->private;
list_del(&table->list);
mutex_unlock(&xt[table->af].mutex);
kfree(table);
return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
struct seq_net_private p;
u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
struct xt_names_priv *priv = seq->private;
struct net *net = seq_file_net(seq);
u_int8_t af = priv->af;
mutex_lock(&xt[af].mutex);
return seq_list_start(&net->xt.tables[af], *pos);
}
static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct xt_names_priv *priv = seq->private;
struct net *net = seq_file_net(seq);
u_int8_t af = priv->af;
return seq_list_next(v, &net->xt.tables[af], pos);
}
static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
struct xt_names_priv *priv = seq->private;
u_int8_t af = priv->af;
mutex_unlock(&xt[af].mutex);
}
static int xt_table_seq_show(struct seq_file *seq, void *v)
{
struct xt_table *table = list_entry(v, struct xt_table, list);
if (*table->name)
seq_printf(seq, "%s\n", table->name);
return 0;
}
static const struct seq_operations xt_table_seq_ops = {
.start = xt_table_seq_start,
.next = xt_table_seq_next,
.stop = xt_table_seq_stop,
.show = xt_table_seq_show,
};
static int xt_table_open(struct inode *inode, struct file *file)
{
int ret;
struct xt_names_priv *priv;
ret = seq_open_net(inode, file, &xt_table_seq_ops,
sizeof(struct xt_names_priv));
if (!ret) {
priv = ((struct seq_file *)file->private_data)->private;
priv->af = (unsigned long)PDE_DATA(inode);
}
return ret;
}
static const struct file_operations xt_table_ops = {
.owner = THIS_MODULE,
.open = xt_table_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
/*
 * Traverse state for ip{,6}_{tables,matches}, to help cross
 * the multi-AF mutexes.
*/
struct nf_mttg_trav {
struct list_head *head, *curr;
uint8_t class, nfproto;
};
enum {
MTTG_TRAV_INIT,
MTTG_TRAV_NFP_UNSPEC,
MTTG_TRAV_NFP_SPEC,
MTTG_TRAV_DONE,
};
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
bool is_target)
{
static const uint8_t next_class[] = {
[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
[MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
};
struct nf_mttg_trav *trav = seq->private;
switch (trav->class) {
case MTTG_TRAV_INIT:
trav->class = MTTG_TRAV_NFP_UNSPEC;
mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
trav->head = trav->curr = is_target ?
&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
break;
case MTTG_TRAV_NFP_UNSPEC:
trav->curr = trav->curr->next;
if (trav->curr != trav->head)
break;
mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
mutex_lock(&xt[trav->nfproto].mutex);
trav->head = trav->curr = is_target ?
&xt[trav->nfproto].target : &xt[trav->nfproto].match;
trav->class = next_class[trav->class];
break;
case MTTG_TRAV_NFP_SPEC:
trav->curr = trav->curr->next;
if (trav->curr != trav->head)
break;
/* fallthru, _stop will unlock */
default:
return NULL;
}
if (ppos != NULL)
++*ppos;
return trav;
}
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
bool is_target)
{
struct nf_mttg_trav *trav = seq->private;
unsigned int j;
trav->class = MTTG_TRAV_INIT;
for (j = 0; j < *pos; ++j)
if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
return NULL;
return trav;
}
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
struct nf_mttg_trav *trav = seq->private;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
break;
case MTTG_TRAV_NFP_SPEC:
mutex_unlock(&xt[trav->nfproto].mutex);
break;
}
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
return xt_mttg_seq_start(seq, pos, false);
}
static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
return xt_mttg_seq_next(seq, v, ppos, false);
}
static int xt_match_seq_show(struct seq_file *seq, void *v)
{
const struct nf_mttg_trav *trav = seq->private;
const struct xt_match *match;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
case MTTG_TRAV_NFP_SPEC:
if (trav->curr == trav->head)
return 0;
match = list_entry(trav->curr, struct xt_match, list);
if (*match->name)
seq_printf(seq, "%s\n", match->name);
}
return 0;
}
static const struct seq_operations xt_match_seq_ops = {
.start = xt_match_seq_start,
.next = xt_match_seq_next,
.stop = xt_mttg_seq_stop,
.show = xt_match_seq_show,
};
static int xt_match_open(struct inode *inode, struct file *file)
{
struct nf_mttg_trav *trav;
trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
if (!trav)
return -ENOMEM;
trav->nfproto = (unsigned long)PDE_DATA(inode);
return 0;
}
static const struct file_operations xt_match_ops = {
.owner = THIS_MODULE,
.open = xt_match_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
return xt_mttg_seq_start(seq, pos, true);
}
static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
return xt_mttg_seq_next(seq, v, ppos, true);
}
static int xt_target_seq_show(struct seq_file *seq, void *v)
{
const struct nf_mttg_trav *trav = seq->private;
const struct xt_target *target;
switch (trav->class) {
case MTTG_TRAV_NFP_UNSPEC:
case MTTG_TRAV_NFP_SPEC:
if (trav->curr == trav->head)
return 0;
target = list_entry(trav->curr, struct xt_target, list);
if (*target->name)
seq_printf(seq, "%s\n", target->name);
}
return 0;
}
static const struct seq_operations xt_target_seq_ops = {
.start = xt_target_seq_start,
.next = xt_target_seq_next,
.stop = xt_mttg_seq_stop,
.show = xt_target_seq_show,
};
static int xt_target_open(struct inode *inode, struct file *file)
{
struct nf_mttg_trav *trav;
trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
if (!trav)
return -ENOMEM;
trav->nfproto = (unsigned long)PDE_DATA(inode);
return 0;
}
static const struct file_operations xt_target_ops = {
.owner = THIS_MODULE,
.open = xt_target_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"
#endif /* CONFIG_PROC_FS */
/**
* xt_hook_ops_alloc - set up hooks for a new table
* @table: table with metadata needed to set up hooks
* @fn: Hook function
*
* This function will create the nf_hook_ops that the x_table needs
* to hand to xt_hook_link_net().
*/
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
unsigned int hook_mask = table->valid_hooks;
uint8_t i, num_hooks = hweight32(hook_mask);
uint8_t hooknum;
struct nf_hook_ops *ops;
ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
hook_mask >>= 1, ++hooknum) {
if (!(hook_mask & 1))
continue;
ops[i].hook = fn;
ops[i].pf = table->af;
ops[i].hooknum = hooknum;
ops[i].priority = table->priority;
++i;
}
return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
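/*
 * Illustrative sketch: a table module allocating its hook ops. The
 * registration step is only described in the comment, since the exact
 * netfilter registration helper lives outside this file.
 */
#if 0
static struct nf_hook_ops *example_hook_setup(const struct xt_table *table,
					      nf_hookfn *fn)
{
	struct nf_hook_ops *ops = xt_hook_ops_alloc(table, fn);

	if (IS_ERR(ops))
		return ops;
	/* register the hweight32(table->valid_hooks) entries with the
	 * netfilter core, and kfree(ops) again on teardown */
	return ops;
}
#endif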
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
char buf[XT_FUNCTION_MAXNAMELEN];
struct proc_dir_entry *proc;
kuid_t root_uid;
kgid_t root_gid;
#endif
if (af >= ARRAY_SIZE(xt_prefix))
return -EINVAL;
#ifdef CONFIG_PROC_FS
root_uid = make_kuid(net->user_ns, 0);
root_gid = make_kgid(net->user_ns, 0);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
(void *)(unsigned long)af);
if (!proc)
goto out;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
(void *)(unsigned long)af);
if (!proc)
goto out_remove_tables;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
(void *)(unsigned long)af);
if (!proc)
goto out_remove_matches;
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(proc, root_uid, root_gid);
#endif
return 0;
#ifdef CONFIG_PROC_FS
out_remove_matches:
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
out_remove_tables:
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
out:
return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
char buf[XT_FUNCTION_MAXNAMELEN];
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TABLES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_TARGETS, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
strlcpy(buf, xt_prefix[af], sizeof(buf));
strlcat(buf, FORMAT_MATCHES, sizeof(buf));
remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
static int __net_init xt_net_init(struct net *net)
{
int i;
for (i = 0; i < NFPROTO_NUMPROTO; i++)
INIT_LIST_HEAD(&net->xt.tables[i]);
return 0;
}
static struct pernet_operations xt_net_ops = {
.init = xt_net_init,
};
static int __init xt_init(void)
{
unsigned int i;
int rv;
for_each_possible_cpu(i) {
seqcount_init(&per_cpu(xt_recseq, i));
}
xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
if (!xt)
return -ENOMEM;
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
mutex_init(&xt[i].compat_mutex);
xt[i].compat_tab = NULL;
#endif
INIT_LIST_HEAD(&xt[i].target);
INIT_LIST_HEAD(&xt[i].match);
}
rv = register_pernet_subsys(&xt_net_ops);
if (rv < 0)
kfree(xt);
return rv;
}
static void __exit xt_fini(void)
{
unregister_pernet_subsys(&xt_net_ops);
kfree(xt);
}
module_init(xt_init);
module_exit(xt_fini);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5000_0 |
crossvul-cpp_data_good_5668_1 | /*
* linux/mm/memory.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
/*
* demand-loading started 01.12.91 - seems it is high on the list of
* things wanted, and it should be easy to implement. - Linus
*/
/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
* pages started 02.12.91, seems to work. - Linus.
*
* Tested sharing by executing about 30 /bin/sh: under the old kernel it
* would have taken more than the 6M I have free, but it worked well as
* far as I could see.
*
* Also corrected some "invalidate()"s - I wasn't doing enough of them.
*/
/*
* Real VM (paging to/from disk) started 18.12.91. Much more work and
* thought has to go into this. Oh, well..
* 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
* Found it. Everything seems to work now.
* 20.12.91 - Ok, making the swap-device changeable like the root.
*/
/*
* 05.04.94 - Multi-page memory management added for v1.1.
* Idea by Alex Bligh (alex@cconcepts.co.uk)
*
* 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
* (Gerhard.Wichert@pdb.siemens.de)
*
* Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
*/
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include "internal.h"
#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;
EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif
unsigned long num_physpages;
/*
* A number of key systems in x86 including ioremap() rely on the assumption
* that high_memory defines the upper bound on direct map memory, then end
* of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
* highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
* and ZONE_HIGHMEM.
*/
void * high_memory;
EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
/*
* Randomize the address space (stacks, mmaps, brk, etc.).
*
* ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
* as ancient (libc5 based) binaries can segfault. )
*/
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
1;
#else
2;
#endif
static int __init disable_randmaps(char *s)
{
randomize_va_space = 0;
return 1;
}
__setup("norandmaps", disable_randmaps);
unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
static int __init init_zero_pfn(void)
{
zero_pfn = page_to_pfn(ZERO_PAGE(0));
return 0;
}
core_initcall(init_zero_pfn);
#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm)
{
int i;
for (i = 0; i < NR_MM_COUNTERS; i++) {
if (current->rss_stat.count[i]) {
add_mm_counter(mm, i, current->rss_stat.count[i]);
current->rss_stat.count[i] = 0;
}
}
current->rss_stat.events = 0;
}
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
struct task_struct *task = current;
if (likely(task->mm == mm))
task->rss_stat.count[member] += val;
else
add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH (64)
static void check_sync_rss_stat(struct task_struct *task)
{
if (unlikely(task != current))
return;
if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */
#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
static void check_sync_rss_stat(struct task_struct *task)
{
}
#endif /* SPLIT_RSS_COUNTING */
#ifdef HAVE_GENERIC_MMU_GATHER
static int tlb_next_batch(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
batch = tlb->active;
if (batch->next) {
tlb->active = batch->next;
return 1;
}
if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
return 0;
batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
if (!batch)
return 0;
tlb->batch_count++;
batch->next = NULL;
batch->nr = 0;
batch->max = MAX_GATHER_BATCH;
tlb->active->next = batch;
tlb->active = batch;
return 1;
}
/* tlb_gather_mmu
* Called to initialize an (on-stack) mmu_gather structure for page-table
* tear-down from @mm. The @fullmm argument is used when @mm is without
* users and we're going to destroy the full address space (exit/execve).
*/
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
{
tlb->mm = mm;
tlb->fullmm = fullmm;
tlb->need_flush_all = 0;
tlb->start = -1UL;
tlb->end = 0;
tlb->need_flush = 0;
tlb->fast_mode = (num_possible_cpus() == 1);
tlb->local.next = NULL;
tlb->local.nr = 0;
tlb->local.max = ARRAY_SIZE(tlb->__pages);
tlb->active = &tlb->local;
tlb->batch_count = 0;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb->batch = NULL;
#endif
}
void tlb_flush_mmu(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
if (!tlb->need_flush)
return;
tlb->need_flush = 0;
tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
if (tlb_fast_mode(tlb))
return;
for (batch = &tlb->local; batch; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
tlb->active = &tlb->local;
}
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
*/
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
struct mmu_gather_batch *batch, *next;
tlb->start = start;
tlb->end = end;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
for (batch = tlb->local.next; batch; batch = next) {
next = batch->next;
free_pages((unsigned long)batch, 0);
}
tlb->local.next = NULL;
}
/* __tlb_remove_page
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs. Returns the number of free page slots left.
* When out of page slots we must call tlb_flush_mmu().
*/
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->need_flush);
if (tlb_fast_mode(tlb)) {
free_page_and_swap_cache(page);
return 1; /* avoid calling tlb_flush_mmu() */
}
batch = tlb->active;
batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return 0;
batch = tlb->active;
}
VM_BUG_ON(batch->nr > batch->max);
return batch->max - batch->nr;
}
#endif /* HAVE_GENERIC_MMU_GATHER */
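/*
 * Minimal lifecycle sketch (mm, start and end assumed to come from a
 * caller holding the appropriate locks); zap_page_range() below is a
 * real instance of the same pattern.
 */
#if 0
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, false);	/* partial unmap, not exit */
	/* ... clear ptes, queueing each page with __tlb_remove_page();
	 * when it returns 0 the batch is full, so call tlb_flush_mmu() ... */
	tlb_finish_mmu(&tlb, start, end);	/* final flush, free batches */
#endif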
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
* See the comment near struct mmu_table_batch.
*/
static void tlb_remove_table_smp_sync(void *arg)
{
/* Simply deliver the interrupt */
}
static void tlb_remove_table_one(void *table)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
* assumed to be actually RCU-freed.
*
* It is however sufficient for software page-table walkers that rely on
* IRQ disabling. See the comment near struct mmu_table_batch.
*/
smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
__tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
struct mmu_table_batch *batch;
int i;
batch = container_of(head, struct mmu_table_batch, rcu);
for (i = 0; i < batch->nr; i++)
__tlb_remove_table(batch->tables[i]);
free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
if (*batch) {
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
tlb->need_flush = 1;
/*
 * When there are fewer than two users of this mm there cannot be a
* concurrent page-table walk.
*/
if (atomic_read(&tlb->mm->mm_users) < 2) {
__tlb_remove_table(table);
return;
}
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
tlb_remove_table_one(table);
return;
}
(*batch)->nr = 0;
}
(*batch)->tables[(*batch)->nr++] = table;
if ((*batch)->nr == MAX_TABLE_BATCH)
tlb_table_flush(tlb);
}
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
/*
* If a p?d_bad entry is found while walking page tables, report
* the error, before resetting entry to p?d_none. Usually (but
* very seldom) called out from the p?d_none_or_clear_bad macros.
*/
void pgd_clear_bad(pgd_t *pgd)
{
pgd_ERROR(*pgd);
pgd_clear(pgd);
}
void pud_clear_bad(pud_t *pud)
{
pud_ERROR(*pud);
pud_clear(pud);
}
void pmd_clear_bad(pmd_t *pmd)
{
pmd_ERROR(*pmd);
pmd_clear(pmd);
}
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
pgtable_t token = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free_tlb(tlb, token, addr);
tlb->mm->nr_ptes--;
}
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pmd_t *pmd;
unsigned long next;
unsigned long start;
start = addr;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
free_pte_range(tlb, pmd, addr);
} while (pmd++, addr = next, addr != end);
start &= PUD_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= PUD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pud_t *pud;
unsigned long next;
unsigned long start;
start = addr;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
start &= PGDIR_MASK;
if (start < floor)
return;
if (ceiling) {
ceiling &= PGDIR_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
return;
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
}
/*
* This function frees user-level page tables of a process.
*
* Must be called with pagetable lock held.
*/
void free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
unsigned long next;
/*
* The next few lines have given us lots of grief...
*
* Why are we testing PMD* at this top level? Because often
* there will be no work to do at all, and we'd prefer not to
* go all the way down to the bottom just to discover that.
*
* Why all these "- 1"s? Because 0 represents both the bottom
* of the address space and the top of it (using -1 for the
* top wouldn't help much: the masks would do the wrong thing).
* The rule is that addr 0 and floor 0 refer to the bottom of
 * the address space, but end 0 and ceiling 0 refer to the top.
* Comparisons need to use "end - 1" and "ceiling - 1" (though
* that end 0 case should be mythical).
*
* Wherever addr is brought up or ceiling brought down, we must
* be careful to reject "the opposite 0" before it confuses the
* subsequent tests. But what about where end is brought down
 * by PMD_SIZE below? No, end can't go down to 0 there.
*
* Whereas we round start (addr) and ceiling down, by different
* masks at different levels, in order to test whether a table
* now has no other vmas using it, so can be freed, we don't
* bother to round floor or end up - the tests don't need that.
*/
addr &= PMD_MASK;
if (addr < floor) {
addr += PMD_SIZE;
if (!addr)
return;
}
if (ceiling) {
ceiling &= PMD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
end -= PMD_SIZE;
if (addr > end - 1)
return;
pgd = pgd_offset(tlb->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
free_pud_range(tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
}
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long floor, unsigned long ceiling)
{
while (vma) {
struct vm_area_struct *next = vma->vm_next;
unsigned long addr = vma->vm_start;
/*
* Hide vma from rmap and truncate_pagecache before freeing
* pgtables
*/
unlink_anon_vmas(vma);
unlink_file_vma(vma);
if (is_vm_hugetlb_page(vma)) {
hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
} else {
/*
* Optimization: gather nearby vmas into one call down
*/
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
&& !is_vm_hugetlb_page(next)) {
vma = next;
next = vma->vm_next;
unlink_anon_vmas(vma);
unlink_file_vma(vma);
}
free_pgd_range(tlb, addr, vma->vm_end,
floor, next? next->vm_start: ceiling);
}
vma = next;
}
}
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long address)
{
pgtable_t new = pte_alloc_one(mm, address);
int wait_split_huge_page;
if (!new)
return -ENOMEM;
/*
* Ensure all pte setup (eg. pte page lock and page clearing) are
* visible before the pte is made visible to other CPUs by being
* put into page tables.
*
* The other side of the story is the pointer chasing in the page
* table walking code (when walking the page table without locking;
* ie. most of the time). Fortunately, these data accesses consist
* of a chain of data-dependent loads, meaning most CPUs (alpha
* being the notable exception) will already guarantee loads are
* seen in-order. See the alpha page table accessors for the
* smp_read_barrier_depends() barriers in page table walking code.
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
spin_lock(&mm->page_table_lock);
wait_split_huge_page = 0;
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
mm->nr_ptes++;
pmd_populate(mm, pmd, new);
new = NULL;
} else if (unlikely(pmd_trans_splitting(*pmd)))
wait_split_huge_page = 1;
spin_unlock(&mm->page_table_lock);
if (new)
pte_free(mm, new);
if (wait_split_huge_page)
wait_split_huge_page(vma->anon_vma, pmd);
return 0;
}
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
pte_t *new = pte_alloc_one_kernel(&init_mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&init_mm.page_table_lock);
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
pmd_populate_kernel(&init_mm, pmd, new);
new = NULL;
} else
VM_BUG_ON(pmd_trans_splitting(*pmd));
spin_unlock(&init_mm.page_table_lock);
if (new)
pte_free_kernel(&init_mm, new);
return 0;
}
static inline void init_rss_vec(int *rss)
{
memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}
static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
int i;
if (current->mm == mm)
sync_mm_rss(mm);
for (i = 0; i < NR_MM_COUNTERS; i++)
if (rss[i])
add_mm_counter(mm, i, rss[i]);
}
/*
* This function is called to print an error when a bad pte
* is found. For example, we might have a PFN-mapped pte in
* a region that doesn't allow it.
*
* The calling function must still handle the error.
*/
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
pte_t pte, struct page *page)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pud_t *pud = pud_offset(pgd, addr);
pmd_t *pmd = pmd_offset(pud, addr);
struct address_space *mapping;
pgoff_t index;
static unsigned long resume;
static unsigned long nr_shown;
static unsigned long nr_unshown;
/*
* Allow a burst of 60 reports, then keep quiet for that minute;
* or allow a steady drip of one report per second.
*/
if (nr_shown == 60) {
if (time_before(jiffies, resume)) {
nr_unshown++;
return;
}
if (nr_unshown) {
printk(KERN_ALERT
"BUG: Bad page map: %lu messages suppressed\n",
nr_unshown);
nr_unshown = 0;
}
nr_shown = 0;
}
if (nr_shown++ == 0)
resume = jiffies + 60 * HZ;
mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
index = linear_page_index(vma, addr);
printk(KERN_ALERT
"BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
current->comm,
(long long)pte_val(pte), (long long)pmd_val(*pmd));
if (page)
dump_page(page);
printk(KERN_ALERT
"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
/*
* Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
*/
if (vma->vm_ops)
print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
(unsigned long)vma->vm_ops->fault);
if (vma->vm_file && vma->vm_file->f_op)
print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
(unsigned long)vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
static inline bool is_cow_mapping(vm_flags_t flags)
{
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
/*
* vm_normal_page -- This function gets the "struct page" associated with a pte.
*
* "Special" mappings do not wish to be associated with a "struct page" (either
* it doesn't exist, or it exists but they don't want to touch it). In this
* case, NULL is returned here. "Normal" mappings do have a struct page.
*
* There are 2 broad cases. Firstly, an architecture may define a pte_special()
* pte bit, in which case this function is trivial. Secondly, an architecture
* may not have a spare pte bit, which requires a more complicated scheme,
* described below.
*
* A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
* special mapping (even if there are underlying and valid "struct pages").
* COWed pages of a VM_PFNMAP are always normal.
*
* The way we recognize COWed pages within VM_PFNMAP mappings is through the
* rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
* set, and the vm_pgoff will point to the first PFN mapped: thus every special
* mapping will always honor the rule
*
* pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
*
* And for normal mappings this is false.
*
* This restricts such mappings to be a linear translation from virtual address
* to pfn. To get around this restriction, we allow arbitrary mappings so long
* as the vma is not a COW mapping; in that case, we know that all ptes are
* special (because none can have been COWed).
*
*
* In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
*
* VM_MIXEDMAP mappings can likewise contain memory with or without "struct
* page" backing, however the difference is that _all_ pages with a struct
* page (that is, those where pfn_valid is true) are refcounted and considered
* normal pages by the VM. The disadvantage is that pages are refcounted
* (which can be slower and simply not an option for some PFNMAP users). The
* advantage is that we don't have to follow the strict linearity rule of
* PFNMAP mappings in order to support COWable mappings.
*
*/
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
if (HAVE_PTE_SPECIAL) {
if (likely(!pte_special(pte)))
goto check_pfn;
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
return NULL;
if (!is_zero_pfn(pfn))
print_bad_pte(vma, addr, pte, NULL);
return NULL;
}
/* !HAVE_PTE_SPECIAL case follows: */
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
if (!pfn_valid(pfn))
return NULL;
goto out;
} else {
unsigned long off;
off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
if (!is_cow_mapping(vma->vm_flags))
return NULL;
}
}
if (is_zero_pfn(pfn))
return NULL;
check_pfn:
if (unlikely(pfn > highest_memmap_pfn)) {
print_bad_pte(vma, addr, pte, NULL);
return NULL;
}
/*
* NOTE! We still have PageReserved() pages in the page tables.
* eg. VDSO mappings can cause them to exist.
*/
out:
return pfn_to_page(pfn);
}
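/*
 * Illustrative fragment (vma, addr and pte assumed to come from a
 * locked page-table walk): only "normal" pages may be refcounted;
 * a NULL return (special or zero-page mapping) is simply skipped.
 */
#if 0
	struct page *page = vm_normal_page(vma, addr, pte);

	if (page)
		get_page(page);	/* safe: a normal page is refcounted */
#endif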
/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * are already present in the new task and cleared in the whole range
 * covered by this vma.
*/
static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
unsigned long addr, int *rss)
{
unsigned long vm_flags = vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
if (!pte_file(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
if (swap_duplicate(entry) < 0)
return entry.val;
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
if (list_empty(&dst_mm->mmlist))
list_add(&dst_mm->mmlist,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
if (likely(!non_swap_entry(entry)))
rss[MM_SWAPENTS]++;
else if (is_migration_entry(entry)) {
page = migration_entry_to_page(entry);
if (PageAnon(page))
rss[MM_ANONPAGES]++;
else
rss[MM_FILEPAGES]++;
if (is_write_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
/*
* COW mappings require pages in both
* parent and child to be set to read.
*/
make_migration_entry_read(&entry);
pte = swp_entry_to_pte(entry);
set_pte_at(src_mm, addr, src_pte, pte);
}
}
}
goto out_set_pte;
}
/*
* If it's a COW mapping, write protect it both
* in the parent and the child
*/
if (is_cow_mapping(vm_flags)) {
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = pte_wrprotect(pte);
}
/*
* If it's a shared mapping, mark it clean in
* the child
*/
if (vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
page_dup_rmap(page);
if (PageAnon(page))
rss[MM_ANONPAGES]++;
else
rss[MM_FILEPAGES]++;
}
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
return 0;
}
int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pte_t *orig_src_pte, *orig_dst_pte;
pte_t *src_pte, *dst_pte;
spinlock_t *src_ptl, *dst_ptl;
int progress = 0;
int rss[NR_MM_COUNTERS];
swp_entry_t entry = (swp_entry_t){0};
again:
init_rss_vec(rss);
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
if (!dst_pte)
return -ENOMEM;
src_pte = pte_offset_map(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
orig_src_pte = src_pte;
orig_dst_pte = dst_pte;
arch_enter_lazy_mmu_mode();
do {
/*
* We are holding two locks at this point - either of them
* could generate latencies in another task on another CPU.
*/
if (progress >= 32) {
progress = 0;
if (need_resched() ||
spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
break;
}
if (pte_none(*src_pte)) {
progress++;
continue;
}
entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
vma, addr, rss);
if (entry.val)
break;
progress += 8;
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
spin_unlock(src_ptl);
pte_unmap(orig_src_pte);
add_mm_rss_vec(dst_mm, rss);
pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
if (entry.val) {
if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
return -ENOMEM;
progress = 0;
}
if (addr != end)
goto again;
return 0;
}
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pmd_t *src_pmd, *dst_pmd;
unsigned long next;
dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
if (!dst_pmd)
return -ENOMEM;
src_pmd = pmd_offset(src_pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*src_pmd)) {
int err;
VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
err = copy_huge_pmd(dst_mm, src_mm,
dst_pmd, src_pmd, addr, vma);
if (err == -ENOMEM)
return -ENOMEM;
if (!err)
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(src_pmd))
continue;
if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
vma, addr, next))
return -ENOMEM;
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
return 0;
}
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
pud_t *src_pud, *dst_pud;
unsigned long next;
dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
if (!dst_pud)
return -ENOMEM;
src_pud = pud_offset(src_pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(src_pud))
continue;
if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
vma, addr, next))
return -ENOMEM;
} while (dst_pud++, src_pud++, addr = next, addr != end);
return 0;
}
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
struct vm_area_struct *vma)
{
pgd_t *src_pgd, *dst_pgd;
unsigned long next;
unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
bool is_cow;
int ret;
/*
* Don't copy ptes where a page fault will fill them correctly.
* Fork becomes much lighter when there are big shared or private
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
VM_PFNMAP | VM_MIXEDMAP))) {
if (!vma->anon_vma)
return 0;
}
if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst_mm, src_mm, vma);
if (unlikely(vma->vm_flags & VM_PFNMAP)) {
/*
* We do not free on error cases below as remove_vma
* gets called on error from higher level routine
*/
ret = track_pfn_copy(vma);
if (ret)
return ret;
}
/*
* We need to invalidate the secondary MMU mappings only when
* there could be a permission downgrade on the ptes of the
* parent mm. And a permission downgrade will only happen if
* is_cow_mapping() returns true.
*/
is_cow = is_cow_mapping(vma->vm_flags);
mmun_start = addr;
mmun_end = end;
if (is_cow)
mmu_notifier_invalidate_range_start(src_mm, mmun_start,
mmun_end);
ret = 0;
dst_pgd = pgd_offset(dst_mm, addr);
src_pgd = pgd_offset(src_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(src_pgd))
continue;
if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
vma, addr, next))) {
ret = -ENOMEM;
break;
}
} while (dst_pgd++, src_pgd++, addr = next, addr != end);
if (is_cow)
mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
return ret;
}
static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
struct mm_struct *mm = tlb->mm;
int force_flush = 0;
int rss[NR_MM_COUNTERS];
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
again:
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
if (pte_none(ptent)) {
continue;
}
if (pte_present(ptent)) {
struct page *page;
page = vm_normal_page(vma, addr, ptent);
if (unlikely(details) && page) {
/*
* unmap_shared_mapping_pages() wants to
* invalidate cache without truncating:
* unmap shared but keep private pages.
*/
if (details->check_mapping &&
details->check_mapping != page->mapping)
continue;
/*
* Each page->index must be checked when
* invalidating or truncating nonlinear.
*/
if (details->nonlinear_vma &&
(page->index < details->first_index ||
page->index > details->last_index))
continue;
}
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
if (unlikely(!page))
continue;
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
addr) != page->index)
set_pte_at(mm, addr, pte,
pgoff_to_pte(page->index));
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
if (pte_dirty(ptent))
set_page_dirty(page);
if (pte_young(ptent) &&
likely(!VM_SequentialReadHint(vma)))
mark_page_accessed(page);
rss[MM_FILEPAGES]--;
}
page_remove_rmap(page);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
force_flush = !__tlb_remove_page(tlb, page);
if (force_flush)
break;
continue;
}
/*
* If details->check_mapping, we leave swap entries;
* if details->nonlinear_vma, we leave file entries.
*/
if (unlikely(details))
continue;
if (pte_file(ptent)) {
if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
print_bad_pte(vma, addr, ptent, NULL);
} else {
swp_entry_t entry = pte_to_swp_entry(ptent);
if (!non_swap_entry(entry))
rss[MM_SWAPENTS]--;
else if (is_migration_entry(entry)) {
struct page *page;
page = migration_entry_to_page(entry);
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else
rss[MM_FILEPAGES]--;
}
if (unlikely(!free_swap_and_cache(entry)))
print_bad_pte(vma, addr, ptent, NULL);
}
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
} while (pte++, addr += PAGE_SIZE, addr != end);
add_mm_rss_vec(mm, rss);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(start_pte, ptl);
/*
* mmu_gather ran out of room to batch pages, we break out of
 * the PTE lock to avoid doing the potentially expensive TLB invalidate
* and page-free while holding it.
*/
if (force_flush) {
force_flush = 0;
#ifdef HAVE_GENERIC_MMU_GATHER
tlb->start = addr;
tlb->end = end;
#endif
tlb_flush_mmu(tlb);
if (addr != end)
goto again;
}
return addr;
}
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pmd_t *pmd;
unsigned long next;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE) {
#ifdef CONFIG_DEBUG_VM
if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
__func__, addr, end,
vma->vm_start,
vma->vm_end);
BUG();
}
#endif
split_huge_page_pmd(vma, addr, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
goto next;
/* fall through */
}
/*
* Here there can be other concurrent MADV_DONTNEED or
* trans huge page faults running, and if the pmd is
* none or trans huge it can change under us. This is
* because MADV_DONTNEED holds the mmap_sem in read
* mode.
*/
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
goto next;
next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next:
cond_resched();
} while (pmd++, addr = next, addr != end);
return addr;
}
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pud_t *pud;
unsigned long next;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
next = zap_pmd_range(tlb, vma, pud, addr, next, details);
} while (pud++, addr = next, addr != end);
return addr;
}
static void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
pgd_t *pgd;
unsigned long next;
if (details && !details->check_mapping && !details->nonlinear_vma)
details = NULL;
BUG_ON(addr >= end);
mem_cgroup_uncharge_start();
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
next = zap_pud_range(tlb, vma, pgd, addr, next, details);
} while (pgd++, addr = next, addr != end);
tlb_end_vma(tlb, vma);
mem_cgroup_uncharge_end();
}
static void unmap_single_vma(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr,
struct zap_details *details)
{
unsigned long start = max(vma->vm_start, start_addr);
unsigned long end;
if (start >= vma->vm_end)
return;
end = min(vma->vm_end, end_addr);
if (end <= vma->vm_start)
return;
if (vma->vm_file)
uprobe_munmap(vma, start, end);
if (unlikely(vma->vm_flags & VM_PFNMAP))
untrack_pfn(vma, 0, 0);
if (start != end) {
if (unlikely(is_vm_hugetlb_page(vma))) {
/*
* It is undesirable to test vma->vm_file as it
* should be non-null for valid hugetlb area.
* However, vm_file will be NULL in the error
* cleanup path of do_mmap_pgoff. When
* hugetlbfs ->mmap method fails,
* do_mmap_pgoff() nullifies vma->vm_file
* before calling this function to clean up.
* Since no pte has actually been setup, it is
* safe to do nothing in this case.
*/
if (vma->vm_file) {
mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
}
} else
unmap_page_range(tlb, vma, start, end, details);
}
}
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
* @tlb: address of the caller's struct mmu_gather
* @vma: the starting vma
* @start_addr: virtual address at which to start unmapping
* @end_addr: virtual address at which to end unmapping
*
* Unmap all pages in the vma list.
*
 * Only addresses between @start_addr and @end_addr will be unmapped.
*
* The VMA list must be sorted in ascending virtual address order.
*
* unmap_vmas() assumes that the caller will flush the whole unmapped address
* range after unmap_vmas() returns. So the only responsibility here is to
* ensure that any thus-far unmapped pages are flushed before unmap_vmas()
* drops the lock and schedules.
*/
void unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr)
{
struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
}
/**
* zap_page_range - remove user pages in a given range
* @vma: vm_area_struct holding the applicable pages
* @start: starting address of pages to zap
* @size: number of bytes to zap
* @details: details of nonlinear truncation or shared cache invalidation
*
* Caller must protect the VMA list
*/
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long size, struct zap_details *details)
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
unsigned long end = start + size;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, 0);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, start, end);
for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
unmap_single_vma(&tlb, vma, start, end, details);
mmu_notifier_invalidate_range_end(mm, start, end);
tlb_finish_mmu(&tlb, start, end);
}
/**
* zap_page_range_single - remove user pages in a given range
* @vma: vm_area_struct holding the applicable pages
* @address: starting address of pages to zap
* @size: number of bytes to zap
* @details: details of nonlinear truncation or shared cache invalidation
*
* The range must fit into one VMA.
*/
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
unsigned long end = address + size;
lru_add_drain();
tlb_gather_mmu(&tlb, mm, 0);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, address, end);
unmap_single_vma(&tlb, vma, address, end, details);
mmu_notifier_invalidate_range_end(mm, address, end);
tlb_finish_mmu(&tlb, address, end);
}
/**
* zap_vma_ptes - remove ptes mapping the vma
* @vma: vm_area_struct holding ptes to be zapped
* @address: starting address of pages to zap
* @size: number of bytes to zap
*
* This function only unmaps ptes assigned to VM_PFNMAP vmas.
*
* The entire address range must be fully contained within the vma.
*
 * Returns 0 if successful, or -1 if the range is not fully contained
 * within the vma or the vma is not a PFN mapping.
*/
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
if (address < vma->vm_start || address + size > vma->vm_end ||
!(vma->vm_flags & VM_PFNMAP))
return -1;
zap_page_range_single(vma, address, size, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
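/*
 * Example (illustrative sketch, not part of this file): a driver that
 * earlier populated a VM_PFNMAP vma, e.g. via remap_pfn_range(), might
 * tear down just the ptes before recycling the backing region. The caller
 * is assumed to hold mmap_sem; "my_dev_revoke" is a hypothetical name.
 */
#if 0
static void my_dev_revoke(struct vm_area_struct *vma)
{
	/* The range must lie entirely inside the vma. */
	if (zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start))
		pr_warn("my_dev: vma is not a PFN mapping\n");
}
#endif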
/**
* follow_page_mask - look up a page descriptor from a user-virtual address
* @vma: vm_area_struct mapping @address
* @address: virtual address to look up
* @flags: flags modifying lookup behaviour
* @page_mask: on output, *page_mask is set according to the size of the page
*
* @flags can have FOLL_ flags set, defined in <linux/mm.h>
*
* Returns the mapped (struct page *), %NULL if no mapping exists, or
* an error pointer if there is a mapping to something not represented
* by a page descriptor (see also vm_normal_page()).
*/
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
unsigned int *page_mask)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
*page_mask = 0;
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
BUG_ON(flags & FOLL_GET);
goto out;
}
page = NULL;
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
goto no_page_table;
pud = pud_offset(pgd, address);
if (pud_none(*pud))
goto no_page_table;
if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
BUG_ON(flags & FOLL_GET);
page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
goto out;
}
if (unlikely(pud_bad(*pud)))
goto no_page_table;
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
goto no_page_table;
if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
BUG_ON(flags & FOLL_GET);
page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
goto out;
}
if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
goto no_page_table;
if (pmd_trans_huge(*pmd)) {
if (flags & FOLL_SPLIT) {
split_huge_page_pmd(vma, address, pmd);
goto split_fallthrough;
}
spin_lock(&mm->page_table_lock);
if (likely(pmd_trans_huge(*pmd))) {
if (unlikely(pmd_trans_splitting(*pmd))) {
spin_unlock(&mm->page_table_lock);
wait_split_huge_page(vma->anon_vma, pmd);
} else {
page = follow_trans_huge_pmd(vma, address,
pmd, flags);
spin_unlock(&mm->page_table_lock);
*page_mask = HPAGE_PMD_NR - 1;
goto out;
}
} else
spin_unlock(&mm->page_table_lock);
/* fall through */
}
split_fallthrough:
if (unlikely(pmd_bad(*pmd)))
goto no_page_table;
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
pte = *ptep;
if (!pte_present(pte)) {
swp_entry_t entry;
/*
* KSM's break_ksm() relies upon recognizing a ksm page
* even while it is being migrated, so for that case we
* need migration_entry_wait().
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
if (pte_none(pte) || pte_file(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address);
goto split_fallthrough;
}
if ((flags & FOLL_NUMA) && pte_numa(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
page = vm_normal_page(vma, address, pte);
if (unlikely(!page)) {
if ((flags & FOLL_DUMP) ||
!is_zero_pfn(pte_pfn(pte)))
goto bad_page;
page = pte_page(pte);
}
if (flags & FOLL_GET)
get_page_foll(page);
if (flags & FOLL_TOUCH) {
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
/*
* pte_mkyoung() would be more correct here, but atomic care
* is needed to avoid losing the dirty bit: it is easier to use
* mark_page_accessed().
*/
mark_page_accessed(page);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/*
* The preliminary mapping check is mainly to avoid the
* pointless overhead of lock_page on the ZERO_PAGE
* which might bounce very badly if there is contention.
*
* If the page is already locked, we don't need to
* handle it now - vmscan will handle it later if and
* when it attempts to reclaim the page.
*/
if (page->mapping && trylock_page(page)) {
lru_add_drain(); /* push cached pages to LRU */
/*
* Because we lock page here, and migration is
* blocked by the pte's page reference, and we
* know the page is still mapped, we don't even
* need to check for file-cache page truncation.
*/
mlock_vma_page(page);
unlock_page(page);
}
}
unlock:
pte_unmap_unlock(ptep, ptl);
out:
return page;
bad_page:
pte_unmap_unlock(ptep, ptl);
return ERR_PTR(-EFAULT);
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return page;
no_page_table:
/*
* When core dumping an enormous anonymous area that nobody
* has touched so far, we don't want to allocate unnecessary pages or
* page tables. Return error instead of NULL to skip handle_mm_fault,
* then get_dump_page() will return NULL to leave a hole in the dump.
* But we can only make this optimization where a hole would surely
* be zero-filled if handle_mm_fault() actually did handle it.
*/
if ((flags & FOLL_DUMP) &&
(!vma->vm_ops || !vma->vm_ops->fault))
return ERR_PTR(-EFAULT);
return page;
}
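/*
 * Example (illustrative sketch, not part of this file): a caller that
 * holds mmap_sem can resolve a user address to its backing page with
 * follow_page_mask(). FOLL_GET makes the lookup take a reference that the
 * caller must drop with put_page(); "my_lookup" is a hypothetical name.
 */
#if 0
static struct page *my_lookup(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned int page_mask;
	struct page *page;

	page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
	if (IS_ERR_OR_NULL(page))
		return NULL;
	return page;	/* caller must put_page() when finished */
}
#endif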
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
return stack_guard_page_start(vma, addr) ||
stack_guard_page_end(vma, addr+PAGE_SIZE);
}
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
* @nonblocking: whether waiting for disk IO or mmap_sem contention
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
*
* Must be called with mmap_sem held for read or write.
*
* __get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
* This does not guarantee that the page exists in the user mappings when
* __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
* won't be freed completely. And mostly callers simply care that the page
* contains data that was valid *at some point in time*. Typically, an IO
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
* If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
* the page is written to, set_page_dirty (or set_page_dirty_lock, as
* appropriate) must be called after the page is finished with, and
* before put_page is called.
*
* If @nonblocking != NULL, __get_user_pages will not wait for disk IO
* or mmap_sem contention, and if waiting is needed to pin all pages,
* *@nonblocking will be set to 0.
*
* In most cases, get_user_pages or get_user_pages_fast should be used
* instead of __get_user_pages. __get_user_pages should be used only if
* you need some special @gup_flags.
*/
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking)
{
long i;
unsigned long vm_flags;
unsigned int page_mask;
if (!nr_pages)
return 0;
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
/*
* Require read or write permissions.
* If FOLL_FORCE is set, we only require the "MAY" flags.
*/
vm_flags = (gup_flags & FOLL_WRITE) ?
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= (gup_flags & FOLL_FORCE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
/*
* If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
* would be called on PROT_NONE ranges. We must never invoke
* handle_mm_fault on PROT_NONE ranges or the NUMA hinting
* page faults would unprotect the PROT_NONE ranges if
* _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
* bitflag. So to avoid that, don't set FOLL_NUMA if
* FOLL_FORCE is set.
*/
if (!(gup_flags & FOLL_FORCE))
gup_flags |= FOLL_NUMA;
i = 0;
do {
struct vm_area_struct *vma;
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
else
pgd = pgd_offset_gate(mm, pg);
BUG_ON(pgd_none(*pgd));
pud = pud_offset(pgd, pg);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, pg);
if (pmd_none(*pmd))
return i ? : -EFAULT;
VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map(pmd, pg);
if (pte_none(*pte)) {
pte_unmap(pte);
return i ? : -EFAULT;
}
vma = get_gate_vma(mm);
if (pages) {
struct page *page;
page = vm_normal_page(vma, start, *pte);
if (!page) {
if (!(gup_flags & FOLL_DUMP) &&
is_zero_pfn(pte_pfn(*pte)))
page = pte_page(*pte);
else {
pte_unmap(pte);
return i ? : -EFAULT;
}
}
pages[i] = page;
get_page(page);
}
pte_unmap(pte);
page_mask = 0;
goto next_page;
}
if (!vma ||
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i, gup_flags);
continue;
}
do {
struct page *page;
unsigned int foll_flags = gup_flags;
unsigned int page_increm;
/*
* If we have a pending SIGKILL, don't keep faulting
* pages and potentially allocating memory.
*/
if (unlikely(fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;
cond_resched();
while (!(page = follow_page_mask(vma, start,
foll_flags, &page_mask))) {
int ret;
unsigned int fault_flags = 0;
/* For mlock, just skip the stack guard page. */
if (foll_flags & FOLL_MLOCK) {
if (stack_guard_page(vma, start))
goto next_page;
}
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (foll_flags & FOLL_NOWAIT)
fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
ret = handle_mm_fault(mm, vma, start,
fault_flags);
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
if (ret & (VM_FAULT_HWPOISON |
VM_FAULT_HWPOISON_LARGE)) {
if (i)
return i;
else if (gup_flags & FOLL_HWPOISON)
return -EHWPOISON;
else
return -EFAULT;
}
if (ret & VM_FAULT_SIGBUS)
return i ? i : -EFAULT;
BUG();
}
if (tsk) {
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
}
if (ret & VM_FAULT_RETRY) {
if (nonblocking)
*nonblocking = 0;
return i;
}
/*
* The VM_FAULT_WRITE bit tells us that
* do_wp_page has broken COW when necessary,
* even if maybe_mkwrite decided not to set
* pte_write. We can thus safely do subsequent
* page lookups as if they were reads. But only
* do so when looping for pte_write is futile:
* in some cases userspace may also be wanting
* to write to the gotten user page, which a
* read fault here might prevent (a readonly
* page might get reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) &&
!(vma->vm_flags & VM_WRITE))
foll_flags &= ~FOLL_WRITE;
cond_resched();
}
if (IS_ERR(page))
return i ? i : PTR_ERR(page);
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start);
flush_dcache_page(page);
page_mask = 0;
}
next_page:
if (vmas) {
vmas[i] = vma;
page_mask = 0;
}
page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
if (page_increm > nr_pages)
page_increm = nr_pages;
i += page_increm;
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages && start < vma->vm_end);
} while (nr_pages);
return i;
}
EXPORT_SYMBOL(__get_user_pages);
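/*
 * Example (illustrative sketch, not part of this file): using the
 * @nonblocking protocol documented above. When *nonblocking has been
 * cleared on return, the fault handler already dropped mmap_sem, so it
 * must not be unlocked again; "my_pin_nowait" is a hypothetical name.
 */
#if 0
static long my_pin_nowait(unsigned long start, long n, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages(current, mm, start, n,
			       FOLL_TOUCH | FOLL_GET, pages, NULL, &locked);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
#endif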
/**
* fixup_user_fault() - manually resolve a user page fault
* @tsk: the task_struct to use for page fault accounting, or
* NULL if faults are not to be recorded.
* @mm: mm_struct of target mm
* @address: user address
* @fault_flags:flags to pass down to handle_mm_fault()
*
 * This is meant to be called in the specific scenario where for locking
 * reasons we try to access user memory in atomic context (within a
 * pagefault_disable() section); that access returns -EFAULT, and we want
 * to resolve the user fault before trying again.
*
* Typically this is meant to be used by the futex code.
*
* The main difference with get_user_pages() is that this function will
* unconditionally call handle_mm_fault() which will in turn perform all the
* necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
*
* This is important for some architectures where those bits also gate the
* access permission to the page because they are maintained in software. On
* such architectures, gup() will not be enough to make a subsequent access
* succeed.
*
 * This should be called with the mmap_sem held for read.
*/
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags)
{
struct vm_area_struct *vma;
int ret;
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
return -EFAULT;
ret = handle_mm_fault(mm, vma, address, fault_flags);
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return -ENOMEM;
if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return -EHWPOISON;
if (ret & VM_FAULT_SIGBUS)
return -EFAULT;
BUG();
}
if (tsk) {
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
}
return 0;
}
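/*
 * Example (illustrative sketch, modelled on the futex pattern mentioned
 * above): retry an atomic user access after resolving the fault.
 * "my_atomic_op" is a hypothetical helper that returns -EFAULT while the
 * page is not faulted in.
 */
#if 0
static int my_user_rmw(u32 __user *uaddr)
{
	int ret;

	for (;;) {
		pagefault_disable();
		ret = my_atomic_op(uaddr);
		pagefault_enable();
		if (ret != -EFAULT)
			return ret;
		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr,
				       FAULT_FLAG_WRITE);
		up_read(&current->mm->mmap_sem);
		if (ret)
			return ret;
	}
}
#endif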
/**
* get_user_pages() - pin user pages in memory
* @tsk: the task_struct to use for page fault accounting, or
* NULL if faults are not to be recorded.
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to by the caller
* @force: whether to force write access even if user mapping is
* readonly. This will result in the page being COWed even
* in MAP_SHARED mappings. You do not want this.
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
*
* Must be called with mmap_sem held for read or write.
*
* get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
* instant. That is, it takes the page that would be accessed if a user
* thread accesses the given user virtual address at that instant.
*
* This does not guarantee that the page exists in the user mappings when
* get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
* won't be freed completely. And mostly callers simply care that the page
* contains data that was valid *at some point in time*. Typically, an IO
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
* If write=0, the page must not be written to. If the page is written to,
* set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
* after the page is finished with, and before put_page is called.
*
* get_user_pages is typically used for fewer-copy IO operations, to get a
* handle on the memory by some means other than accesses via the user virtual
* addresses. The pages may be submitted for DMA to devices or accessed via
* their kernel linear mapping (via the kmap APIs). Care should be taken to
* use the correct cache flushing APIs.
*
* See also get_user_pages_fast, for performance critical applications.
*/
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages, int write,
int force, struct page **pages, struct vm_area_struct **vmas)
{
int flags = FOLL_TOUCH;
if (pages)
flags |= FOLL_GET;
if (write)
flags |= FOLL_WRITE;
if (force)
flags |= FOLL_FORCE;
return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
NULL);
}
EXPORT_SYMBOL(get_user_pages);
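/*
 * Example (illustrative sketch, not part of this file): the usual
 * pin-access-dirty-release sequence described above, for a single
 * writable page. "my_touch_user_page" is a hypothetical name.
 */
#if 0
static int my_touch_user_page(unsigned long addr)
{
	struct page *page;
	void *kaddr;
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, addr & PAGE_MASK,
			     1, 1 /* write */, 0 /* force */, &page, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret < 1)
		return ret < 0 ? (int)ret : -EFAULT;

	kaddr = kmap(page);
	memset(kaddr, 0, PAGE_SIZE);	/* write via the kernel mapping */
	kunmap(page);

	set_page_dirty_lock(page);	/* we wrote to the page */
	put_page(page);			/* drop the reference gup took */
	return 0;
}
#endif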
/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
*
* Returns struct page pointer of user page pinned for dump,
* to be freed afterwards by page_cache_release() or put_page().
*
* Returns NULL on any kind of failure - a hole must then be inserted into
* the corefile, to preserve alignment with its headers; and also returns
* NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
* allowing a hole to be left in the corefile to save diskspace.
*
* Called without mmap_sem, but after all other threads have been killed.
*/
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
struct vm_area_struct *vma;
struct page *page;
if (__get_user_pages(current, current->mm, addr, 1,
FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
NULL) < 1)
return NULL;
flush_cache_page(vma, addr, page_to_pfn(page));
return page;
}
#endif /* CONFIG_ELF_CORE */
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
pud_t * pud = pud_alloc(mm, pgd, addr);
if (pud) {
pmd_t * pmd = pmd_alloc(mm, pud, addr);
if (pmd) {
VM_BUG_ON(pmd_trans_huge(*pmd));
return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
}
return NULL;
}
/*
* This is the old fallback for page remapping.
*
* For historical reasons, it only allows reserved pages. Only
* old drivers should use this, and they needed to mark their
* pages reserved for the old functions anyway.
*/
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page, pgprot_t prot)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte;
spinlock_t *ptl;
retval = -EINVAL;
if (PageAnon(page))
goto out;
retval = -ENOMEM;
flush_dcache_page(page);
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;
/* Ok, finally just insert the thing.. */
get_page(page);
inc_mm_counter_fast(mm, MM_FILEPAGES);
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
out_unlock:
pte_unmap_unlock(pte, ptl);
out:
return retval;
}
/**
* vm_insert_page - insert single page into user vma
* @vma: user vma to map to
* @addr: target user address of this page
* @page: source kernel page
*
* This allows drivers to insert individual pages they've allocated
* into a user vma.
*
* The page has to be a nice clean _individual_ kernel allocation.
* If you allocate a compound page, you need to have marked it as
* such (__GFP_COMP), or manually just split the page up yourself
* (see split_page()).
*
* NOTE! Traditionally this was done with "remap_pfn_range()" which
* took an arbitrary page protection parameter. This doesn't allow
* that. Your vma protection will have to be set up correctly, which
* means that if you want a shared writable mapping, you'd better
* ask for a shared writable mapping!
*
* The page does not need to be reserved.
*
* Usually this function is called from f_op->mmap() handler
* under mm->mmap_sem write-lock, so it can change vma->vm_flags.
* Caller must set VM_MIXEDMAP on vma if it wants to call this
* function from other places, for example from page-fault handler.
*/
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
{
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vma->vm_flags |= VM_MIXEDMAP;
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
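/*
 * Example (illustrative sketch, not part of this file): an f_op->mmap()
 * handler inserting a single kernel-allocated page, per the comment
 * above. "my_dev_mmap" and the private_data layout are hypothetical.
 */
#if 0
static int my_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct page *page = filp->private_data;	/* one clean page */

	if (vma->vm_end - vma->vm_start != PAGE_SIZE || vma->vm_pgoff)
		return -EINVAL;
	/* ->mmap runs under mmap_sem write-lock, so vm_flags may change */
	return vm_insert_page(vma, vma->vm_start, page);
}
#endif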
static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t prot)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte, entry;
spinlock_t *ptl;
retval = -ENOMEM;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;
/* Ok, finally just insert the thing.. */
entry = pte_mkspecial(pfn_pte(pfn, prot));
set_pte_at(mm, addr, pte, entry);
update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
retval = 0;
out_unlock:
pte_unmap_unlock(pte, ptl);
out:
return retval;
}
/**
* vm_insert_pfn - insert single pfn into user vma
* @vma: user vma to map to
* @addr: target user address of this page
* @pfn: source kernel pfn
*
* Similar to vm_insert_page, this allows drivers to insert individual pages
* they've allocated into a user vma. Same comments apply.
*
* This function should only be called from a vm_ops->fault handler, and
* in that case the handler should return NULL.
*
* vma cannot be a COW mapping.
*
* As this is called only for pages that do not currently exist, we
* do not need to flush old virtual caches or the TLB.
*/
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
int ret;
pgprot_t pgprot = vma->vm_page_prot;
/*
* Technically, architectures with pte_special can avoid all these
* restrictions (same for remap_pfn_range). However we would like
* consistency in testing and feature parity among all, so we should
* try to keep these invariants in place for everybody.
*/
BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (track_pfn_insert(vma, &pgprot, pfn))
return -EINVAL;
ret = insert_pfn(vma, addr, pfn, pgprot);
return ret;
}
EXPORT_SYMBOL(vm_insert_pfn);
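/*
 * Example (illustrative sketch, not part of this file): a vm_ops->fault
 * handler inserting a raw pfn, as the comment above suggests. "struct
 * my_dev" and its base_pfn field are hypothetical.
 */
#if 0
static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;
	unsigned long pfn = dev->base_pfn + vmf->pgoff;
	int ret;

	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;
	if (ret && ret != -EBUSY)	/* -EBUSY: raced with another fault */
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;		/* pte installed, no page to return */
}
#endif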
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
/*
* If we don't have pte special, then we have to use the pfn_valid()
* based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
* refcount the page if pfn_valid is true (hence insert_page rather
* than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would then be refcounted as a normal page.
*/
if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
struct page *page;
page = pfn_to_page(pfn);
return insert_page(vma, addr, page, vma->vm_page_prot);
}
return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);
/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
* in null mappings (currently treated as "copy-on-access")
*/
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pte_t *pte;
spinlock_t *ptl;
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
do {
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
return 0;
}
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
pfn -= addr >> PAGE_SHIFT;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return -ENOMEM;
VM_BUG_ON(pmd_trans_huge(*pmd));
do {
next = pmd_addr_end(addr, end);
if (remap_pte_range(mm, pmd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
pfn -= addr >> PAGE_SHIFT;
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
if (remap_pmd_range(mm, pud, addr, next,
pfn + (addr >> PAGE_SHIFT), prot))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
* @addr: target user address to start at
* @pfn: physical address of kernel memory
* @size: size of map area
* @prot: page protection flags for this mapping
*
* Note: this is only safe if the mm semaphore is held when called.
*/
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
pgd_t *pgd;
unsigned long next;
unsigned long end = addr + PAGE_ALIGN(size);
struct mm_struct *mm = vma->vm_mm;
int err;
/*
* Physically remapped pages are special. Tell the
* rest of the world about it:
* VM_IO tells people not to look at these pages
* (accesses can have side effects).
* VM_PFNMAP tells the core MM that the base pages are just
* raw PFN mappings, and do not have a "struct page" associated
* with them.
* VM_DONTEXPAND
* Disable vma merging and expanding with mremap().
* VM_DONTDUMP
* Omit vma from core dump, even when VM_IO turned off.
*
* There's a horrible special case to handle copy-on-write
* behaviour that some programs depend on. We mark the "original"
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
* See vm_normal_page() for details.
*/
if (is_cow_mapping(vma->vm_flags)) {
if (addr != vma->vm_start || end != vma->vm_end)
return -EINVAL;
vma->vm_pgoff = pfn;
}
err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
if (err)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
do {
next = pgd_addr_end(addr, end);
err = remap_pud_range(mm, pgd, addr, next,
pfn + (addr >> PAGE_SHIFT), prot);
if (err)
break;
} while (pgd++, addr = next, addr != end);
if (err)
untrack_pfn(vma, pfn, PAGE_ALIGN(size));
return err;
}
EXPORT_SYMBOL(remap_pfn_range);
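/*
 * Example (illustrative sketch, not part of this file): the classic
 * f_op->mmap() pattern for exposing a physical region to userspace.
 * MY_PHYS_BASE and MY_REGION_SIZE are hypothetical constants.
 */
#if 0
static int my_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > MY_REGION_SIZE)
		return -EINVAL;
	return remap_pfn_range(vma, vma->vm_start,
			       MY_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
#endif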
/**
* vm_iomap_memory - remap memory to userspace
* @vma: user vma to map to
* @start: start of area
* @len: size of area
*
* This is a simplified io_remap_pfn_range() for common driver use. The
* driver just needs to give us the physical memory range to be mapped,
* we'll figure out the rest from the vma information.
*
* NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
* whatever write-combining details or similar.
*/
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
unsigned long vm_len, pfn, pages;
/* Check that the physical memory area passed in looks valid */
if (start + len < start)
return -EINVAL;
/*
* You *really* shouldn't map things that aren't page-aligned,
* but we've historically allowed it because IO memory might
* just have smaller alignment.
*/
len += start & ~PAGE_MASK;
pfn = start >> PAGE_SHIFT;
pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
if (pfn + pages < pfn)
return -EINVAL;
/* We start the mapping 'vm_pgoff' pages into the area */
if (vma->vm_pgoff > pages)
return -EINVAL;
pfn += vma->vm_pgoff;
pages -= vma->vm_pgoff;
/* Can we fit all of the mapping? */
vm_len = vma->vm_end - vma->vm_start;
if (vm_len >> PAGE_SHIFT > pages)
return -EINVAL;
/* Ok, let it rip */
return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);
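/*
 * Example (illustrative sketch, not part of this file): the simplified
 * form of the handler above; vm_iomap_memory() derives the pfn, length
 * checks and vm_pgoff handling from the vma. "struct my_dev" and its
 * fields are hypothetical.
 */
#if 0
static int my_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct my_dev *dev = filp->private_data;

	return vm_iomap_memory(vma, dev->phys_base, dev->phys_len);
}
#endif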
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pte_t *pte;
int err;
pgtable_t token;
spinlock_t *uninitialized_var(ptl);
pte = (mm == &init_mm) ?
pte_alloc_kernel(pmd, addr) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
BUG_ON(pmd_huge(*pmd));
arch_enter_lazy_mmu_mode();
token = pmd_pgtable(*pmd);
do {
err = fn(pte++, token, addr, data);
if (err)
break;
} while (addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
if (mm != &init_mm)
pte_unmap_unlock(pte-1, ptl);
return err;
}
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pmd_t *pmd;
unsigned long next;
int err;
BUG_ON(pud_huge(*pud));
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
if (err)
break;
} while (pmd++, addr = next, addr != end);
return err;
}
static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
{
pud_t *pud;
unsigned long next;
int err;
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
if (err)
break;
} while (pud++, addr = next, addr != end);
return err;
}
/*
* Scan a region of virtual memory, filling in page tables as necessary
* and calling a provided function on each leaf page table.
*/
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
unsigned long size, pte_fn_t fn, void *data)
{
pgd_t *pgd;
unsigned long next;
unsigned long end = addr + size;
int err;
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
do {
next = pgd_addr_end(addr, end);
err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
if (err)
break;
} while (pgd++, addr = next, addr != end);
return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);
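/*
 * Example (illustrative sketch, not part of this file): a leaf callback
 * matching the pte_fn_t signature used by apply_to_pte_range() above; it
 * counts populated ptes in a range. Both function names are hypothetical.
 */
#if 0
static int my_count_one(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data)
{
	if (!pte_none(*pte))
		++*(unsigned long *)data;
	return 0;	/* nonzero would abort the walk */
}

static unsigned long my_count_ptes(struct mm_struct *mm,
				   unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	apply_to_page_range(mm, addr, size, my_count_one, &count);
	return count;
}
#endif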
/*
* handle_pte_fault chooses page fault handler according to an entry
* which was read non-atomically. Before making any commitment, on
* those architectures or configurations (e.g. i386 with PAE) which
* might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
* must check under lock before unmapping the pte and proceeding
* (but do_wp_page is only called after already making such a check;
* and do_anonymous_page can safely check later on).
*/
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
pte_t *page_table, pte_t orig_pte)
{
int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
if (sizeof(pte_t) > sizeof(unsigned long)) {
spinlock_t *ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
same = pte_same(*page_table, orig_pte);
spin_unlock(ptl);
}
#endif
pte_unmap(page_table);
return same;
}
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
if (unlikely(!src)) {
void *kaddr = kmap_atomic(dst);
void __user *uaddr = (void __user *)(va & PAGE_MASK);
/*
* This really shouldn't fail, because the page is there
* in the page tables. But it might just be unreadable,
* in which case we just give up and fill the result with
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
clear_page(kaddr);
kunmap_atomic(kaddr);
flush_dcache_page(dst);
} else
copy_user_highpage(dst, src, va, vma);
}
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
* and decrementing the shared-page counter for the old page.
*
* Note that this routine assumes that the protection checks have been
* done by the caller (the low-level page fault routine in most cases).
* Thus we can safely just mark it writable once we've done any necessary
* COW.
*
* We also mark the page dirty at this point even though the page will
* change only once the write actually happens. This avoids a few races,
* and potentially makes it more efficient.
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), with pte both mapped and locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
spinlock_t *ptl, pte_t orig_pte)
__releases(ptl)
{
struct page *old_page, *new_page = NULL;
pte_t entry;
int ret = 0;
int page_mkwrite = 0;
struct page *dirty_page = NULL;
unsigned long mmun_start = 0; /* For mmu_notifiers */
unsigned long mmun_end = 0; /* For mmu_notifiers */
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page) {
/*
* VM_MIXEDMAP !pfn_valid() case
*
* We should not cow pages in a shared writeable mapping.
* Just mark the pages writable as we can't do any dirty
* accounting on raw pfn maps.
*/
if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))
goto reuse;
goto gotten;
}
/*
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
if (PageAnon(old_page) && !PageKsm(old_page)) {
if (!trylock_page(old_page)) {
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
lock_page(old_page);
page_table = pte_offset_map_lock(mm, pmd, address,
&ptl);
if (!pte_same(*page_table, orig_pte)) {
unlock_page(old_page);
goto unlock;
}
page_cache_release(old_page);
}
if (reuse_swap_page(old_page)) {
/*
* The page is all ours. Move it to our anon_vma so
* the rmap code will not search our parent or siblings.
* Protected against the rmap code by the page lock.
*/
page_move_anon_rmap(old_page, vma, address);
unlock_page(old_page);
goto reuse;
}
unlock_page(old_page);
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))) {
/*
* Only catch write-faults on shared writable pages,
* read-only shared pages can get COWed by
* get_user_pages(.write=1, .force=1).
*/
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
struct vm_fault vmf;
int tmp;
vmf.virtual_address = (void __user *)(address &
PAGE_MASK);
vmf.pgoff = old_page->index;
vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
vmf.page = old_page;
/*
* Notify the address space that the page is about to
* become writable so that it can prohibit this or wait
* for the page to get into an appropriate state.
*
* We do this without the lock held, so that it can
* sleep if it needs to.
*/
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
if (unlikely(tmp &
(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
ret = tmp;
goto unwritable_page;
}
if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
lock_page(old_page);
if (!old_page->mapping) {
ret = 0; /* retry the fault */
unlock_page(old_page);
goto unwritable_page;
}
} else
VM_BUG_ON(!PageLocked(old_page));
/*
* Since we dropped the lock we need to revalidate
* the PTE as someone else may have changed it. If
* they did, we just return, as we can count on the
* MMU to tell us if they didn't also make it writable.
*/
page_table = pte_offset_map_lock(mm, pmd, address,
&ptl);
if (!pte_same(*page_table, orig_pte)) {
unlock_page(old_page);
goto unlock;
}
page_mkwrite = 1;
}
dirty_page = old_page;
get_page(dirty_page);
reuse:
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (ptep_set_access_flags(vma, address, page_table, entry,1))
update_mmu_cache(vma, address, page_table);
pte_unmap_unlock(page_table, ptl);
ret |= VM_FAULT_WRITE;
if (!dirty_page)
return ret;
/*
* Yes, Virginia, this is actually required to prevent a race
* with clear_page_dirty_for_io() from clearing the page dirty
	 * bit after it clears all dirty ptes, but before a racing
* do_wp_page installs a dirty pte.
*
* __do_fault is protected similarly.
*/
if (!page_mkwrite) {
wait_on_page_locked(dirty_page);
set_page_dirty_balance(dirty_page, page_mkwrite);
/* file_update_time outside page_lock */
if (vma->vm_file)
file_update_time(vma->vm_file);
}
put_page(dirty_page);
if (page_mkwrite) {
struct address_space *mapping = dirty_page->mapping;
set_page_dirty(dirty_page);
unlock_page(dirty_page);
page_cache_release(dirty_page);
if (mapping) {
/*
* Some device drivers do not set page.mapping
* but still dirty their pages
*/
balance_dirty_pages_ratelimited(mapping);
}
}
return ret;
}
/*
* Ok, we need to copy. Oh, well..
*/
page_cache_get(old_page);
gotten:
pte_unmap_unlock(page_table, ptl);
if (unlikely(anon_vma_prepare(vma)))
goto oom;
if (is_zero_pfn(pte_pfn(orig_pte))) {
new_page = alloc_zeroed_user_highpage_movable(vma, address);
if (!new_page)
goto oom;
} else {
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, address, vma);
}
__SetPageUptodate(new_page);
if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
goto oom_free_new;
mmun_start = address & PAGE_MASK;
mmun_end = mmun_start + PAGE_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
* Re-check the pte - we dropped the lock
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
} else
inc_mm_counter_fast(mm, MM_ANONPAGES);
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
/*
* Clear the pte entry and flush it first, before updating the
* pte with the new entry. This will avoid a race condition
* seen in the presence of one thread doing SMC and another
* thread doing COW.
*/
ptep_clear_flush(vma, address, page_table);
page_add_new_anon_rmap(new_page, vma, address);
/*
* We call the notify macro here because, when using secondary
* mmu page tables (such as kvm shadow page tables), we want the
* new page to be mapped directly into the secondary page table.
*/
set_pte_at_notify(mm, address, page_table, entry);
update_mmu_cache(vma, address, page_table);
if (old_page) {
/*
* Only after switching the pte to the new page may
* we remove the mapcount here. Otherwise another
* process may come and find the rmap count decremented
* before the pte is switched to the new page, and
* "reuse" the old page writing into it while our pte
* here still points into it and can be read by other
* threads.
*
* The critical issue is to order this
			 * page_remove_rmap with the ptep_clear_flush above.
* Those stores are ordered by (if nothing else,)
* the barrier present in the atomic_add_negative
* in page_remove_rmap.
*
* Then the TLB flush in ptep_clear_flush ensures that
* no process can access the old page before the
* decremented mapcount is visible. And the old page
* cannot be reused until after the decremented
* mapcount is visible. So transitively, TLBs to
* old page will be flushed before it can be reused.
*/
page_remove_rmap(old_page);
}
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
} else
mem_cgroup_uncharge_page(new_page);
if (new_page)
page_cache_release(new_page);
unlock:
pte_unmap_unlock(page_table, ptl);
if (mmun_end > mmun_start)
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (old_page) {
/*
* Don't let another task, with possibly unlocked vma,
* keep the mlocked page.
*/
if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
lock_page(old_page); /* LRU manipulation */
munlock_vma_page(old_page);
unlock_page(old_page);
}
page_cache_release(old_page);
}
return ret;
oom_free_new:
page_cache_release(new_page);
oom:
if (old_page)
page_cache_release(old_page);
return VM_FAULT_OOM;
unwritable_page:
page_cache_release(old_page);
return ret;
}
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
unsigned long start_addr, unsigned long end_addr,
struct zap_details *details)
{
zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
}
static inline void unmap_mapping_range_tree(struct rb_root *root,
struct zap_details *details)
{
struct vm_area_struct *vma;
pgoff_t vba, vea, zba, zea;
vma_interval_tree_foreach(vma, root,
details->first_index, details->last_index) {
vba = vma->vm_pgoff;
vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
zba = details->first_index;
if (zba < vba)
zba = vba;
zea = details->last_index;
if (zea > vea)
zea = vea;
unmap_mapping_range_vma(vma,
((zba - vba) << PAGE_SHIFT) + vma->vm_start,
((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
details);
}
}
static inline void unmap_mapping_range_list(struct list_head *head,
struct zap_details *details)
{
struct vm_area_struct *vma;
/*
* In nonlinear VMAs there is no correspondence between virtual address
* offset and file offset. So we must perform an exhaustive search
* across *all* the pages in each nonlinear VMA, not just the pages
* whose virtual address lies outside the file truncation point.
*/
list_for_each_entry(vma, head, shared.nonlinear) {
details->nonlinear_vma = vma;
unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
}
}
/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
* @mapping: the address space containing mmaps to be unmapped.
* @holebegin: byte in first page to unmap, relative to the start of
* the underlying file. This will be rounded down to a PAGE_SIZE
* boundary. Note that this is different from truncate_pagecache(), which
* must keep the partial page. In contrast, we must get rid of
* partial pages.
* @holelen: size of prospective hole in bytes. This will be rounded
* up to a PAGE_SIZE boundary. A holelen of zero truncates to the
* end of the file.
* @even_cows: 1 when truncating a file, unmap even private COWed pages;
* but 0 when invalidating pagecache, don't throw away private data.
*/
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows)
{
struct zap_details details;
pgoff_t hba = holebegin >> PAGE_SHIFT;
pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Check for overflow. */
if (sizeof(holelen) > sizeof(hlen)) {
long long holeend =
(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (holeend & ~(long long)ULONG_MAX)
hlen = ULONG_MAX - hba + 1;
}
details.check_mapping = even_cows? NULL: mapping;
details.nonlinear_vma = NULL;
details.first_index = hba;
details.last_index = hba + hlen - 1;
if (details.last_index < details.first_index)
details.last_index = ULONG_MAX;
mutex_lock(&mapping->i_mmap_mutex);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
mutex_unlock(&mapping->i_mmap_mutex);
}
EXPORT_SYMBOL(unmap_mapping_range);
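/*
 * Example (illustrative sketch, not part of this file): how a truncate
 * path might rip out all mappings of a hole, per the parameter
 * description above. "my_punch_hole" is a hypothetical name.
 */
#if 0
static void my_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	/* even_cows == 1: truncation semantics, drop private COWs too */
	unmap_mapping_range(inode->i_mapping, offset, len, 1);
	/* a real filesystem would also remove pagecache and blocks here */
}
#endif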
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
spinlock_t *ptl;
struct page *page, *swapcache;
swp_entry_t entry;
pte_t pte;
int locked;
struct mem_cgroup *ptr;
int exclusive = 0;
int ret = 0;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
goto out;
entry = pte_to_swp_entry(orig_pte);
if (unlikely(non_swap_entry(entry))) {
if (is_migration_entry(entry)) {
migration_entry_wait(mm, pmd, address);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
} else {
print_bad_pte(vma, address, orig_pte, NULL);
ret = VM_FAULT_SIGBUS;
}
goto out;
}
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry);
if (!page) {
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
/*
* Back out if somebody else faulted in this pte
* while we released the pte lock.
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte)))
ret = VM_FAULT_OOM;
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
goto unlock;
}
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(mm, PGMAJFAULT);
} else if (PageHWPoison(page)) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
* owner processes (which may be unknown at hwpoison time)
*/
ret = VM_FAULT_HWPOISON;
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
swapcache = page;
goto out_release;
}
swapcache = page;
locked = lock_page_or_retry(page, mm, flags);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (!locked) {
ret |= VM_FAULT_RETRY;
goto out_release;
}
/*
* Make sure try_to_free_swap or reuse_swap_page or swapoff did not
* release the swapcache from under us. The page pin, and pte_same
* test below, are not enough to exclude that. Even if it is still
* swapcache, we need to check that the page's swap has not changed.
*/
if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
goto out_page;
page = ksm_might_need_to_copy(page, vma, address);
if (unlikely(!page)) {
ret = VM_FAULT_OOM;
page = swapcache;
goto out_page;
}
if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
ret = VM_FAULT_OOM;
goto out_page;
}
/*
* Back out if somebody else already faulted in this pte.
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*page_table, orig_pte)))
goto out_nomap;
if (unlikely(!PageUptodate(page))) {
ret = VM_FAULT_SIGBUS;
goto out_nomap;
}
/*
* The page isn't present yet, go ahead with the fault.
*
* Be careful about the sequence of operations here.
* To get its accounting right, reuse_swap_page() must be called
* while the page is counted on swap but not yet in mapcount i.e.
* before page_add_anon_rmap() and swap_free(); try_to_free_swap()
* must be called after the swap_free(), or it will never succeed.
	 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
* mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
* in page->private. In this case, a record in swap_cgroup is silently
* discarded at swap_free().
*/
inc_mm_counter_fast(mm, MM_ANONPAGES);
dec_mm_counter_fast(mm, MM_SWAPENTS);
pte = mk_pte(page, vma->vm_page_prot);
if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
flags &= ~FAULT_FLAG_WRITE;
ret |= VM_FAULT_WRITE;
exclusive = 1;
}
flush_icache_page(vma, page);
set_pte_at(mm, address, page_table, pte);
if (page == swapcache)
do_page_add_anon_rmap(page, vma, address, exclusive);
else /* ksm created a completely new copy */
page_add_new_anon_rmap(page, vma, address);
/* It's better to call commit-charge after rmap is established */
mem_cgroup_commit_charge_swapin(page, ptr);
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
if (page != swapcache) {
/*
* Hold the lock to avoid the swap entry to be reused
* until we take the PT lock for the pte_same() check
* (to avoid false positives from pte_same). For
* further safety release the lock after the swap_free
* so that the swap count won't change under a
* parallel locked swapcache.
*/
unlock_page(swapcache);
page_cache_release(swapcache);
}
if (flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
if (ret & VM_FAULT_ERROR)
ret &= VM_FAULT_ERROR;
goto out;
}
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
out:
return ret;
out_nomap:
mem_cgroup_cancel_charge_swapin(ptr);
pte_unmap_unlock(page_table, ptl);
out_page:
unlock_page(page);
out_release:
page_cache_release(page);
if (page != swapcache) {
unlock_page(swapcache);
page_cache_release(swapcache);
}
return ret;
}
/*
* This is like a special single-page "expand_{down|up}wards()",
* except we must first make sure that 'address{-|+}PAGE_SIZE'
* doesn't hit another vma.
*/
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
address &= PAGE_MASK;
if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
struct vm_area_struct *prev = vma->vm_prev;
/*
* Is there a mapping abutting this one below?
*
* That's only ok if it's the same stack mapping
* that has gotten split..
*/
if (prev && prev->vm_end == address)
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
expand_downwards(vma, address - PAGE_SIZE);
}
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
struct vm_area_struct *next = vma->vm_next;
/* As VM_GROWSDOWN but s/below/above/ */
if (next && next->vm_start == address + PAGE_SIZE)
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
expand_upwards(vma, address + PAGE_SIZE);
}
return 0;
}
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
struct page *page;
spinlock_t *ptl;
pte_t entry;
pte_unmap(page_table);
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
return VM_FAULT_SIGBUS;
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
vma->vm_page_prot));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto unlock;
goto setpte;
}
/* Allocate our own private page. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
if (!page)
goto oom;
__SetPageUptodate(page);
if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto release;
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
set_pte_at(mm, address, page_table, entry);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
release:
mem_cgroup_uncharge_page(page);
page_cache_release(page);
goto unlock;
oom_free_page:
page_cache_release(page);
oom:
return VM_FAULT_OOM;
}
/*
* __do_fault() tries to create a new page mapping. It aggressively
* tries to share with existing pages, but makes a separate copy if
 * FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
* the next page fault.
*
* As this is called only for pages that do not currently exist, we
* do not need to flush old virtual caches or the TLB.
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte neither mapped nor locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
pte_t *page_table;
spinlock_t *ptl;
struct page *page;
struct page *cow_page;
pte_t entry;
int anon = 0;
struct page *dirty_page = NULL;
struct vm_fault vmf;
int ret;
int page_mkwrite = 0;
/*
	 * If we do COW later, allocate the page before taking lock_page()
	 * on the file cache page. This will reduce lock holding time.
*/
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (!cow_page)
return VM_FAULT_OOM;
if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) {
page_cache_release(cow_page);
return VM_FAULT_OOM;
}
} else
cow_page = NULL;
vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = pgoff;
vmf.flags = flags;
vmf.page = NULL;
ret = vma->vm_ops->fault(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
VM_FAULT_RETRY)))
goto uncharge_out;
if (unlikely(PageHWPoison(vmf.page))) {
if (ret & VM_FAULT_LOCKED)
unlock_page(vmf.page);
ret = VM_FAULT_HWPOISON;
goto uncharge_out;
}
/*
* For consistency in subsequent calls, make the faulted page always
* locked.
*/
if (unlikely(!(ret & VM_FAULT_LOCKED)))
lock_page(vmf.page);
else
VM_BUG_ON(!PageLocked(vmf.page));
/*
* Should we do an early C-O-W break?
*/
page = vmf.page;
if (flags & FAULT_FLAG_WRITE) {
if (!(vma->vm_flags & VM_SHARED)) {
page = cow_page;
anon = 1;
copy_user_highpage(page, vmf.page, address, vma);
__SetPageUptodate(page);
} else {
/*
* If the page will be shareable, see if the backing
* address space wants to know that the page is about
* to become writable
*/
if (vma->vm_ops->page_mkwrite) {
int tmp;
unlock_page(page);
vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
if (unlikely(tmp &
(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
ret = tmp;
goto unwritable_page;
}
if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
lock_page(page);
if (!page->mapping) {
ret = 0; /* retry the fault */
unlock_page(page);
goto unwritable_page;
}
} else
VM_BUG_ON(!PageLocked(page));
page_mkwrite = 1;
}
}
}
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
/*
* This silly early PAGE_DIRTY setting removes a race
* due to the bad i386 page protection. But it's valid
* for other architectures too.
*
* Note that if FAULT_FLAG_WRITE is set, we either now have
* an exclusive copy of the page, or this is a shared mapping,
* so we can make it writable and dirty to avoid having to
* handle that later.
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (anon) {
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
} else {
inc_mm_counter_fast(mm, MM_FILEPAGES);
page_add_file_rmap(page);
if (flags & FAULT_FLAG_WRITE) {
dirty_page = page;
get_page(dirty_page);
}
}
set_pte_at(mm, address, page_table, entry);
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
if (anon)
page_cache_release(page);
else
			anon = 1; /* not anon, but reuse this flag to release the faulted page */
}
pte_unmap_unlock(page_table, ptl);
if (dirty_page) {
struct address_space *mapping = page->mapping;
int dirtied = 0;
if (set_page_dirty(dirty_page))
dirtied = 1;
unlock_page(dirty_page);
put_page(dirty_page);
if ((dirtied || page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping but still
* dirty their pages
*/
balance_dirty_pages_ratelimited(mapping);
}
/* file_update_time outside page_lock */
if (vma->vm_file && !page_mkwrite)
file_update_time(vma->vm_file);
} else {
unlock_page(vmf.page);
if (anon)
page_cache_release(vmf.page);
}
return ret;
unwritable_page:
page_cache_release(page);
return ret;
uncharge_out:
	/* the filesystem's fault handler returned an error */
if (cow_page) {
mem_cgroup_uncharge_page(cow_page);
page_cache_release(cow_page);
}
return ret;
}
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
pte_unmap(page_table);
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
/*
* Fault of a previously existing named mapping. Repopulate the pte
* from the encoded file_pte if possible. This enables swappable
* nonlinear vmas.
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags, pte_t orig_pte)
{
pgoff_t pgoff;
flags |= FAULT_FLAG_NONLINEAR;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
return 0;
if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
/*
* Page table corrupted: show pte and kill process.
*/
print_bad_pte(vma, address, orig_pte, NULL);
return VM_FAULT_SIGBUS;
}
pgoff = pte_to_pgoff(orig_pte);
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int current_nid)
{
get_page(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
if (current_nid == numa_node_id())
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
return mpol_misplaced(page, vma, addr);
}
int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
{
struct page *page = NULL;
spinlock_t *ptl;
int current_nid = -1;
int target_nid;
bool migrated = false;
/*
* The "pte" at this point cannot be used safely without
* validation through pte_unmap_same(). It's of NUMA type but
	 * the pfn may be garbage if the read is non-atomic.
*
* ptep_modify_prot_start is not called as this is clearing
* the _PAGE_NUMA bit and it is not really expected that there
* would be concurrent hardware modifications to the PTE.
*/
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*ptep, pte))) {
pte_unmap_unlock(ptep, ptl);
goto out;
}
pte = pte_mknonnuma(pte);
set_pte_at(mm, addr, ptep, pte);
update_mmu_cache(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (!page) {
pte_unmap_unlock(ptep, ptl);
return 0;
}
current_nid = page_to_nid(page);
target_nid = numa_migrate_prep(page, vma, addr, current_nid);
pte_unmap_unlock(ptep, ptl);
if (target_nid == -1) {
/*
		 * Account the fault against the current node if the page is
		 * not being replaced, regardless of where it is located.
*/
current_nid = numa_node_id();
put_page(page);
goto out;
}
/* Migrate to the requested node */
migrated = migrate_misplaced_page(page, target_nid);
if (migrated)
current_nid = target_nid;
out:
if (current_nid != -1)
task_numa_fault(current_nid, 1, migrated);
return 0;
}
/* NUMA hinting page fault entry point for regular pmds */
#ifdef CONFIG_NUMA_BALANCING
static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
pmd_t pmd;
pte_t *pte, *orig_pte;
unsigned long _addr = addr & PMD_MASK;
unsigned long offset;
spinlock_t *ptl;
bool numa = false;
int local_nid = numa_node_id();
spin_lock(&mm->page_table_lock);
pmd = *pmdp;
if (pmd_numa(pmd)) {
set_pmd_at(mm, _addr, pmdp, pmd_mknonnuma(pmd));
numa = true;
}
spin_unlock(&mm->page_table_lock);
if (!numa)
return 0;
/* we're in a page fault so some vma must be in the range */
BUG_ON(!vma);
BUG_ON(vma->vm_start >= _addr + PMD_SIZE);
offset = max(_addr, vma->vm_start) & ~PMD_MASK;
VM_BUG_ON(offset >= PMD_SIZE);
orig_pte = pte = pte_offset_map_lock(mm, pmdp, _addr, &ptl);
pte += offset >> PAGE_SHIFT;
for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
pte_t pteval = *pte;
struct page *page;
int curr_nid = local_nid;
int target_nid;
bool migrated;
if (!pte_present(pteval))
continue;
if (!pte_numa(pteval))
continue;
if (addr >= vma->vm_end) {
vma = find_vma(mm, addr);
/* there's a pte present so there must be a vma */
BUG_ON(!vma);
BUG_ON(addr < vma->vm_start);
}
if (pte_numa(pteval)) {
pteval = pte_mknonnuma(pteval);
set_pte_at(mm, addr, pte, pteval);
}
page = vm_normal_page(vma, addr, pteval);
if (unlikely(!page))
continue;
/* only check non-shared pages */
if (unlikely(page_mapcount(page) != 1))
continue;
		/*
		 * Note that the NUMA fault is later accounted either to
		 * the node currently running the task or to the node the
		 * page is migrated to.
		 */
curr_nid = local_nid;
target_nid = numa_migrate_prep(page, vma, addr,
page_to_nid(page));
if (target_nid == -1) {
put_page(page);
continue;
}
/* Migrate to the requested node */
pte_unmap_unlock(pte, ptl);
migrated = migrate_misplaced_page(page, target_nid);
if (migrated)
curr_nid = target_nid;
task_numa_fault(curr_nid, 1, migrated);
pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
}
pte_unmap_unlock(orig_pte, ptl);
return 0;
}
#else
static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
BUG();
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most
* RISC architectures). The early dirtying is also good on the i386.
*
* There is also a hook called "update_mmu_cache()" that architectures
* with external mmu caches can use to update those (ie the Sparc or
* PowerPC hashed page tables that act as extended TLBs).
*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
*/
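/*
 * Editorial summary of the dispatch below (not part of the original
 * source): a none pte goes to do_linear_fault() or do_anonymous_page(),
 * a file pte to do_nonlinear_fault(), any other non-present pte to
 * do_swap_page(), a NUMA pte to do_numa_page(), and a present pte is
 * handled in place (write protection via do_wp_page(), then the
 * dirty/young updates).
 */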
int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags)
{
pte_t entry;
spinlock_t *ptl;
entry = *pte;
if (!pte_present(entry)) {
if (pte_none(entry)) {
if (vma->vm_ops) {
if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
pte, pmd, flags, entry);
}
return do_anonymous_page(mm, vma, address,
pte, pmd, flags);
}
if (pte_file(entry))
return do_nonlinear_fault(mm, vma, address,
pte, pmd, flags, entry);
return do_swap_page(mm, vma, address,
pte, pmd, flags, entry);
}
if (pte_numa(entry))
return do_numa_page(mm, vma, address, entry, pte, pmd);
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*pte, entry)))
goto unlock;
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry))
return do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
update_mmu_cache(vma, address, pte);
} else {
/*
* This is needed only for protection faults but the arch code
* is not yet telling us if this is a protection fault or not.
* This still avoids useless tlb flushes for .text page faults
* with threads.
*/
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
}
/*
* By the time we get here, we already hold the mm semaphore
*/
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
mem_cgroup_count_vm_event(mm, PGFAULT);
/* do counter updates before entering really critical section. */
check_sync_rss_stat(current);
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
return VM_FAULT_OOM;
pmd = pmd_alloc(mm, pud, address);
if (!pmd)
return VM_FAULT_OOM;
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
if (!vma->vm_ops)
return do_huge_pmd_anonymous_page(mm, vma, address,
pmd, flags);
} else {
pmd_t orig_pmd = *pmd;
int ret;
barrier();
if (pmd_trans_huge(orig_pmd)) {
unsigned int dirty = flags & FAULT_FLAG_WRITE;
/*
			 * If the pmd is splitting, return and retry the
			 * fault. Alternative: wait until the split is
			 * done, and goto retry.
*/
if (pmd_trans_splitting(orig_pmd))
return 0;
if (pmd_numa(orig_pmd))
return do_huge_pmd_numa_page(mm, vma, address,
orig_pmd, pmd);
if (dirty && !pmd_write(orig_pmd)) {
ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
orig_pmd);
/*
* If COW results in an oom, the huge pmd will
* have been split, so retry the fault on the
* pte for a smaller charge.
*/
if (unlikely(ret & VM_FAULT_OOM))
goto retry;
return ret;
} else {
huge_pmd_set_accessed(mm, vma, address, pmd,
orig_pmd, dirty);
}
return 0;
}
}
if (pmd_numa(*pmd))
return do_pmd_numa_page(mm, vma, address, pmd);
/*
* Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
* materialize from under us from a different thread.
*/
if (unlikely(pmd_none(*pmd)) &&
unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us, just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
* from under us anymore at this point because we hold the mmap_sem
* read mode and khugepaged takes it in write mode. So now it's
* safe to run pte_offset_map().
*/
pte = pte_offset_map(pmd, address);
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
* We've already handled the fast-path in-line.
*/
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
pud_t *new = pud_alloc_one(mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&mm->page_table_lock);
if (pgd_present(*pgd)) /* Another has populated it */
pud_free(mm, new);
else
pgd_populate(mm, pgd, new);
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
/*
* Allocate page middle directory.
* We've already handled the fast-path in-line.
*/
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
pmd_t *new = pmd_alloc_one(mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
if (pud_present(*pud)) /* Another has populated it */
pmd_free(mm, new);
else
pud_populate(mm, pud, new);
#else
if (pgd_present(*pud)) /* Another has populated it */
pmd_free(mm, new);
else
pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
#if !defined(__HAVE_ARCH_GATE_AREA)
#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
gate_vma.vm_mm = NULL;
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
gate_vma.vm_page_prot = __P101;
return 0;
}
__initcall(gate_vma_init);
#endif
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef AT_SYSINFO_EHDR
return &gate_vma;
#else
return NULL;
#endif
}
int in_gate_area_no_mm(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
return 1;
#endif
return 0;
}
#endif /* __HAVE_ARCH_GATE_AREA */
static int __follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep;
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
goto out;
pud = pud_offset(pgd, address);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
goto out;
pmd = pmd_offset(pud, address);
VM_BUG_ON(pmd_trans_huge(*pmd));
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
goto out;
/* We cannot handle huge page PFN maps. Luckily they don't exist. */
if (pmd_huge(*pmd))
goto out;
ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
if (!ptep)
goto out;
if (!pte_present(*ptep))
goto unlock;
*ptepp = ptep;
return 0;
unlock:
pte_unmap_unlock(ptep, *ptlp);
out:
return -EINVAL;
}
static inline int follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp)
{
int res;
/* (void) is needed to make gcc happy */
(void) __cond_lock(*ptlp,
!(res = __follow_pte(mm, address, ptepp, ptlp)));
return res;
}
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
* @address: user virtual address
* @pfn: location to store found PFN
*
* Only IO mappings and raw PFN mappings are allowed.
*
 * Returns zero and the pfn at @pfn on success, a negative errno otherwise.
*/
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn)
{
int ret = -EINVAL;
spinlock_t *ptl;
pte_t *ptep;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
return ret;
ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
if (ret)
return ret;
*pfn = pte_pfn(*ptep);
pte_unmap_unlock(ptep, ptl);
return 0;
}
EXPORT_SYMBOL(follow_pfn);
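/*
 * Editorial usage sketch (illustrative only, not part of the original
 * source; assumes the caller holds mm->mmap_sem for the walk):
 *
 *	unsigned long pfn;
 *
 *	if (!follow_pfn(vma, address, &pfn))
 *		pr_debug("%#lx maps to pfn %#lx\n", address, pfn);
 */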
#ifdef CONFIG_HAVE_IOREMAP_PROT
int follow_phys(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
unsigned long *prot, resource_size_t *phys)
{
int ret = -EINVAL;
pte_t *ptep, pte;
spinlock_t *ptl;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
goto out;
if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
goto out;
pte = *ptep;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
*prot = pgprot_val(pte_pgprot(pte));
*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
ret = 0;
unlock:
pte_unmap_unlock(ptep, ptl);
out:
return ret;
}
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write)
{
resource_size_t phys_addr;
unsigned long prot = 0;
void __iomem *maddr;
int offset = addr & (PAGE_SIZE-1);
if (follow_phys(vma, addr, write, &prot, &phys_addr))
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
if (write)
memcpy_toio(maddr + offset, buf, len);
else
memcpy_fromio(buf, maddr + offset, len);
iounmap(maddr);
return len;
}
#endif
/*
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
unsigned long addr, void *buf, int len, int write)
{
struct vm_area_struct *vma;
void *old_buf = buf;
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, ret, offset;
void *maddr;
struct page *page = NULL;
ret = get_user_pages(tsk, mm, addr, 1,
write, 1, &page, &vma);
if (ret <= 0) {
/*
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.
*/
#ifdef CONFIG_HAVE_IOREMAP_PROT
vma = find_vma(mm, addr);
if (!vma || vma->vm_start > addr)
break;
if (vma->vm_ops && vma->vm_ops->access)
ret = vma->vm_ops->access(vma, addr, buf,
len, write);
if (ret <= 0)
#endif
break;
bytes = ret;
} else {
bytes = len;
offset = addr & (PAGE_SIZE-1);
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
maddr = kmap(page);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
set_page_dirty_lock(page);
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
kunmap(page);
page_cache_release(page);
}
len -= bytes;
buf += bytes;
addr += bytes;
}
up_read(&mm->mmap_sem);
return buf - old_buf;
}
/**
* access_remote_vm - access another process' address space
* @mm: the mm_struct of the target address space
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
* @write: whether the access is a write
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, int write)
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
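/*
 * Editorial usage sketch (illustrative only, not part of the original
 * source): reading up to one page from another address space, given an
 * mm the caller holds a reference on:
 *
 *	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	int copied;
 *
 *	if (buf) {
 *		copied = access_remote_vm(mm, addr, buf, PAGE_SIZE, 0);
 *		... 'copied' bytes were actually transferred ...
 *		kfree(buf);
 *	}
 */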
/*
* Access another process' address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
*/
int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, int write)
{
struct mm_struct *mm;
int ret;
mm = get_task_mm(tsk);
if (!mm)
return 0;
ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
mmput(mm);
return ret;
}
/*
* Print the name of a VMA.
*/
void print_vma_addr(char *prefix, unsigned long ip)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
/*
* Do not print if we are in atomic
* contexts (in exception stacks, etc.):
*/
if (preempt_count())
return;
down_read(&mm->mmap_sem);
vma = find_vma(mm, ip);
if (vma && vma->vm_file) {
struct file *f = vma->vm_file;
char *buf = (char *)__get_free_page(GFP_KERNEL);
if (buf) {
char *p;
p = d_path(&f->f_path, buf, PAGE_SIZE);
if (IS_ERR(p))
p = "?";
printk("%s%s[%lx+%lx]", prefix, kbasename(p),
vma->vm_start,
vma->vm_end - vma->vm_start);
free_page((unsigned long)buf);
}
}
up_read(&mm->mmap_sem);
}
#ifdef CONFIG_PROVE_LOCKING
void might_fault(void)
{
/*
* Some code (nfs/sunrpc) uses socket ops on kernel memory while
* holding the mmap_sem, this is safe because kernel memory doesn't
* get paged out, therefore we'll never actually fault, and the
* below annotations will generate false positives.
*/
if (segment_eq(get_fs(), KERNEL_DS))
return;
might_sleep();
/*
* it would be nicer only to annotate paths which are not under
* pagefault_disable, however that requires a larger audit and
* providing helpers like get_user_atomic.
*/
if (!in_atomic() && current->mm)
		might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
static void clear_gigantic_page(struct page *page,
unsigned long addr,
unsigned int pages_per_huge_page)
{
int i;
struct page *p = page;
might_sleep();
for (i = 0; i < pages_per_huge_page;
i++, p = mem_map_next(p, page, i)) {
cond_resched();
clear_user_highpage(p, addr + i * PAGE_SIZE);
}
}
void clear_huge_page(struct page *page,
unsigned long addr, unsigned int pages_per_huge_page)
{
int i;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, pages_per_huge_page);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
cond_resched();
clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
static void copy_user_gigantic_page(struct page *dst, struct page *src,
unsigned long addr,
struct vm_area_struct *vma,
unsigned int pages_per_huge_page)
{
int i;
struct page *dst_base = dst;
struct page *src_base = src;
for (i = 0; i < pages_per_huge_page; ) {
cond_resched();
copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
i++;
dst = mem_map_next(dst, dst_base, i);
src = mem_map_next(src, src_base, i);
}
}
void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr, struct vm_area_struct *vma,
unsigned int pages_per_huge_page)
{
int i;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
copy_user_gigantic_page(dst, src, addr, vma,
pages_per_huge_page);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
cond_resched();
copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
}
}
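/*
 * Editorial note (not part of the original source): both helpers above
 * fall back to the *_gigantic_page() variants once pages_per_huge_page
 * exceeds MAX_ORDER_NR_PAGES, because the struct pages of a gigantic page
 * are not guaranteed to be contiguous and must be walked with
 * mem_map_next().
 */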
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5668_1 |
crossvul-cpp_data_good_2020_10 | /*-------------------------------------------------------------------------
*
* tsquery_util.c
* Utilities for tsquery datatype
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
* src/backend/utils/adt/tsquery_util.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "tsearch/ts_utils.h"
#include "miscadmin.h"
QTNode *
QT2QTN(QueryItem *in, char *operand)
{
QTNode *node = (QTNode *) palloc0(sizeof(QTNode));
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
node->valnode = in;
if (in->type == QI_OPR)
{
node->child = (QTNode **) palloc0(sizeof(QTNode *) * 2);
node->child[0] = QT2QTN(in + 1, operand);
node->sign = node->child[0]->sign;
if (in->qoperator.oper == OP_NOT)
node->nchild = 1;
else
{
node->nchild = 2;
node->child[1] = QT2QTN(in + in->qoperator.left, operand);
node->sign |= node->child[1]->sign;
}
}
else if (operand)
{
node->word = operand + in->qoperand.distance;
node->sign = ((uint32) 1) << (((unsigned int) in->qoperand.valcrc) % 32);
}
return node;
}
void
QTNFree(QTNode *in)
{
if (!in)
return;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type == QI_VAL && in->word && (in->flags & QTN_WORDFREE) != 0)
pfree(in->word);
if (in->child)
{
if (in->valnode)
{
if (in->valnode->type == QI_OPR && in->nchild > 0)
{
int i;
for (i = 0; i < in->nchild; i++)
QTNFree(in->child[i]);
}
if (in->flags & QTN_NEEDFREE)
pfree(in->valnode);
}
pfree(in->child);
}
pfree(in);
}
int
QTNodeCompare(QTNode *an, QTNode *bn)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (an->valnode->type != bn->valnode->type)
return (an->valnode->type > bn->valnode->type) ? -1 : 1;
if (an->valnode->type == QI_OPR)
{
QueryOperator *ao = &an->valnode->qoperator;
QueryOperator *bo = &bn->valnode->qoperator;
if (ao->oper != bo->oper)
return (ao->oper > bo->oper) ? -1 : 1;
if (an->nchild != bn->nchild)
return (an->nchild > bn->nchild) ? -1 : 1;
{
int i,
res;
for (i = 0; i < an->nchild; i++)
if ((res = QTNodeCompare(an->child[i], bn->child[i])) != 0)
return res;
}
return 0;
}
else if (an->valnode->type == QI_VAL)
{
QueryOperand *ao = &an->valnode->qoperand;
QueryOperand *bo = &bn->valnode->qoperand;
if (ao->valcrc != bo->valcrc)
{
return (ao->valcrc > bo->valcrc) ? -1 : 1;
}
return tsCompareString(an->word, ao->length, bn->word, bo->length, false);
}
else
{
elog(ERROR, "unrecognized QueryItem type: %d", an->valnode->type);
return 0; /* keep compiler quiet */
}
}
static int
cmpQTN(const void *a, const void *b)
{
return QTNodeCompare(*(QTNode *const *) a, *(QTNode *const *) b);
}
void
QTNSort(QTNode *in)
{
int i;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type != QI_OPR)
return;
for (i = 0; i < in->nchild; i++)
QTNSort(in->child[i]);
if (in->nchild > 1)
qsort((void *) in->child, in->nchild, sizeof(QTNode *), cmpQTN);
}
bool
QTNEq(QTNode *a, QTNode *b)
{
uint32 sign = a->sign & b->sign;
	if (sign != a->sign || sign != b->sign)
		return false;
	return QTNodeCompare(a, b) == 0;
}
/*
* Remove unnecessary intermediate nodes. For example:
*
 *	      OR                OR
 *	    a    OR     ->    a  b  c
 *	        b  c
*/
void
QTNTernary(QTNode *in)
{
int i;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type != QI_OPR)
return;
for (i = 0; i < in->nchild; i++)
QTNTernary(in->child[i]);
for (i = 0; i < in->nchild; i++)
{
QTNode *cc = in->child[i];
if (cc->valnode->type == QI_OPR && in->valnode->qoperator.oper == cc->valnode->qoperator.oper)
{
int oldnchild = in->nchild;
in->nchild += cc->nchild - 1;
in->child = (QTNode **) repalloc(in->child, in->nchild * sizeof(QTNode *));
if (i + 1 != oldnchild)
memmove(in->child + i + cc->nchild, in->child + i + 1,
(oldnchild - i - 1) * sizeof(QTNode *));
memcpy(in->child + i, cc->child, cc->nchild * sizeof(QTNode *));
i += cc->nchild - 1;
if (cc->flags & QTN_NEEDFREE)
pfree(cc->valnode);
pfree(cc);
}
}
}
/*
* Convert a tree to binary tree by inserting intermediate nodes.
* (Opposite of QTNTernary)
*/
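/*
 * Editorial illustration (not part of the original source), the inverse
 * of the QTNTernary example above:
 *
 *	      OR                  OR
 *	    a  b  c     ->     OR     c
 *	                      a  b
 */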
void
QTNBinary(QTNode *in)
{
int i;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type != QI_OPR)
return;
for (i = 0; i < in->nchild; i++)
QTNBinary(in->child[i]);
if (in->nchild <= 2)
return;
while (in->nchild > 2)
{
QTNode *nn = (QTNode *) palloc0(sizeof(QTNode));
nn->valnode = (QueryItem *) palloc0(sizeof(QueryItem));
nn->child = (QTNode **) palloc0(sizeof(QTNode *) * 2);
nn->nchild = 2;
nn->flags = QTN_NEEDFREE;
nn->child[0] = in->child[0];
nn->child[1] = in->child[1];
nn->sign = nn->child[0]->sign | nn->child[1]->sign;
nn->valnode->type = in->valnode->type;
nn->valnode->qoperator.oper = in->valnode->qoperator.oper;
in->child[0] = nn;
in->child[1] = in->child[in->nchild - 1];
in->nchild--;
}
}
/*
 * Count the total length of the operand strings in the tree, including
 * their '\0' terminators.
*/
static void
cntsize(QTNode *in, int *sumlen, int *nnode)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
*nnode += 1;
if (in->valnode->type == QI_OPR)
{
int i;
for (i = 0; i < in->nchild; i++)
cntsize(in->child[i], sumlen, nnode);
}
else
{
*sumlen += in->valnode->qoperand.length + 1;
}
}
typedef struct
{
QueryItem *curitem;
char *operand;
char *curoperand;
} QTN2QTState;
static void
fillQT(QTN2QTState *state, QTNode *in)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
if (in->valnode->type == QI_VAL)
{
memcpy(state->curitem, in->valnode, sizeof(QueryOperand));
memcpy(state->curoperand, in->word, in->valnode->qoperand.length);
state->curitem->qoperand.distance = state->curoperand - state->operand;
state->curoperand[in->valnode->qoperand.length] = '\0';
state->curoperand += in->valnode->qoperand.length + 1;
state->curitem++;
}
else
{
QueryItem *curitem = state->curitem;
Assert(in->valnode->type == QI_OPR);
memcpy(state->curitem, in->valnode, sizeof(QueryOperator));
Assert(in->nchild <= 2);
state->curitem++;
fillQT(state, in->child[0]);
if (in->nchild == 2)
{
curitem->qoperator.left = state->curitem - curitem;
fillQT(state, in->child[1]);
}
}
}
TSQuery
QTN2QT(QTNode *in)
{
TSQuery out;
int len;
int sumlen = 0,
nnode = 0;
QTN2QTState state;
cntsize(in, &sumlen, &nnode);
if (TSQUERY_TOO_BIG(nnode, sumlen))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("tsquery is too large")));
len = COMPUTESIZE(nnode, sumlen);
out = (TSQuery) palloc0(len);
SET_VARSIZE(out, len);
out->size = nnode;
state.curitem = GETQUERY(out);
state.operand = state.curoperand = GETOPERAND(out);
fillQT(&state, in);
return out;
}
QTNode *
QTNCopy(QTNode *in)
{
QTNode *out;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
out = (QTNode *) palloc(sizeof(QTNode));
*out = *in;
out->valnode = (QueryItem *) palloc(sizeof(QueryItem));
*(out->valnode) = *(in->valnode);
out->flags |= QTN_NEEDFREE;
if (in->valnode->type == QI_VAL)
{
out->word = palloc(in->valnode->qoperand.length + 1);
memcpy(out->word, in->word, in->valnode->qoperand.length);
out->word[in->valnode->qoperand.length] = '\0';
out->flags |= QTN_WORDFREE;
}
else
{
int i;
out->child = (QTNode **) palloc(sizeof(QTNode *) * in->nchild);
for (i = 0; i < in->nchild; i++)
out->child[i] = QTNCopy(in->child[i]);
}
return out;
}
void
QTNClearFlags(QTNode *in, uint32 flags)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
in->flags &= ~flags;
if (in->valnode->type != QI_VAL)
{
int i;
for (i = 0; i < in->nchild; i++)
QTNClearFlags(in->child[i], flags);
}
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2020_10 |
crossvul-cpp_data_good_995_0 | // SPDX-License-Identifier: GPL-2.0+
/*
* f_hid.c -- USB HID function driver
*
* Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hid.h>
#include <linux/idr.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/usb/g_hid.h>
#include "u_f.h"
#include "u_hid.h"
#define HIDG_MINORS 4
static int major, minors;
static struct class *hidg_class;
static DEFINE_IDA(hidg_ida);
static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */
/*-------------------------------------------------------------------------*/
/* HID gadget struct */
struct f_hidg_req_list {
struct usb_request *req;
unsigned int pos;
struct list_head list;
};
struct f_hidg {
/* configuration */
unsigned char bInterfaceSubClass;
unsigned char bInterfaceProtocol;
unsigned char protocol;
unsigned short report_desc_length;
char *report_desc;
unsigned short report_length;
/* recv report */
struct list_head completed_out_req;
spinlock_t read_spinlock;
wait_queue_head_t read_queue;
unsigned int qlen;
/* send report */
spinlock_t write_spinlock;
bool write_pending;
wait_queue_head_t write_queue;
struct usb_request *req;
int minor;
struct cdev cdev;
struct usb_function func;
struct usb_ep *in_ep;
struct usb_ep *out_ep;
};
static inline struct f_hidg *func_to_hidg(struct usb_function *f)
{
return container_of(f, struct f_hidg, func);
}
/*-------------------------------------------------------------------------*/
/* Static descriptors */
static struct usb_interface_descriptor hidg_interface_desc = {
.bLength = sizeof hidg_interface_desc,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bAlternateSetting = 0,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_HID,
/* .bInterfaceSubClass = DYNAMIC */
/* .bInterfaceProtocol = DYNAMIC */
/* .iInterface = DYNAMIC */
};
static struct hid_descriptor hidg_desc = {
.bLength = sizeof hidg_desc,
.bDescriptorType = HID_DT_HID,
.bcdHID = 0x0101,
.bCountryCode = 0x00,
.bNumDescriptors = 0x1,
/*.desc[0].bDescriptorType = DYNAMIC */
	/*.desc[0].wDescriptorLength = DYNAMIC */
};
/* Super-Speed Support */
static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 4, /* FIXME: Add this field in the
* HID gadget configuration?
* (struct hidg_func_descriptor)
*/
};
static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
.bLength = sizeof(hidg_ss_in_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
/* .wBytesPerInterval = DYNAMIC */
};
static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 4, /* FIXME: Add this field in the
* HID gadget configuration?
* (struct hidg_func_descriptor)
*/
};
static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
.bLength = sizeof(hidg_ss_out_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
/* .wBytesPerInterval = DYNAMIC */
};
static struct usb_descriptor_header *hidg_ss_descriptors[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
(struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
(struct usb_descriptor_header *)&hidg_ss_out_ep_desc,
(struct usb_descriptor_header *)&hidg_ss_out_comp_desc,
NULL,
};
/* High-Speed Support */
static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 4, /* FIXME: Add this field in the
* HID gadget configuration?
* (struct hidg_func_descriptor)
*/
};
static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 4, /* FIXME: Add this field in the
* HID gadget configuration?
* (struct hidg_func_descriptor)
*/
};
static struct usb_descriptor_header *hidg_hs_descriptors[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
(struct usb_descriptor_header *)&hidg_hs_out_ep_desc,
NULL,
};
/* Full-Speed Support */
static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 10, /* FIXME: Add this field in the
* HID gadget configuration?
* (struct hidg_func_descriptor)
*/
};
static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
.bInterval = 10, /* FIXME: Add this field in the
* HID gadget configuration?
* (struct hidg_func_descriptor)
*/
};
static struct usb_descriptor_header *hidg_fs_descriptors[] = {
(struct usb_descriptor_header *)&hidg_interface_desc,
(struct usb_descriptor_header *)&hidg_desc,
(struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
(struct usb_descriptor_header *)&hidg_fs_out_ep_desc,
NULL,
};
/*-------------------------------------------------------------------------*/
/* Strings */
#define CT_FUNC_HID_IDX 0
static struct usb_string ct_func_string_defs[] = {
[CT_FUNC_HID_IDX].s = "HID Interface",
{}, /* end of list */
};
static struct usb_gadget_strings ct_func_string_table = {
.language = 0x0409, /* en-US */
.strings = ct_func_string_defs,
};
static struct usb_gadget_strings *ct_func_strings[] = {
&ct_func_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
/* Char Device */
static ssize_t f_hidg_read(struct file *file, char __user *buffer,
size_t count, loff_t *ptr)
{
struct f_hidg *hidg = file->private_data;
struct f_hidg_req_list *list;
struct usb_request *req;
unsigned long flags;
int ret;
if (!count)
return 0;
if (!access_ok(buffer, count))
return -EFAULT;
spin_lock_irqsave(&hidg->read_spinlock, flags);
#define READ_COND (!list_empty(&hidg->completed_out_req))
/* wait for at least one buffer to complete */
while (!READ_COND) {
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(hidg->read_queue, READ_COND))
return -ERESTARTSYS;
spin_lock_irqsave(&hidg->read_spinlock, flags);
}
/* pick the first one */
list = list_first_entry(&hidg->completed_out_req,
struct f_hidg_req_list, list);
	/*
	 * Remove this from the list to protect it from being freed
	 * while the host disables our function.
	 */
list_del(&list->list);
req = list->req;
count = min_t(unsigned int, count, req->actual - list->pos);
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
/* copy to user outside spinlock */
count -= copy_to_user(buffer, req->buf + list->pos, count);
list->pos += count;
	/*
	 * If this request is completely handled and transferred to
	 * userspace, remove its entry from the list and requeue it.
	 * Otherwise, we will revisit it upon the next call, taking
	 * into account its current read position.
	 */
if (list->pos == req->actual) {
kfree(list);
req->length = hidg->report_length;
ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL);
if (ret < 0) {
free_ep_req(hidg->out_ep, req);
return ret;
}
} else {
spin_lock_irqsave(&hidg->read_spinlock, flags);
list_add(&list->list, &hidg->completed_out_req);
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
wake_up(&hidg->read_queue);
}
return count;
}
static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
unsigned long flags;
if (req->status != 0) {
ERROR(hidg->func.config->cdev,
"End Point Request ERROR: %d\n", req->status);
}
spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
}
static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
size_t count, loff_t *offp)
{
struct f_hidg *hidg = file->private_data;
struct usb_request *req;
unsigned long flags;
ssize_t status = -ENOMEM;
if (!access_ok(buffer, count))
return -EFAULT;
spin_lock_irqsave(&hidg->write_spinlock, flags);
#define WRITE_COND (!hidg->write_pending)
try_again:
/* write queue */
while (!WRITE_COND) {
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible_exclusive(
hidg->write_queue, WRITE_COND))
return -ERESTARTSYS;
spin_lock_irqsave(&hidg->write_spinlock, flags);
}
hidg->write_pending = 1;
req = hidg->req;
count = min_t(unsigned, count, hidg->report_length);
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
status = copy_from_user(req->buf, buffer, count);
if (status != 0) {
ERROR(hidg->func.config->cdev,
"copy_from_user error\n");
status = -EINVAL;
goto release_write_pending;
}
spin_lock_irqsave(&hidg->write_spinlock, flags);
/* when our function has been disabled by host */
if (!hidg->req) {
free_ep_req(hidg->in_ep, req);
/*
* TODO
* Should we fail with error here?
*/
goto try_again;
}
req->status = 0;
req->zero = 0;
req->length = count;
req->complete = f_hidg_req_complete;
req->context = hidg;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
goto release_write_pending;
} else {
status = count;
}
return status;
release_write_pending:
spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
return status;
}
static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
struct f_hidg *hidg = file->private_data;
__poll_t ret = 0;
poll_wait(file, &hidg->read_queue, wait);
poll_wait(file, &hidg->write_queue, wait);
if (WRITE_COND)
ret |= EPOLLOUT | EPOLLWRNORM;
if (READ_COND)
ret |= EPOLLIN | EPOLLRDNORM;
return ret;
}
#undef WRITE_COND
#undef READ_COND
static int f_hidg_release(struct inode *inode, struct file *fd)
{
fd->private_data = NULL;
return 0;
}
static int f_hidg_open(struct inode *inode, struct file *fd)
{
struct f_hidg *hidg =
container_of(inode->i_cdev, struct f_hidg, cdev);
fd->private_data = hidg;
return 0;
}
/*-------------------------------------------------------------------------*/
/* usb_function */
static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
return alloc_ep_req(ep, length);
}
static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *) req->context;
struct usb_composite_dev *cdev = hidg->func.config->cdev;
struct f_hidg_req_list *req_list;
unsigned long flags;
switch (req->status) {
case 0:
req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
if (!req_list) {
ERROR(cdev, "Unable to allocate mem for req_list\n");
goto free_req;
}
req_list->req = req;
spin_lock_irqsave(&hidg->read_spinlock, flags);
list_add_tail(&req_list->list, &hidg->completed_out_req);
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
wake_up(&hidg->read_queue);
break;
default:
ERROR(cdev, "Set report failed %d\n", req->status);
/* FALLTHROUGH */
case -ECONNABORTED: /* hardware forced ep reset */
case -ECONNRESET: /* request dequeued */
case -ESHUTDOWN: /* disconnect from host */
free_req:
free_ep_req(ep, req);
return;
}
}
static int hidg_setup(struct usb_function *f,
const struct usb_ctrlrequest *ctrl)
{
struct f_hidg *hidg = func_to_hidg(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = cdev->req;
int status = 0;
__u16 value, length;
value = __le16_to_cpu(ctrl->wValue);
length = __le16_to_cpu(ctrl->wLength);
VDBG(cdev,
"%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
__func__, ctrl->bRequestType, ctrl->bRequest, value);
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_REPORT):
VDBG(cdev, "get_report\n");
/* send an empty report */
length = min_t(unsigned, length, hidg->report_length);
memset(req->buf, 0x0, length);
goto respond;
break;
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_GET_PROTOCOL):
VDBG(cdev, "get_protocol\n");
length = min_t(unsigned int, length, 1);
((u8 *) req->buf)[0] = hidg->protocol;
goto respond;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_REPORT):
VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
goto stall;
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
| HID_REQ_SET_PROTOCOL):
VDBG(cdev, "set_protocol\n");
if (value > HID_REPORT_PROTOCOL)
goto stall;
length = 0;
/*
* We assume that programs implementing the Boot protocol
* are also compatible with the Report Protocol
*/
if (hidg->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) {
hidg->protocol = value;
goto respond;
}
goto stall;
break;
case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
| USB_REQ_GET_DESCRIPTOR):
switch (value >> 8) {
case HID_DT_HID:
{
struct hid_descriptor hidg_desc_copy = hidg_desc;
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
hidg_desc_copy.desc[0].wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
length = min_t(unsigned short, length,
hidg_desc_copy.bLength);
memcpy(req->buf, &hidg_desc_copy, length);
goto respond;
break;
}
case HID_DT_REPORT:
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
length = min_t(unsigned short, length,
hidg->report_desc_length);
memcpy(req->buf, hidg->report_desc, length);
goto respond;
break;
default:
VDBG(cdev, "Unknown descriptor request 0x%x\n",
value >> 8);
goto stall;
break;
}
break;
default:
VDBG(cdev, "Unknown request 0x%x\n",
ctrl->bRequest);
goto stall;
break;
}
stall:
return -EOPNOTSUPP;
respond:
req->zero = 0;
req->length = length;
status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (status < 0)
ERROR(cdev, "usb_ep_queue error on ep0 %d\n", value);
return status;
}
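/*
 * Editorial note (not part of the original source): hidg_setup() keys its
 * dispatch on ((bRequestType << 8) | bRequest) so a single switch covers
 * both the request direction/type and the HID request code; unhandled
 * combinations fall through to the stall label and return -EOPNOTSUPP.
 */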
static void hidg_disable(struct usb_function *f)
{
struct f_hidg *hidg = func_to_hidg(f);
struct f_hidg_req_list *list, *next;
unsigned long flags;
usb_ep_disable(hidg->in_ep);
usb_ep_disable(hidg->out_ep);
spin_lock_irqsave(&hidg->read_spinlock, flags);
list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
free_ep_req(hidg->out_ep, list->req);
list_del(&list->list);
kfree(list);
}
spin_unlock_irqrestore(&hidg->read_spinlock, flags);
spin_lock_irqsave(&hidg->write_spinlock, flags);
if (!hidg->write_pending) {
free_ep_req(hidg->in_ep, hidg->req);
hidg->write_pending = 1;
}
hidg->req = NULL;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
}
static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct f_hidg *hidg = func_to_hidg(f);
struct usb_request *req_in = NULL;
unsigned long flags;
int i, status = 0;
VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
if (hidg->in_ep != NULL) {
/* restart endpoint */
usb_ep_disable(hidg->in_ep);
status = config_ep_by_speed(f->config->cdev->gadget, f,
hidg->in_ep);
if (status) {
ERROR(cdev, "config_ep_by_speed FAILED!\n");
goto fail;
}
status = usb_ep_enable(hidg->in_ep);
if (status < 0) {
ERROR(cdev, "Enable IN endpoint FAILED!\n");
goto fail;
}
hidg->in_ep->driver_data = hidg;
req_in = hidg_alloc_ep_req(hidg->in_ep, hidg->report_length);
if (!req_in) {
status = -ENOMEM;
goto disable_ep_in;
}
}
if (hidg->out_ep != NULL) {
/* restart endpoint */
usb_ep_disable(hidg->out_ep);
status = config_ep_by_speed(f->config->cdev->gadget, f,
hidg->out_ep);
if (status) {
ERROR(cdev, "config_ep_by_speed FAILED!\n");
goto free_req_in;
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
ERROR(cdev, "Enable OUT endpoint FAILED!\n");
goto free_req_in;
}
hidg->out_ep->driver_data = hidg;
/*
* allocate a bunch of read buffers and queue them all at once.
*/
for (i = 0; i < hidg->qlen && status == 0; i++) {
struct usb_request *req =
hidg_alloc_ep_req(hidg->out_ep,
hidg->report_length);
if (req) {
req->complete = hidg_set_report_complete;
req->context = hidg;
status = usb_ep_queue(hidg->out_ep, req,
GFP_ATOMIC);
if (status) {
ERROR(cdev, "%s queue req --> %d\n",
hidg->out_ep->name, status);
free_ep_req(hidg->out_ep, req);
}
} else {
status = -ENOMEM;
goto disable_out_ep;
}
}
}
if (hidg->in_ep != NULL) {
spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->req = req_in;
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
}
return 0;
disable_out_ep:
usb_ep_disable(hidg->out_ep);
free_req_in:
if (req_in)
free_ep_req(hidg->in_ep, req_in);
disable_ep_in:
if (hidg->in_ep)
usb_ep_disable(hidg->in_ep);
fail:
return status;
}
static const struct file_operations f_hidg_fops = {
.owner = THIS_MODULE,
.open = f_hidg_open,
.release = f_hidg_release,
.write = f_hidg_write,
.read = f_hidg_read,
.poll = f_hidg_poll,
.llseek = noop_llseek,
};
static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_ep *ep;
struct f_hidg *hidg = func_to_hidg(f);
struct usb_string *us;
struct device *device;
int status;
dev_t dev;
/* maybe allocate device-global string IDs, and patch descriptors */
us = usb_gstrings_attach(c->cdev, ct_func_strings,
ARRAY_SIZE(ct_func_string_defs));
if (IS_ERR(us))
return PTR_ERR(us);
hidg_interface_desc.iInterface = us[CT_FUNC_HID_IDX].id;
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
hidg_interface_desc.bInterfaceNumber = status;
/* allocate instance-specific endpoints */
status = -ENODEV;
ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc);
if (!ep)
goto fail;
hidg->in_ep = ep;
ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc);
if (!ep)
goto fail;
hidg->out_ep = ep;
/* set descriptor dynamic values */
hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
hidg->protocol = HID_REPORT_PROTOCOL;
hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_ss_in_comp_desc.wBytesPerInterval =
cpu_to_le16(hidg->report_length);
hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_ss_out_comp_desc.wBytesPerInterval =
cpu_to_le16(hidg->report_length);
hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
	/*
	 * We can use the hidg_desc struct here, but we should not rely
	 * on its contents not changing after this function returns.
	 */
hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
hidg_desc.desc[0].wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
hidg_hs_in_ep_desc.bEndpointAddress =
hidg_fs_in_ep_desc.bEndpointAddress;
hidg_hs_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
hidg_ss_in_ep_desc.bEndpointAddress =
hidg_fs_in_ep_desc.bEndpointAddress;
hidg_ss_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
status = usb_assign_descriptors(f, hidg_fs_descriptors,
hidg_hs_descriptors, hidg_ss_descriptors, NULL);
if (status)
goto fail;
spin_lock_init(&hidg->write_spinlock);
hidg->write_pending = 1;
hidg->req = NULL;
spin_lock_init(&hidg->read_spinlock);
init_waitqueue_head(&hidg->write_queue);
init_waitqueue_head(&hidg->read_queue);
INIT_LIST_HEAD(&hidg->completed_out_req);
/* create char device */
cdev_init(&hidg->cdev, &f_hidg_fops);
dev = MKDEV(major, hidg->minor);
status = cdev_add(&hidg->cdev, dev, 1);
if (status)
goto fail_free_descs;
device = device_create(hidg_class, NULL, dev, NULL,
"%s%d", "hidg", hidg->minor);
if (IS_ERR(device)) {
status = PTR_ERR(device);
goto del;
}
return 0;
del:
cdev_del(&hidg->cdev);
fail_free_descs:
usb_free_all_descriptors(f);
fail:
ERROR(f->config->cdev, "hidg_bind FAILED\n");
if (hidg->req != NULL)
free_ep_req(hidg->in_ep, hidg->req);
return status;
}
static inline int hidg_get_minor(void)
{
int ret;
ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
if (ret >= HIDG_MINORS) {
ida_simple_remove(&hidg_ida, ret);
ret = -ENODEV;
}
return ret;
}
static inline struct f_hid_opts *to_f_hid_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_hid_opts,
func_inst.group);
}
static void hid_attr_release(struct config_item *item)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations hidg_item_ops = {
.release = hid_attr_release,
};
#define F_HID_OPT(name, prec, limit) \
static ssize_t f_hid_opts_##name##_show(struct config_item *item, char *page)\
{ \
struct f_hid_opts *opts = to_f_hid_opts(item); \
int result; \
\
mutex_lock(&opts->lock); \
result = sprintf(page, "%d\n", opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
} \
\
static ssize_t f_hid_opts_##name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_hid_opts *opts = to_f_hid_opts(item); \
int ret; \
u##prec num; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
goto end; \
} \
\
ret = kstrtou##prec(page, 0, &num); \
if (ret) \
goto end; \
\
if (num > limit) { \
ret = -EINVAL; \
goto end; \
} \
opts->name = num; \
ret = len; \
\
end: \
mutex_unlock(&opts->lock); \
return ret; \
} \
\
CONFIGFS_ATTR(f_hid_opts_, name)
F_HID_OPT(subclass, 8, 255);
F_HID_OPT(protocol, 8, 255);
F_HID_OPT(report_length, 16, 65535);
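/*
 * Editorial note (not part of the original source): each F_HID_OPT()
 * invocation above expands to an f_hid_opts_<name>_show()/_store() pair
 * plus a CONFIGFS_ATTR() definition; e.g. F_HID_OPT(subclass, 8, 255)
 * parses the written value with kstrtou8() and rejects values above 255
 * or any store while opts->refcnt is non-zero.
 */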
static ssize_t f_hid_opts_report_desc_show(struct config_item *item, char *page)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
int result;
mutex_lock(&opts->lock);
result = opts->report_desc_length;
memcpy(page, opts->report_desc, opts->report_desc_length);
mutex_unlock(&opts->lock);
return result;
}
static ssize_t f_hid_opts_report_desc_store(struct config_item *item,
const char *page, size_t len)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
int ret = -EBUSY;
char *d;
mutex_lock(&opts->lock);
if (opts->refcnt)
goto end;
if (len > PAGE_SIZE) {
ret = -ENOSPC;
goto end;
}
d = kmemdup(page, len, GFP_KERNEL);
if (!d) {
ret = -ENOMEM;
goto end;
}
kfree(opts->report_desc);
opts->report_desc = d;
opts->report_desc_length = len;
opts->report_desc_alloc = true;
ret = len;
end:
mutex_unlock(&opts->lock);
return ret;
}
CONFIGFS_ATTR(f_hid_opts_, report_desc);
static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
return sprintf(page, "%d:%d\n", major, opts->minor);
}
CONFIGFS_ATTR_RO(f_hid_opts_, dev);
static struct configfs_attribute *hid_attrs[] = {
&f_hid_opts_attr_subclass,
&f_hid_opts_attr_protocol,
&f_hid_opts_attr_report_length,
&f_hid_opts_attr_report_desc,
&f_hid_opts_attr_dev,
NULL,
};
static const struct config_item_type hid_func_type = {
.ct_item_ops = &hidg_item_ops,
.ct_attrs = hid_attrs,
.ct_owner = THIS_MODULE,
};
static inline void hidg_put_minor(int minor)
{
ida_simple_remove(&hidg_ida, minor);
}
static void hidg_free_inst(struct usb_function_instance *f)
{
struct f_hid_opts *opts;
opts = container_of(f, struct f_hid_opts, func_inst);
mutex_lock(&hidg_ida_lock);
hidg_put_minor(opts->minor);
if (ida_is_empty(&hidg_ida))
ghid_cleanup();
mutex_unlock(&hidg_ida_lock);
if (opts->report_desc_alloc)
kfree(opts->report_desc);
kfree(opts);
}
static struct usb_function_instance *hidg_alloc_inst(void)
{
struct f_hid_opts *opts;
struct usb_function_instance *ret;
int status = 0;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = hidg_free_inst;
ret = &opts->func_inst;
mutex_lock(&hidg_ida_lock);
if (ida_is_empty(&hidg_ida)) {
status = ghid_setup(NULL, HIDG_MINORS);
if (status) {
ret = ERR_PTR(status);
kfree(opts);
goto unlock;
}
}
opts->minor = hidg_get_minor();
if (opts->minor < 0) {
ret = ERR_PTR(opts->minor);
kfree(opts);
if (ida_is_empty(&hidg_ida))
ghid_cleanup();
goto unlock;
}
config_group_init_type_name(&opts->func_inst.group, "", &hid_func_type);
unlock:
mutex_unlock(&hidg_ida_lock);
return ret;
}
static void hidg_free(struct usb_function *f)
{
struct f_hidg *hidg;
struct f_hid_opts *opts;
hidg = func_to_hidg(f);
opts = container_of(f->fi, struct f_hid_opts, func_inst);
kfree(hidg->report_desc);
kfree(hidg);
mutex_lock(&opts->lock);
--opts->refcnt;
mutex_unlock(&opts->lock);
}
static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_hidg *hidg = func_to_hidg(f);
device_destroy(hidg_class, MKDEV(major, hidg->minor));
cdev_del(&hidg->cdev);
usb_free_all_descriptors(f);
}
static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
{
struct f_hidg *hidg;
struct f_hid_opts *opts;
/* allocate and initialize one new instance */
hidg = kzalloc(sizeof(*hidg), GFP_KERNEL);
if (!hidg)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_hid_opts, func_inst);
mutex_lock(&opts->lock);
++opts->refcnt;
hidg->minor = opts->minor;
hidg->bInterfaceSubClass = opts->subclass;
hidg->bInterfaceProtocol = opts->protocol;
hidg->report_length = opts->report_length;
hidg->report_desc_length = opts->report_desc_length;
if (opts->report_desc) {
hidg->report_desc = kmemdup(opts->report_desc,
opts->report_desc_length,
GFP_KERNEL);
if (!hidg->report_desc) {
kfree(hidg);
mutex_unlock(&opts->lock);
return ERR_PTR(-ENOMEM);
}
}
mutex_unlock(&opts->lock);
hidg->func.name = "hid";
hidg->func.bind = hidg_bind;
hidg->func.unbind = hidg_unbind;
hidg->func.set_alt = hidg_set_alt;
hidg->func.disable = hidg_disable;
hidg->func.setup = hidg_setup;
hidg->func.free_func = hidg_free;
	/* this could be made configurable at some point */
hidg->qlen = 4;
return &hidg->func;
}
DECLARE_USB_FUNCTION_INIT(hid, hidg_alloc_inst, hidg_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabien Chouteau");
int ghid_setup(struct usb_gadget *g, int count)
{
int status;
dev_t dev;
hidg_class = class_create(THIS_MODULE, "hidg");
if (IS_ERR(hidg_class)) {
status = PTR_ERR(hidg_class);
hidg_class = NULL;
return status;
}
status = alloc_chrdev_region(&dev, 0, count, "hidg");
if (status) {
class_destroy(hidg_class);
hidg_class = NULL;
return status;
}
major = MAJOR(dev);
minors = count;
return 0;
}
void ghid_cleanup(void)
{
if (major) {
unregister_chrdev_region(MKDEV(major, 0), minors);
major = minors = 0;
}
class_destroy(hidg_class);
hidg_class = NULL;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_995_0 |
crossvul-cpp_data_good_3527_0 | /*
* fs/nfs/nfs4proc.c
*
* Client-side procedure declarations for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PROC
#define NFS4_POLL_RETRY_MIN (HZ/10)
#define NFS4_POLL_RETRY_MAX (15*HZ)
#define NFS4_MAX_LOOP_ON_RECOVER (10)
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, struct nfs4_state *);
static int nfs41_free_stateid(struct nfs_server *, struct nfs4_state *);
#endif
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
if (err >= -1000)
return err;
switch (err) {
case -NFS4ERR_RESOURCE:
return -EREMOTEIO;
case -NFS4ERR_WRONGSEC:
return -EPERM;
case -NFS4ERR_BADOWNER:
case -NFS4ERR_BADNAME:
return -EINVAL;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
break;
}
return -EIO;
}
/*
* This is our standard bitmap for GETATTR requests.
*/
const u32 nfs4_fattr_bitmap[2] = {
FATTR4_WORD0_TYPE
| FATTR4_WORD0_CHANGE
| FATTR4_WORD0_SIZE
| FATTR4_WORD0_FSID
| FATTR4_WORD0_FILEID,
FATTR4_WORD1_MODE
| FATTR4_WORD1_NUMLINKS
| FATTR4_WORD1_OWNER
| FATTR4_WORD1_OWNER_GROUP
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY
};
const u32 nfs4_statfs_bitmap[2] = {
FATTR4_WORD0_FILES_AVAIL
| FATTR4_WORD0_FILES_FREE
| FATTR4_WORD0_FILES_TOTAL,
FATTR4_WORD1_SPACE_AVAIL
| FATTR4_WORD1_SPACE_FREE
| FATTR4_WORD1_SPACE_TOTAL
};
const u32 nfs4_pathconf_bitmap[2] = {
FATTR4_WORD0_MAXLINK
| FATTR4_WORD0_MAXNAME,
0
};
const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
| FATTR4_WORD0_MAXREAD
| FATTR4_WORD0_MAXWRITE
| FATTR4_WORD0_LEASE_TIME,
FATTR4_WORD1_TIME_DELTA
| FATTR4_WORD1_FS_LAYOUT_TYPES,
FATTR4_WORD2_LAYOUT_BLKSIZE
};
const u32 nfs4_fs_locations_bitmap[2] = {
FATTR4_WORD0_TYPE
| FATTR4_WORD0_CHANGE
| FATTR4_WORD0_SIZE
| FATTR4_WORD0_FSID
| FATTR4_WORD0_FILEID
| FATTR4_WORD0_FS_LOCATIONS,
FATTR4_WORD1_MODE
| FATTR4_WORD1_NUMLINKS
| FATTR4_WORD1_OWNER
| FATTR4_WORD1_OWNER_GROUP
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY
| FATTR4_WORD1_MOUNTED_ON_FILEID
};
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
struct nfs4_readdir_arg *readdir)
{
__be32 *start, *p;
BUG_ON(readdir->count < 80);
if (cookie > 2) {
readdir->cookie = cookie;
memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
return;
}
readdir->cookie = 0;
memset(&readdir->verifier, 0, sizeof(readdir->verifier));
if (cookie == 2)
return;
/*
* NFSv4 servers do not return entries for '.' and '..'
* Therefore, we fake these entries here. We let '.'
* have cookie 0 and '..' have cookie 1. Note that
* when talking to the server, we always send cookie 0
* instead of 1 or 2.
*/
start = p = kmap_atomic(*readdir->pages, KM_USER0);
if (cookie == 0) {
*p++ = xdr_one; /* next */
*p++ = xdr_zero; /* cookie, first word */
*p++ = xdr_one; /* cookie, second word */
*p++ = xdr_one; /* entry len */
memcpy(p, ".\0\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
*p++ = htonl(8); /* attribute buffer length */
p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
}
*p++ = xdr_one; /* next */
*p++ = xdr_zero; /* cookie, first word */
*p++ = xdr_two; /* cookie, second word */
*p++ = xdr_two; /* entry len */
memcpy(p, "..\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
*p++ = htonl(8); /* attribute buffer length */
p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
readdir->pgbase = (char *)p - (char *)start;
readdir->count -= readdir->pgbase;
kunmap_atomic(start, KM_USER0);
}
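/*
 * Aside (illustrative, not part of the original source): xdr_encode_hyper()
 * above stores a 64-bit fileid as two big-endian 32-bit XDR words, high word
 * first. A self-contained userspace sketch of that encoding (demo_* names
 * are hypothetical):
 */
#if 0	/* illustrative sketch only -- never compiled as part of this file */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl() */

/* Split a 64-bit value into two big-endian 32-bit XDR words, high word first. */
static uint32_t *demo_xdr_encode_hyper(uint32_t *p, uint64_t val)
{
    *p++ = htonl((uint32_t)(val >> 32));
    *p++ = htonl((uint32_t)(val & 0xffffffffU));
    return p;
}

int main(void)
{
    uint32_t buf[2];
    unsigned char *b = (unsigned char *)buf;
    int i;

    demo_xdr_encode_hyper(buf, 0x0123456789abcdefULL);
    for (i = 0; i < 8; i++)
        printf("%02x", b[i]);	/* 0123456789abcdef on any host endianness */
    printf("\n");
    return 0;
}
#endif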
static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
int res;
might_sleep();
res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
nfs_wait_bit_killable, TASK_KILLABLE);
return res;
}
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
int res = 0;
might_sleep();
if (*timeout <= 0)
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
schedule_timeout_killable(*timeout);
if (fatal_signal_pending(current))
res = -ERESTARTSYS;
*timeout <<= 1;
return res;
}
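/*
 * Aside (illustrative, not part of the original source): nfs4_delay() above
 * is a capped exponential backoff -- clamp the timeout into
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX], sleep, then double it for the
 * next retry. A standalone sketch of the same policy with made-up millisecond
 * constants (demo_* names are hypothetical):
 */
#if 0	/* illustrative sketch only -- never compiled as part of this file */
#include <stdio.h>

#define DEMO_RETRY_MIN	100	/* stand-ins for NFS4_POLL_RETRY_MIN/MAX */
#define DEMO_RETRY_MAX	15000

/* Clamp, "sleep", then double -- mirrors the policy of nfs4_delay(). */
static void demo_delay(long *timeout)
{
    if (*timeout <= 0)
        *timeout = DEMO_RETRY_MIN;
    if (*timeout > DEMO_RETRY_MAX)
        *timeout = DEMO_RETRY_MAX;
    printf("sleeping %ld ms\n", *timeout);
    *timeout <<= 1;
}

int main(void)
{
    long t = 0;
    int i;

    for (i = 0; i < 10; i++)	/* 100, 200, 400, ... then capped at 15000 */
        demo_delay(&t);
    return 0;
}
#endif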
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state *state = exception->state;
int ret = errorcode;
exception->retry = 0;
switch(errorcode) {
case 0:
return 0;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
if (state == NULL)
break;
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
if (state != NULL)
nfs4_schedule_stateid_recovery(server, state);
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
nfs4_schedule_session_recovery(clp->cl_session);
exception->retry = 1;
break;
#endif /* defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
if (exception->timeout > HZ) {
/* We have retried a decent amount, time to
* fail
*/
ret = -EBUSY;
break;
}
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
case -EKEYEXPIRED:
ret = nfs4_delay(server->client, &exception->timeout);
if (ret != 0)
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
exception->retry = 1;
break;
case -NFS4ERR_BADOWNER:
/* The following works around a Linux server bug! */
case -NFS4ERR_BADNAME:
if (server->caps & NFS_CAP_UIDGID_NOMAP) {
server->caps &= ~NFS_CAP_UIDGID_NOMAP;
exception->retry = 1;
printk(KERN_WARNING "NFS: v4 server %s "
"does not accept raw "
"uid/gids. "
"Reenabling the idmapper.\n",
server->nfs_client->cl_hostname);
}
}
/* We failed to handle the error */
return nfs4_map_errors(ret);
wait_on_recovery:
ret = nfs4_wait_clnt_recover(clp);
if (ret == 0)
exception->retry = 1;
return ret;
}
static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
spin_lock(&clp->cl_lock);
if (time_before(clp->cl_last_renewal,timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
}
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
do_renew_lease(server->nfs_client, timestamp);
}
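/*
 * Aside (illustrative, not part of the original source): do_renew_lease()
 * compares jiffies values with time_before() rather than a plain "<" so the
 * test stays correct across jiffies wraparound. A minimal sketch of the
 * signed-difference idiom behind it (demo_* names are hypothetical):
 */
#if 0	/* illustrative sketch only -- never compiled as part of this file */
#include <stdio.h>

/* Signed-difference comparison survives counter wraparound, unlike `a < b`. */
#define demo_time_before(a, b)	((long)((a) - (b)) < 0)

int main(void)
{
    unsigned long a = (unsigned long)-16;	/* 16 ticks before the counter wraps */
    unsigned long b = 16;			/* 16 ticks after the wrap */

    printf("plain <     : %d\n", a < b);			/* 0: wrongly claims a is later */
    printf("time_before : %d\n", demo_time_before(a, b));	/* 1: a really is earlier */
    return 0;
}
#endif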
#if defined(CONFIG_NFS_V4_1)
/*
* nfs4_free_slot - free a slot and efficiently update slot table.
*
* freeing a slot is trivially done by clearing its respective bit
* in the bitmap.
* If the freed slotid equals highest_used_slotid we want to update it
* so that the server would be able to size down the slot table if needed,
* otherwise we know that the highest_used_slotid is still in use.
* When updating highest_used_slotid there may be "holes" in the bitmap
* so we need to scan down from highest_used_slotid to 0 looking for the now
* highest slotid in use.
* If none found, highest_used_slotid is set to -1.
*
* Must be called while holding tbl->slot_tbl_lock
*/
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
{
int slotid = free_slotid;
BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
/* clear used bit in bitmap */
__clear_bit(slotid, tbl->used_slots);
/* update highest_used_slotid when it is freed */
if (slotid == tbl->highest_used_slotid) {
slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
if (slotid < tbl->max_slots)
tbl->highest_used_slotid = slotid;
else
tbl->highest_used_slotid = -1;
}
dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
free_slotid, tbl->highest_used_slotid);
}
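/*
 * Aside (illustrative, not part of the original source): the comment above
 * describes rescanning for the highest set bit once the old highest slot is
 * freed. A self-contained sketch with a plain word-sized bitmap and a
 * downward scan standing in for find_last_bit() (demo_* names hypothetical):
 */
#if 0	/* illustrative sketch only -- never compiled as part of this file */
#include <stdio.h>

static unsigned long demo_used;		/* one bit per slot */
static int demo_highest_used = -1;

/* Free a slot; if it was the highest in use, scan down for the new highest. */
static void demo_free_slot(int slotid)
{
    demo_used &= ~(1UL << slotid);
    if (slotid == demo_highest_used) {
        int i;

        demo_highest_used = -1;
        for (i = slotid - 1; i >= 0; i--) {
            if (demo_used & (1UL << i)) {
                demo_highest_used = i;
                break;
            }
        }
    }
}

int main(void)
{
    demo_used = (1UL << 2) | (1UL << 5);
    demo_highest_used = 5;
    demo_free_slot(5);
    printf("highest used is now %d\n", demo_highest_used);	/* 2 */
    return 0;
}
#endif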
/*
* Signal state manager thread if session fore channel is drained
*/
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
struct rpc_task *task;
if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
if (task)
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
return;
}
if (ses->fc_slot_table.highest_used_slotid != -1)
return;
dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
complete(&ses->fc_slot_table.complete);
}
/*
* Signal state manager thread if session back channel is drained
*/
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
{
if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
ses->bc_slot_table.highest_used_slotid != -1)
return;
dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
complete(&ses->bc_slot_table.complete);
}
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
struct nfs4_slot_table *tbl;
tbl = &res->sr_session->fc_slot_table;
if (!res->sr_slot) {
/* just wake up the next guy waiting since
 * we may not have consumed a slot after all */
dprintk("%s: No slot\n", __func__);
return;
}
spin_lock(&tbl->slot_tbl_lock);
nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
nfs4_check_drain_fc_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
}
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
unsigned long timestamp;
struct nfs_client *clp;
/*
* sr_status remains 1 if an RPC level error occurred. The server
 * may or may not have processed the sequence operation.
* Proceed as if the server received and processed the sequence
* operation.
*/
if (res->sr_status == 1)
res->sr_status = NFS_OK;
/* don't increment the sequence number if the task wasn't sent */
if (!RPC_WAS_SENT(task))
goto out;
/* Check the SEQUENCE operation status */
switch (res->sr_status) {
case 0:
/* Update the slot's sequence and clientid lease timer */
++res->sr_slot->seq_nr;
timestamp = res->sr_renewal_time;
clp = res->sr_session->clp;
do_renew_lease(clp, timestamp);
/* Check sequence flags */
if (res->sr_status_flags != 0)
nfs4_schedule_lease_recovery(clp);
break;
case -NFS4ERR_DELAY:
/* The server detected a resend of the RPC call and
* returned NFS4ERR_DELAY as per Section 2.10.6.2
* of RFC5661.
*/
dprintk("%s: slot=%td seq=%d: Operation in progress\n",
__func__,
res->sr_slot - res->sr_session->fc_slot_table.slots,
res->sr_slot->seq_nr);
goto out_retry;
default:
/* Just update the slot sequence no. */
++res->sr_slot->seq_nr;
}
out:
/* The session may be reset by one of the error handlers. */
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
nfs41_sequence_free_slot(res);
return 1;
out_retry:
if (!rpc_restart_call(task))
goto out;
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return 0;
}
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
if (res->sr_session == NULL)
return 1;
return nfs41_sequence_done(task, res);
}
/*
* nfs4_find_slot - efficiently look for a free slot
*
* nfs4_find_slot looks for an unset bit in the used_slots bitmap.
* If found, we mark the slot as used, update the highest_used_slotid,
* and respectively set up the sequence operation args.
* The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
*
 * Note: must be called while holding the slot_tbl_lock.
*/
static u8
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
int slotid;
u8 ret_id = NFS4_MAX_SLOT_TABLE;
BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);
dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
__func__, tbl->used_slots[0], tbl->highest_used_slotid,
tbl->max_slots);
slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
if (slotid >= tbl->max_slots)
goto out;
__set_bit(slotid, tbl->used_slots);
if (slotid > tbl->highest_used_slotid)
tbl->highest_used_slotid = slotid;
ret_id = slotid;
out:
dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
return ret_id;
}
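/*
 * Aside (illustrative, not part of the original source): allocation is the
 * mirror image of the free path sketched above -- take the first clear bit
 * and raise highest_used_slotid when needed. A matching standalone sketch
 * (demo_* names hypothetical):
 */
#if 0	/* illustrative sketch only -- never compiled as part of this file */
#include <stdio.h>

#define DEMO_MAX_SLOTS	16

static unsigned long demo_used;
static int demo_highest_used = -1;

/* Find the lowest clear bit, mark it used, track the highest slot in use. */
static int demo_find_slot(void)
{
    int i;

    for (i = 0; i < DEMO_MAX_SLOTS; i++) {
        if (!(demo_used & (1UL << i))) {
            demo_used |= 1UL << i;
            if (i > demo_highest_used)
                demo_highest_used = i;
            return i;
        }
    }
    return DEMO_MAX_SLOTS;	/* table full, like NFS4_MAX_SLOT_TABLE */
}

int main(void)
{
    int a = demo_find_slot();
    int b = demo_find_slot();
    int c = demo_find_slot();

    printf("%d %d %d\n", a, b, c);	/* 0 1 2: lowest free slot first */
    return 0;
}
#endif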
int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply,
struct rpc_task *task)
{
struct nfs4_slot *slot;
struct nfs4_slot_table *tbl;
u8 slotid;
dprintk("--> %s\n", __func__);
/* slot already allocated? */
if (res->sr_slot != NULL)
return 0;
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
/* The state manager will wait until the slot table is empty */
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s session is draining\n", __func__);
return -EAGAIN;
}
if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s enforce FIFO order\n", __func__);
return -EAGAIN;
}
slotid = nfs4_find_slot(tbl);
if (slotid == NFS4_MAX_SLOT_TABLE) {
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("<-- %s: no free slots\n", __func__);
return -EAGAIN;
}
spin_unlock(&tbl->slot_tbl_lock);
rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
slot = tbl->slots + slotid;
args->sa_session = session;
args->sa_slotid = slotid;
args->sa_cache_this = cache_reply;
dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
res->sr_session = session;
res->sr_slot = slot;
res->sr_renewal_time = jiffies;
res->sr_status_flags = 0;
/*
* sr_status is only set in decode_sequence, and so will remain
* set to 1 if an rpc level failure occurs.
*/
res->sr_status = 1;
return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply,
struct rpc_task *task)
{
struct nfs4_session *session = nfs4_get_session(server);
int ret = 0;
if (session == NULL) {
args->sa_session = NULL;
res->sr_session = NULL;
goto out;
}
dprintk("--> %s clp %p session %p sr_slot %td\n",
__func__, session->clp, session, res->sr_slot ?
res->sr_slot - session->fc_slot_table.slots : -1);
ret = nfs41_setup_sequence(session, args, res, cache_reply,
task);
out:
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
struct nfs41_call_sync_data {
const struct nfs_server *seq_server;
struct nfs4_sequence_args *seq_args;
struct nfs4_sequence_res *seq_res;
int cache_reply;
};
static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
if (nfs4_setup_sequence(data->seq_server, data->seq_args,
data->seq_res, data->cache_reply, task))
return;
rpc_call_start(task);
}
static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs41_call_sync_prepare(task, calldata);
}
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
nfs41_sequence_done(task, data->seq_res);
}
struct rpc_call_ops nfs41_call_sync_ops = {
.rpc_call_prepare = nfs41_call_sync_prepare,
.rpc_call_done = nfs41_call_sync_done,
};
struct rpc_call_ops nfs41_call_priv_sync_ops = {
.rpc_call_prepare = nfs41_call_priv_sync_prepare,
.rpc_call_done = nfs41_call_sync_done,
};
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply,
int privileged)
{
int ret;
struct rpc_task *task;
struct nfs41_call_sync_data data = {
.seq_server = server,
.seq_args = args,
.seq_res = res,
.cache_reply = cache_reply,
};
struct rpc_task_setup task_setup = {
.rpc_client = clnt,
.rpc_message = msg,
.callback_ops = &nfs41_call_sync_ops,
.callback_data = &data
};
res->sr_slot = NULL;
if (privileged)
task_setup.callback_ops = &nfs41_call_priv_sync_ops;
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
ret = PTR_ERR(task);
else {
ret = task->tk_status;
rpc_put_task(task);
}
return ret;
}
int _nfs4_call_sync_session(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
return nfs4_call_sync_sequence(clnt, server, msg, args, res, cache_reply, 0);
}
#else
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
return 1;
}
#endif /* CONFIG_NFS_V4_1 */
int _nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
args->sa_session = res->sr_session = NULL;
return rpc_call_sync(clnt, msg, 0);
}
static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
args, res, cache_reply);
}
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
if (!cinfo->atomic || cinfo->before != dir->i_version)
nfs_force_lookup_revalidate(dir);
dir->i_version = cinfo->after;
spin_unlock(&dir->i_lock);
}
struct nfs4_opendata {
struct kref kref;
struct nfs_openargs o_arg;
struct nfs_openres o_res;
struct nfs_open_confirmargs c_arg;
struct nfs_open_confirmres c_res;
struct nfs_fattr f_attr;
struct nfs_fattr dir_attr;
struct dentry *dir;
struct dentry *dentry;
struct nfs4_state_owner *owner;
struct nfs4_state *state;
struct iattr attrs;
unsigned long timestamp;
unsigned int rpc_done : 1;
int rpc_status;
int cancelled;
};
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
p->o_res.f_attr = &p->f_attr;
p->o_res.dir_attr = &p->dir_attr;
p->o_res.seqid = p->o_arg.seqid;
p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct nfs4_state_owner *sp, fmode_t fmode, int flags,
const struct iattr *attrs,
gfp_t gfp_mask)
{
struct dentry *parent = dget_parent(dentry);
struct inode *dir = parent->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_opendata *p;
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
goto err;
p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
if (p->o_arg.seqid == NULL)
goto err_free;
nfs_sb_active(dentry->d_sb);
p->dentry = dget(dentry);
p->dir = parent;
p->owner = sp;
atomic_inc(&sp->so_count);
p->o_arg.fh = NFS_FH(dir);
p->o_arg.open_flags = flags;
p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
p->o_arg.clientid = server->nfs_client->cl_clientid;
p->o_arg.id = sp->so_owner_id.id;
p->o_arg.name = &dentry->d_name;
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (flags & O_CREAT) {
u32 *s;
p->o_arg.u.attrs = &p->attrs;
memcpy(&p->attrs, attrs, sizeof(p->attrs));
s = (u32 *) p->o_arg.u.verifier.data;
s[0] = jiffies;
s[1] = current->pid;
}
p->c_arg.fh = &p->o_res.fh;
p->c_arg.stateid = &p->o_res.stateid;
p->c_arg.seqid = p->o_arg.seqid;
nfs4_init_opendata_res(p);
kref_init(&p->kref);
return p;
err_free:
kfree(p);
err:
dput(parent);
return NULL;
}
static void nfs4_opendata_free(struct kref *kref)
{
struct nfs4_opendata *p = container_of(kref,
struct nfs4_opendata, kref);
struct super_block *sb = p->dentry->d_sb;
nfs_free_seqid(p->o_arg.seqid);
if (p->state != NULL)
nfs4_put_open_state(p->state);
nfs4_put_state_owner(p->owner);
dput(p->dir);
dput(p->dentry);
nfs_sb_deactive(sb);
kfree(p);
}
static void nfs4_opendata_put(struct nfs4_opendata *p)
{
if (p != NULL)
kref_put(&p->kref, nfs4_opendata_free);
}
static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
int ret;
ret = rpc_wait_for_completion_task(task);
return ret;
}
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
int ret = 0;
if (open_mode & O_EXCL)
goto out;
switch (mode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
&& state->n_rdonly != 0;
break;
case FMODE_WRITE:
ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
&& state->n_wronly != 0;
break;
case FMODE_READ|FMODE_WRITE:
ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
&& state->n_rdwr != 0;
}
out:
return ret;
}
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
if (delegation == NULL)
return 0;
if ((delegation->type & fmode) != fmode)
return 0;
if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
return 0;
nfs_mark_delegation_referenced(delegation);
return 1;
}
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
switch (fmode) {
case FMODE_WRITE:
state->n_wronly++;
break;
case FMODE_READ:
state->n_rdonly++;
break;
case FMODE_READ|FMODE_WRITE:
state->n_rdwr++;
}
nfs4_state_set_mode_locked(state, state->state | fmode);
}
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
switch (fmode) {
case FMODE_READ:
set_bit(NFS_O_RDONLY_STATE, &state->flags);
break;
case FMODE_WRITE:
set_bit(NFS_O_WRONLY_STATE, &state->flags);
break;
case FMODE_READ|FMODE_WRITE:
set_bit(NFS_O_RDWR_STATE, &state->flags);
}
}
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
write_seqlock(&state->seqlock);
nfs_set_open_stateid_locked(state, stateid, fmode);
write_sequnlock(&state->seqlock);
}
static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
/*
* Protect the call to nfs4_state_set_mode_locked and
* serialise the stateid update
*/
write_seqlock(&state->seqlock);
if (deleg_stateid != NULL) {
memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data));
set_bit(NFS_DELEGATED_STATE, &state->flags);
}
if (open_stateid != NULL)
nfs_set_open_stateid_locked(state, open_stateid, fmode);
write_sequnlock(&state->seqlock);
spin_lock(&state->owner->so_lock);
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
}
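/*
 * Aside (illustrative, not part of the original source): the seqlock above
 * lets readers copy the multi-word stateid without blocking the writer; they
 * simply retry if the sequence count changed underneath them. A toy
 * single-threaded sketch of the retry protocol (the real seqlock adds a
 * writer spinlock and memory barriers; demo_* names are hypothetical):
 */
#if 0	/* illustrative sketch only -- never compiled as part of this file */
#include <stdio.h>

/* Toy sequence counter: odd while a write is in progress. */
static unsigned demo_seq;
static unsigned long demo_stateid[2];	/* multi-word datum, like an nfs4_stateid */

static void demo_write_begin(void) { demo_seq++; }	/* seq becomes odd */
static void demo_write_end(void)   { demo_seq++; }	/* seq becomes even again */

/* Readers retry until they observe the same even sequence before and after. */
static void demo_read(unsigned long out[2])
{
    unsigned start;

    do {
        start = demo_seq;
        out[0] = demo_stateid[0];
        out[1] = demo_stateid[1];
    } while ((start & 1) || start != demo_seq);
}

int main(void)
{
    unsigned long copy[2];

    demo_write_begin();
    demo_stateid[0] = 0xdead;
    demo_stateid[1] = 0xbeef;
    demo_write_end();

    demo_read(copy);
    printf("%lx %lx\n", copy[0], copy[1]);	/* dead beef, read consistently */
    return 0;
}
#endif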
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *deleg_cur;
int ret = 0;
fmode &= (FMODE_READ|FMODE_WRITE);
rcu_read_lock();
deleg_cur = rcu_dereference(nfsi->delegation);
if (deleg_cur == NULL)
goto no_delegation;
spin_lock(&deleg_cur->lock);
if (nfsi->delegation != deleg_cur ||
(deleg_cur->type & fmode) != fmode)
goto no_delegation_unlock;
if (delegation == NULL)
delegation = &deleg_cur->stateid;
else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
goto no_delegation_unlock;
nfs_mark_delegation_referenced(deleg_cur);
__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
ret = 1;
no_delegation_unlock:
spin_unlock(&deleg_cur->lock);
no_delegation:
rcu_read_unlock();
if (!ret && open_stateid != NULL) {
__update_open_stateid(state, open_stateid, NULL, fmode);
ret = 1;
}
return ret;
}
static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation == NULL || (delegation->type & fmode) == fmode) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
nfs_inode_return_delegation(inode);
}
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
struct nfs4_state *state = opendata->state;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
int open_mode = opendata->o_arg.open_flags & O_EXCL;
fmode_t fmode = opendata->o_arg.fmode;
nfs4_stateid stateid;
int ret = -EAGAIN;
for (;;) {
if (can_open_cached(state, fmode, open_mode)) {
spin_lock(&state->owner->so_lock);
if (can_open_cached(state, fmode, open_mode)) {
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
goto out_return_state;
}
spin_unlock(&state->owner->so_lock);
}
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
if (!can_open_delegated(delegation, fmode)) {
rcu_read_unlock();
break;
}
/* Save the delegation */
memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
rcu_read_unlock();
ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
if (ret != 0)
goto out;
ret = -EAGAIN;
/* Try to update the stateid using the delegation */
if (update_open_stateid(state, NULL, &stateid, fmode))
goto out_return_state;
}
out:
return ERR_PTR(ret);
out_return_state:
atomic_inc(&state->count);
return state;
}
static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
struct inode *inode;
struct nfs4_state *state = NULL;
struct nfs_delegation *delegation;
int ret;
if (!data->rpc_done) {
state = nfs4_try_open_cached(data);
goto out;
}
ret = -EAGAIN;
if (!(data->f_attr.valid & NFS_ATTR_FATTR))
goto err;
inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
ret = PTR_ERR(inode);
if (IS_ERR(inode))
goto err;
ret = -ENOMEM;
state = nfs4_get_open_state(inode, data->owner);
if (state == NULL)
goto err_put_inode;
if (data->o_res.delegation_type != 0) {
int delegation_flags = 0;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation)
delegation_flags = delegation->flags;
rcu_read_unlock();
if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
"returning a delegation for "
"OPEN(CLAIM_DELEGATE_CUR)\n",
NFS_CLIENT(inode)->cl_server);
} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
nfs_inode_set_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
else
nfs_inode_reclaim_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
}
update_open_stateid(state, &data->o_res.stateid, NULL,
data->o_arg.fmode);
iput(inode);
out:
return state;
err_put_inode:
iput(inode);
err:
return ERR_PTR(ret);
}
static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_open_context *ctx;
spin_lock(&state->inode->i_lock);
list_for_each_entry(ctx, &nfsi->open_files, list) {
if (ctx->state != state)
continue;
get_nfs_open_context(ctx);
spin_unlock(&state->inode->i_lock);
return ctx;
}
spin_unlock(&state->inode->i_lock);
return ERR_PTR(-ENOENT);
}
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs4_opendata *opendata;
opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
if (opendata == NULL)
return ERR_PTR(-ENOMEM);
opendata->state = state;
atomic_inc(&state->count);
return opendata;
}
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
struct nfs4_state *newstate;
int ret;
opendata->o_arg.open_flags = 0;
opendata->o_arg.fmode = fmode;
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
nfs4_init_opendata_res(opendata);
ret = _nfs4_recover_proc_open(opendata);
if (ret != 0)
return ret;
newstate = nfs4_opendata_to_nfs4_state(opendata);
if (IS_ERR(newstate))
return PTR_ERR(newstate);
nfs4_close_state(newstate, fmode);
*res = newstate;
return 0;
}
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
struct nfs4_state *newstate;
int ret;
/* memory barrier prior to reading state->n_* */
clear_bit(NFS_DELEGATED_STATE, &state->flags);
smp_rmb();
if (state->n_rdwr != 0) {
clear_bit(NFS_O_RDWR_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
if (state->n_wronly != 0) {
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
if (state->n_rdonly != 0) {
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
/*
* We may have performed cached opens for all three recoveries.
* Check if we need to update the current stateid.
*/
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
write_seqlock(&state->seqlock);
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
write_sequnlock(&state->seqlock);
}
return 0;
}
/*
* OPEN_RECLAIM:
* reclaim state on the server after a reboot.
*/
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_delegation *delegation;
struct nfs4_opendata *opendata;
fmode_t delegation_type = 0;
int status;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
opendata->o_arg.fh = NFS_FH(state->inode);
rcu_read_lock();
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
delegation_type = delegation->type;
rcu_read_unlock();
opendata->o_arg.u.delegation_type = delegation_type;
status = nfs4_open_recover(opendata, state);
nfs4_opendata_put(opendata);
return status;
}
static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_do_open_reclaim(ctx, state);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
struct nfs_open_context *ctx;
int ret;
ctx = nfs4_state_find_open_context(state);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = nfs4_do_open_reclaim(ctx, state);
put_nfs_open_context(ctx);
return ret;
}
static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs4_opendata *opendata;
int ret;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
memcpy(opendata->o_arg.u.delegation.data, stateid->data,
sizeof(opendata->o_arg.u.delegation.data));
ret = nfs4_open_recover(opendata, state);
nfs4_opendata_put(opendata);
return ret;
}
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
struct nfs_server *server = NFS_SERVER(state->inode);
int err;
do {
err = _nfs4_open_delegation_recall(ctx, state, stateid);
switch (err) {
case 0:
case -ENOENT:
case -ESTALE:
goto out;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
/* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client);
goto out;
case -ERESTARTSYS:
/*
* The show must go on: exit, but mark the
* stateid as needing recovery.
*/
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
nfs4_schedule_stateid_recovery(server, state);
case -EKEYEXPIRED:
/*
* User RPCSEC_GSS context has expired.
* We cannot recover this stateid now, so
* skip it and allow recovery thread to
* proceed.
*/
case -ENOMEM:
err = 0;
goto out;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
if (data->rpc_status == 0) {
memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
sizeof(data->o_res.stateid.data));
nfs_confirm_seqid(&data->owner->so_seqid, 0);
renew_lease(data->o_res.server, data->timestamp);
data->rpc_done = 1;
}
}
static void nfs4_open_confirm_release(void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state *state = NULL;
/* If this request hasn't been cancelled, do nothing */
if (data->cancelled == 0)
goto out_free;
/* In case of error, no cleanup! */
if (!data->rpc_done)
goto out_free;
state = nfs4_opendata_to_nfs4_state(data);
if (!IS_ERR(state))
nfs4_close_state(state, data->o_arg.fmode);
out_free:
nfs4_opendata_put(data);
}
static const struct rpc_call_ops nfs4_open_confirm_ops = {
.rpc_call_done = nfs4_open_confirm_done,
.rpc_release = nfs4_open_confirm_release,
};
/*
* Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
*/
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
.rpc_argp = &data->c_arg,
.rpc_resp = &data->c_res,
.rpc_cred = data->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_open_confirm_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
kref_get(&data->kref);
data->rpc_done = 0;
data->rpc_status = 0;
data->timestamp = jiffies;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0) {
data->cancelled = 1;
smp_wmb();
} else
status = data->rpc_status;
rpc_put_task(task);
return status;
}
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state_owner *sp = data->owner;
if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
return;
/*
* Check if we still need to send an OPEN call, or if we can use
* a delegation instead.
*/
if (data->state != NULL) {
struct nfs_delegation *delegation;
if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
goto out_no_action;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
can_open_delegated(delegation, data->o_arg.fmode))
goto unlock_no_action;
rcu_read_unlock();
}
/* Update the open owner and client ids. */
data->o_arg.id = sp->so_owner_id.id;
data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
}
data->timestamp = jiffies;
if (nfs4_setup_sequence(data->o_arg.server,
&data->o_arg.seq_args,
&data->o_res.seq_res, 1, task))
return;
rpc_call_start(task);
return;
unlock_no_action:
rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
}
static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs4_open_prepare(task, calldata);
}
static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
if (!nfs4_sequence_done(task, &data->o_res.seq_res))
return;
if (task->tk_status == 0) {
switch (data->o_res.f_attr->mode & S_IFMT) {
case S_IFREG:
break;
case S_IFLNK:
data->rpc_status = -ELOOP;
break;
case S_IFDIR:
data->rpc_status = -EISDIR;
break;
default:
data->rpc_status = -ENOTDIR;
}
renew_lease(data->o_res.server, data->timestamp);
if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
nfs_confirm_seqid(&data->owner->so_seqid, 0);
}
data->rpc_done = 1;
}
static void nfs4_open_release(void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state *state = NULL;
/* If this request hasn't been cancelled, do nothing */
if (data->cancelled == 0)
goto out_free;
/* In case of error, no cleanup! */
if (data->rpc_status != 0 || !data->rpc_done)
goto out_free;
/* In case we need an open_confirm, no cleanup! */
if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
goto out_free;
state = nfs4_opendata_to_nfs4_state(data);
if (!IS_ERR(state))
nfs4_close_state(state, data->o_arg.fmode);
out_free:
nfs4_opendata_put(data);
}
static const struct rpc_call_ops nfs4_open_ops = {
.rpc_call_prepare = nfs4_open_prepare,
.rpc_call_done = nfs4_open_done,
.rpc_release = nfs4_open_release,
};
static const struct rpc_call_ops nfs4_recover_open_ops = {
.rpc_call_prepare = nfs4_recover_open_prepare,
.rpc_call_done = nfs4_open_done,
.rpc_release = nfs4_open_release,
};
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
struct inode *dir = data->dir->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_openargs *o_arg = &data->o_arg;
struct nfs_openres *o_res = &data->o_res;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
.rpc_argp = o_arg,
.rpc_resp = o_res,
.rpc_cred = data->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_open_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
kref_get(&data->kref);
data->rpc_done = 0;
data->rpc_status = 0;
data->cancelled = 0;
if (isrecover)
task_setup_data.callback_ops = &nfs4_recover_open_ops;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0) {
data->cancelled = 1;
smp_wmb();
} else
status = data->rpc_status;
rpc_put_task(task);
return status;
}
static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
struct inode *dir = data->dir->d_inode;
struct nfs_openres *o_res = &data->o_res;
int status;
status = nfs4_run_open_task(data, 1);
if (status != 0 || !data->rpc_done)
return status;
nfs_refresh_inode(dir, o_res->dir_attr);
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
return status;
}
return status;
}
/*
* Note: On error, nfs4_proc_open will free the struct nfs4_opendata
*/
static int _nfs4_proc_open(struct nfs4_opendata *data)
{
struct inode *dir = data->dir->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_openargs *o_arg = &data->o_arg;
struct nfs_openres *o_res = &data->o_res;
int status;
status = nfs4_run_open_task(data, 0);
if (!data->rpc_done)
return status;
if (status != 0) {
if (status == -NFS4ERR_BADNAME &&
!(o_arg->open_flags & O_CREAT))
return -ENOENT;
return status;
}
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
nfs_post_op_update_inode(dir, o_res->dir_attr);
} else
nfs_refresh_inode(dir, o_res->dir_attr);
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
server->caps &= ~NFS_CAP_POSIX_LOCK;
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
return status;
}
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
return 0;
}
static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
unsigned int loop;
int ret;
for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
ret = nfs4_wait_clnt_recover(clp);
if (ret != 0)
break;
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
!test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
break;
nfs4_schedule_state_manager(clp);
ret = -EIO;
}
return ret;
}
static int nfs4_recover_expired_lease(struct nfs_server *server)
{
return nfs4_client_recover_expired_lease(server->nfs_client);
}
/*
* OPEN_EXPIRED:
* reclaim state on the server after a network partition.
* Assumes caller holds the appropriate lock
*/
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs4_opendata *opendata;
int ret;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
ret = nfs4_open_recover(opendata, state);
if (ret == -ESTALE)
d_drop(ctx->dentry);
nfs4_opendata_put(opendata);
return ret;
}
static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_open_expired(ctx, state);
switch (err) {
default:
goto out;
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
nfs4_handle_exception(server, err, &exception);
err = 0;
}
} while (exception.retry);
out:
return err;
}
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
struct nfs_open_context *ctx;
int ret;
ctx = nfs4_state_find_open_context(state);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = nfs4_do_open_expired(ctx, state);
put_nfs_open_context(ctx);
return ret;
}
#if defined(CONFIG_NFS_V4_1)
static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
int status;
struct nfs_server *server = NFS_SERVER(state->inode);
status = nfs41_test_stateid(server, state);
if (status == NFS_OK)
return 0;
nfs41_free_stateid(server, state);
return nfs4_open_expired(sp, state);
}
#endif
/*
 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
 * fields corresponding to the attributes that were used to store the verifier.
 * Make sure we clobber those fields in the later setattr call.
*/
static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
{
if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
!(sattr->ia_valid & ATTR_ATIME_SET))
sattr->ia_valid |= ATTR_ATIME;
if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
!(sattr->ia_valid & ATTR_MTIME_SET))
sattr->ia_valid |= ATTR_MTIME;
}
/*
* Returns a referenced nfs4_state
*/
static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
{
struct nfs4_state_owner *sp;
struct nfs4_state *state = NULL;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_opendata *opendata;
int status;
/* Protect against reboot recovery conflicts */
status = -ENOMEM;
if (!(sp = nfs4_get_state_owner(server, cred))) {
dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
goto out_err;
}
status = nfs4_recover_expired_lease(server);
if (status != 0)
goto err_put_state_owner;
if (dentry->d_inode != NULL)
nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
status = -ENOMEM;
opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
if (opendata == NULL)
goto err_put_state_owner;
if (dentry->d_inode != NULL)
opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
status = _nfs4_proc_open(opendata);
if (status != 0)
goto err_opendata_put;
state = nfs4_opendata_to_nfs4_state(opendata);
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
if (opendata->o_arg.open_flags & O_EXCL) {
nfs4_exclusive_attrset(opendata, sattr);
nfs_fattr_init(opendata->o_res.f_attr);
status = nfs4_do_setattr(state->inode, cred,
opendata->o_res.f_attr, sattr,
state);
if (status == 0)
nfs_setattr_update_inode(state->inode, sattr);
nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
}
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
return 0;
err_opendata_put:
nfs4_opendata_put(opendata);
err_put_state_owner:
nfs4_put_state_owner(sp);
out_err:
*res = NULL;
return status;
}
static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
{
struct nfs4_exception exception = { };
struct nfs4_state *res;
int status;
do {
status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
if (status == 0)
break;
/* NOTE: BAD_SEQID means the server and client disagree about the
* book-keeping w.r.t. state-changing operations
* (OPEN/CLOSE/LOCK/LOCKU...)
* It is actually a sign of a bug on the client or on the server.
*
* If we receive a BAD_SEQID error in the particular case of
* doing an OPEN, we assume that nfs_increment_open_seqid() will
* have unhashed the old state_owner for us, and that we can
* therefore safely retry using a new one. We should still warn
* the user though...
*/
if (status == -NFS4ERR_BAD_SEQID) {
printk(KERN_WARNING "NFS: v4 server %s "
" returned a bad sequence-id error!\n",
NFS_SERVER(dir)->nfs_client->cl_hostname);
exception.retry = 1;
continue;
}
/*
* BAD_STATEID on OPEN means that the server cancelled our
* state before it received the OPEN_CONFIRM.
* Recover by retrying the request as per the discussion
* on Page 181 of RFC3530.
*/
if (status == -NFS4ERR_BAD_STATEID) {
exception.retry = 1;
continue;
}
if (status == -EAGAIN) {
/* We must have found a delegation */
exception.retry = 1;
continue;
}
res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
status, &exception));
} while (exception.retry);
return res;
}
static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_setattrargs arg = {
.fh = NFS_FH(inode),
.iap = sattr,
.server = server,
.bitmask = server->attr_bitmask,
};
struct nfs_setattrres res = {
.fattr = fattr,
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
.rpc_argp = &arg,
.rpc_resp = &res,
.rpc_cred = cred,
};
unsigned long timestamp = jiffies;
int status;
nfs_fattr_init(fattr);
if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
/* Use that stateid */
} else if (state != NULL) {
nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
return status;
}
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_do_setattr(inode, cred, fattr, sattr, state),
&exception);
} while (exception.retry);
return err;
}
struct nfs4_closedata {
struct inode *inode;
struct nfs4_state *state;
struct nfs_closeargs arg;
struct nfs_closeres res;
struct nfs_fattr fattr;
unsigned long timestamp;
bool roc;
u32 roc_barrier;
};
static void nfs4_free_closedata(void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state_owner *sp = calldata->state->owner;
struct super_block *sb = calldata->state->inode->i_sb;
if (calldata->roc)
pnfs_roc_release(calldata->state->inode);
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_state_owner(sp);
nfs_sb_deactive(sb);
kfree(calldata);
}
static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
fmode_t fmode)
{
spin_lock(&state->owner->so_lock);
if (!(fmode & FMODE_READ))
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
if (!(fmode & FMODE_WRITE))
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
clear_bit(NFS_O_RDWR_STATE, &state->flags);
spin_unlock(&state->owner->so_lock);
}
static void nfs4_close_done(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
*/
switch (task->tk_status) {
case 0:
if (calldata->roc)
pnfs_roc_set_barrier(state->inode,
calldata->roc_barrier);
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
nfs4_close_clear_stateid_flags(state,
calldata->arg.fmode);
break;
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_EXPIRED:
if (calldata->arg.fmode == 0)
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
rpc_restart_call_prepare(task);
}
nfs_release_seqid(calldata->arg.seqid);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
int call_close = 0;
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
spin_lock(&state->owner->so_lock);
/* Calculate the change in open mode */
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0) {
call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
calldata->arg.fmode &= ~FMODE_READ;
}
if (state->n_wronly == 0) {
call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
calldata->arg.fmode &= ~FMODE_WRITE;
}
}
spin_unlock(&state->owner->so_lock);
if (!call_close) {
/* Note: exit _without_ calling nfs4_close_done */
task->tk_action = NULL;
return;
}
if (calldata->arg.fmode == 0) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
if (calldata->roc &&
pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
task, NULL);
return;
}
}
nfs_fattr_init(calldata->res.fattr);
calldata->timestamp = jiffies;
if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
&calldata->arg.seq_args, &calldata->res.seq_res,
1, task))
return;
rpc_call_start(task);
}
static const struct rpc_call_ops nfs4_close_ops = {
.rpc_call_prepare = nfs4_close_prepare,
.rpc_call_done = nfs4_close_done,
.rpc_release = nfs4_free_closedata,
};
/*
* It is possible for data to be read/written from a mem-mapped file
* after the sys_close call (which hits the vfs layer as a flush).
* This means that we can't safely call nfsv4 close on a file until
* the inode is cleared. This in turn means that we are not good
* NFSv4 citizens - we do not indicate to the server to update the file's
* share state even when we are done with one of the three share
* stateid's in the inode.
*
* NOTE: Caller must be holding the sp->so_owner semaphore!
*/
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_closedata *calldata;
struct nfs4_state_owner *sp = state->owner;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
.rpc_cred = state->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_close_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status = -ENOMEM;
calldata = kzalloc(sizeof(*calldata), gfp_mask);
if (calldata == NULL)
goto out;
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
calldata->arg.stateid = &state->open_stateid;
/* Serialization for the sequence id */
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
if (calldata->arg.seqid == NULL)
goto out_free_calldata;
calldata->arg.fmode = 0;
calldata->arg.bitmask = server->cache_consistency_bitmask;
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->roc = roc;
nfs_sb_active(calldata->inode->i_sb);
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = 0;
if (wait)
status = rpc_wait_for_completion_task(task);
rpc_put_task(task);
return status;
out_free_calldata:
kfree(calldata);
out:
if (roc)
pnfs_roc_release(state->inode);
nfs4_put_open_state(state);
nfs4_put_state_owner(sp);
return status;
}
static struct inode *
nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
{
struct nfs4_state *state;
/* Protect against concurrent sillydeletes */
state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
if (IS_ERR(state))
return ERR_CAST(state);
ctx->state = state;
return igrab(state->inode);
}
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
if (ctx->state == NULL)
return;
if (is_sync)
nfs4_close_sync(ctx->state, ctx->mode);
else
nfs4_close_state(ctx->state, ctx->mode);
}
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
struct nfs4_server_caps_arg args = {
.fhandle = fhandle,
};
struct nfs4_server_caps_res res = {};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
NFS_CAP_CTIME|NFS_CAP_MTIME);
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
server->caps |= NFS_CAP_ACLS;
if (res.has_links != 0)
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
server->caps |= NFS_CAP_FILEID;
if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
server->caps |= NFS_CAP_MODE;
if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
server->caps |= NFS_CAP_NLINK;
if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
server->caps |= NFS_CAP_OWNER;
if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
server->caps |= NFS_CAP_OWNER_GROUP;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
server->caps |= NFS_CAP_ATIME;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
server->caps |= NFS_CAP_CTIME;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
server->caps |= NFS_CAP_MTIME;
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
server->acl_bitmask = res.acl_bitmask;
}
return status;
}
int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_server_capabilities(server, fhandle),
&exception);
} while (exception.retry);
return err;
}
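/* Fetch the root filehandle and its attributes via LOOKUP_ROOT. */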
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
struct nfs4_lookup_root_arg args = {
.bitmask = nfs4_fattr_bitmap,
};
struct nfs4_lookup_res res = {
.server = server,
.fattr = info->fattr,
.fh = fhandle,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(info->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_lookup_root(server, fhandle, info);
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
break;
default:
err = nfs4_handle_exception(server, err, &exception);
}
} while (exception.retry);
return err;
}
static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, rpc_authflavor_t flavor)
{
struct rpc_auth *auth;
int ret;
auth = rpcauth_create(flavor, server->client);
if (!auth) {
ret = -EIO;
goto out;
}
ret = nfs4_lookup_root(server, fhandle, info);
out:
return ret;
}
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int i, len, status = 0;
rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
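/*
 * Note: this assumes gss_mech_list_pseudoflavors() returns at most
 * NFS_MAX_SECFLAVORS - 1 entries, leaving room for RPC_AUTH_NULL below.
 */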
len = gss_mech_list_pseudoflavors(&flav_array[0]);
flav_array[len] = RPC_AUTH_NULL;
len += 1;
for (i = 0; i < len; i++) {
status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
continue;
break;
}
/*
 * -EACCES could mean that the user doesn't have correct permissions
 * to access the mount. It could also mean that we tried to mount
 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
 * existing mount programs don't handle -EACCES very well so it should
 * be mapped to -EPERM instead.
 */
if (status == -EACCES)
status = -EPERM;
return status;
}
/*
* get the file handle for the "/" directory on the server
*/
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int minor_version = server->nfs_client->cl_minorversion;
int status = nfs4_lookup_root(server, fhandle, info);
if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
/*
* A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
* by nfs4_map_errors() as this function exits.
*/
status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
if (status == 0)
status = nfs4_server_capabilities(server, fhandle);
if (status == 0)
status = nfs4_do_fsinfo(server, fhandle, info);
return nfs4_map_errors(status);
}
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
/*
* Get locations and (maybe) other attributes of a referral.
* Note that we'll actually follow the referral later when
* we detect an fsid mismatch during inode revalidation
*/
static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
int status = -ENOMEM;
struct page *page = NULL;
struct nfs4_fs_locations *locations = NULL;
page = alloc_page(GFP_KERNEL);
if (page == NULL)
goto out;
locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
if (locations == NULL)
goto out;
status = nfs4_proc_fs_locations(dir, name, locations, page);
if (status != 0)
goto out;
/* Make sure server returned a different fsid for the referral */
if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
dprintk("%s: server did not return a different fsid for"
" a referral at %s\n", __func__, name->name);
status = -EIO;
goto out;
}
/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
nfs_fixup_referral_attributes(&locations->fattr);
/* replace the lookup nfs_fattr with the locations nfs_fattr */
memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
memset(fhandle, 0, sizeof(struct nfs_fh));
out:
if (page)
__free_page(page);
kfree(locations);
return status;
}
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_getattr_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_getattr_res res = {
.fattr = fattr,
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_getattr(server, fhandle, fattr),
&exception);
} while (exception.retry);
return err;
}
/*
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we
 * need to do so in the size_change case to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in NFSv4, it is
 * possible that we opened a different file by the same
 * name. We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
 */
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct iattr *sattr)
{
struct inode *inode = dentry->d_inode;
struct rpc_cred *cred = NULL;
struct nfs4_state *state = NULL;
int status;
if (pnfs_ld_layoutret_on_setattr(inode))
pnfs_return_layout(inode);
nfs_fattr_init(fattr);
/* Search for an existing open(O_WRITE) file */
if (sattr->ia_valid & ATTR_FILE) {
struct nfs_open_context *ctx;
ctx = nfs_file_open_context(sattr->ia_file);
if (ctx) {
cred = ctx->cred;
state = ctx->state;
}
}
status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
if (status == 0)
nfs_setattr_update_inode(inode, sattr);
return status;
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
struct nfs_server *server = NFS_SERVER(dir);
int status;
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
.dir_fh = NFS_FH(dir),
.name = name,
};
struct nfs4_lookup_res res = {
.server = server,
.fattr = fattr,
.fh = fhandle,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fattr);
dprintk("NFS call lookup %s\n", name->name);
status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
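/*
 * Synthesize minimal directory attributes (and a zeroed filehandle) for a
 * lookup that failed with NFS4ERR_WRONGSEC, so the entry can be treated
 * as a mountpoint to be crossed with the correct security flavor.
 */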
void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh)
{
memset(fh, 0, sizeof(struct nfs_fh));
fattr->fsid.major = 1;
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT;
fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
fattr->nlink = 2;
}
static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
int err;
do {
int status;
status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr);
switch (status) {
case -NFS4ERR_BADNAME:
return -ENOENT;
case -NFS4ERR_MOVED:
return nfs4_get_referral(dir, name, fattr, fhandle);
case -NFS4ERR_WRONGSEC:
nfs_fixup_secinfo_attributes(fattr, fhandle);
}
err = nfs4_handle_exception(NFS_SERVER(dir),
status, &exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_accessargs args = {
.fh = NFS_FH(inode),
.bitmask = server->attr_bitmask,
};
struct nfs4_accessres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = entry->cred,
};
int mode = entry->mask;
int status;
/*
* Determine which access bits we want to ask for...
*/
if (mode & MAY_READ)
args.access |= NFS4_ACCESS_READ;
if (S_ISDIR(inode->i_mode)) {
if (mode & MAY_WRITE)
args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
if (mode & MAY_EXEC)
args.access |= NFS4_ACCESS_LOOKUP;
} else {
if (mode & MAY_WRITE)
args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
if (mode & MAY_EXEC)
args.access |= NFS4_ACCESS_EXECUTE;
}
res.fattr = nfs_alloc_fattr();
if (res.fattr == NULL)
return -ENOMEM;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (!status) {
entry->mask = 0;
if (res.access & NFS4_ACCESS_READ)
entry->mask |= MAY_READ;
if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
entry->mask |= MAY_WRITE;
if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
entry->mask |= MAY_EXEC;
nfs_refresh_inode(inode, res.fattr);
}
nfs_free_fattr(res.fattr);
return status;
}
static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_access(inode, entry),
&exception);
} while (exception.retry);
return err;
}
/*
* TODO: For the time being, we don't try to get any attributes
* along with any of the zero-copy operations READ, READDIR,
* READLINK, WRITE.
*
* In the case of the first three, we want to put the GETATTR
* after the read-type operation -- this is because it is hard
* to predict the length of a GETATTR response in v4, and thus
* align the READ data correctly. This means that the GETATTR
* may end up partially falling into the page cache, and we should
* shift it into the 'tail' of the xdr_buf before processing.
* To do this efficiently, we need to know the total length
* of data received, which doesn't seem to be available outside
* of the RPC layer.
*
* In the case of WRITE, we also want to put the GETATTR after
* the operation -- in this case because we want to make sure
* we get the post-operation mtime and size. This means that
* we can't use xdr_encode_pages() as written: we need a variant
* of it which would leave room in the 'tail' iovec.
*
* Both of these changes to the XDR layer would in fact be quite
* minor, but I decided to leave them for a subsequent patch.
*/
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
struct nfs4_readlink args = {
.fh = NFS_FH(inode),
.pgbase = pgbase,
.pglen = pglen,
.pages = &page,
};
struct nfs4_readlink_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_readlink(inode, page, pgbase, pglen),
&exception);
} while (exception.retry);
return err;
}
/*
* Got race?
* We will need to arrange for the VFS layer to provide an atomic open.
* Until then, this create/open method is prone to inefficiency and race
* conditions due to the lookup, create, and open VFS calls from sys_open()
* placed on the wire.
*
* Given the above sorry state of affairs, I'm simply sending an OPEN.
* The file will be opened again in the subsequent VFS open call
* (nfs4_proc_file_open).
*
* The open for read will just hang around to be used by any process that
* opens the file O_RDONLY. This will all be resolved with the VFS changes.
*/
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nfs_open_context *ctx)
{
struct dentry *de = dentry;
struct nfs4_state *state;
struct rpc_cred *cred = NULL;
fmode_t fmode = 0;
int status = 0;
if (ctx != NULL) {
cred = ctx->cred;
de = ctx->dentry;
fmode = ctx->mode;
}
sattr->ia_mode &= ~current_umask();
state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
d_drop(dentry);
if (IS_ERR(state)) {
status = PTR_ERR(state);
goto out;
}
d_add(dentry, igrab(state->inode));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
if (ctx != NULL)
ctx->state = state;
else
nfs4_close_sync(state, fmode);
out:
return status;
}
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs args = {
.fh = NFS_FH(dir),
.name.len = name->len,
.name.name = name->name,
.bitmask = server->attr_bitmask,
};
struct nfs_removeres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.dir_attr = nfs_alloc_fattr();
if (res.dir_attr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
}
nfs_free_fattr(res.dir_attr);
out:
return status;
}
static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_remove(dir, name),
&exception);
} while (exception.retry);
return err;
}
static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs *args = msg->rpc_argp;
struct nfs_removeres *res = msg->rpc_resp;
args->bitmask = server->cache_consistency_bitmask;
res->server = server;
res->seq_res.sr_slot = NULL;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
}
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
struct nfs_removeres *res = task->tk_msg.rpc_resp;
if (!nfs4_sequence_done(task, &res->seq_res))
return 0;
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_renameargs *arg = msg->rpc_argp;
struct nfs_renameres *res = msg->rpc_resp;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
arg->bitmask = server->attr_bitmask;
res->server = server;
}
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
struct inode *new_dir)
{
struct nfs_renameres *res = task->tk_msg.rpc_resp;
if (!nfs4_sequence_done(task, &res->seq_res))
return 0;
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(old_dir, &res->old_cinfo);
nfs_post_op_update_inode(old_dir, res->old_fattr);
update_changeattr(new_dir, &res->new_cinfo);
nfs_post_op_update_inode(new_dir, res->new_fattr);
return 1;
}
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
struct nfs_server *server = NFS_SERVER(old_dir);
struct nfs_renameargs arg = {
.old_dir = NFS_FH(old_dir),
.new_dir = NFS_FH(new_dir),
.old_name = old_name,
.new_name = new_name,
.bitmask = server->attr_bitmask,
};
struct nfs_renameres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.old_fattr = nfs_alloc_fattr();
res.new_fattr = nfs_alloc_fattr();
if (res.old_fattr == NULL || res.new_fattr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
nfs_post_op_update_inode(old_dir, res.old_fattr);
update_changeattr(new_dir, &res.new_cinfo);
nfs_post_op_update_inode(new_dir, res.new_fattr);
}
out:
nfs_free_fattr(res.new_fattr);
nfs_free_fattr(res.old_fattr);
return status;
}
static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(old_dir),
_nfs4_proc_rename(old_dir, old_name,
new_dir, new_name),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_link_arg arg = {
.fh = NFS_FH(inode),
.dir_fh = NFS_FH(dir),
.name = name,
.bitmask = server->attr_bitmask,
};
struct nfs4_link_res res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.fattr = nfs_alloc_fattr();
res.dir_attr = nfs_alloc_fattr();
if (res.fattr == NULL || res.dir_attr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
nfs_post_op_update_inode(inode, res.fattr);
}
out:
nfs_free_fattr(res.dir_attr);
nfs_free_fattr(res.fattr);
return status;
}
static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_link(inode, dir, name),
&exception);
} while (exception.retry);
return err;
}
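/* Argument/result bundle shared by the CREATE-based operations. */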
struct nfs4_createdata {
struct rpc_message msg;
struct nfs4_create_arg arg;
struct nfs4_create_res res;
struct nfs_fh fh;
struct nfs_fattr fattr;
struct nfs_fattr dir_fattr;
};
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
struct qstr *name, struct iattr *sattr, u32 ftype)
{
struct nfs4_createdata *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data != NULL) {
struct nfs_server *server = NFS_SERVER(dir);
data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
data->msg.rpc_argp = &data->arg;
data->msg.rpc_resp = &data->res;
data->arg.dir_fh = NFS_FH(dir);
data->arg.server = server;
data->arg.name = name;
data->arg.attrs = sattr;
data->arg.ftype = ftype;
data->arg.bitmask = server->attr_bitmask;
data->res.server = server;
data->res.fh = &data->fh;
data->res.fattr = &data->fattr;
data->res.dir_fattr = &data->dir_fattr;
nfs_fattr_init(data->res.fattr);
nfs_fattr_init(data->res.dir_fattr);
}
return data;
}
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
nfs_post_op_update_inode(dir, data->res.dir_fattr);
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
}
return status;
}
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
kfree(data);
}
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
struct page *page, unsigned int len, struct iattr *sattr)
{
struct nfs4_createdata *data;
int status = -ENAMETOOLONG;
if (len > NFS4_MAXPATHLEN)
goto out;
status = -ENOMEM;
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
if (data == NULL)
goto out;
data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
data->arg.u.symlink.pages = &page;
data->arg.u.symlink.len = len;
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
struct page *page, unsigned int len, struct iattr *sattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_symlink(dir, dentry, page,
len, sattr),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
struct iattr *sattr)
{
struct nfs4_createdata *data;
int status = -ENOMEM;
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
if (data == NULL)
goto out;
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
struct iattr *sattr)
{
struct nfs4_exception exception = { };
int err;
sattr->ia_mode &= ~current_umask();
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_mkdir(dir, dentry, sattr),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
struct nfs4_readdir_arg args = {
.fh = NFS_FH(dir),
.pages = pages,
.pgbase = 0,
.count = count,
.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
.plus = plus,
};
struct nfs4_readdir_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = cred,
};
int status;
dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
dentry->d_parent->d_name.name,
dentry->d_name.name,
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
if (status >= 0) {
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
status += args.pgbase;
}
nfs_invalidate_atime(dir);
dprintk("%s: returns %d\n", __func__, status);
return status;
}
static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
_nfs4_proc_readdir(dentry, cred, cookie,
pages, count, plus),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, dev_t rdev)
{
struct nfs4_createdata *data;
int mode = sattr->ia_mode;
int status = -ENOMEM;
BUG_ON(!(sattr->ia_valid & ATTR_MODE));
BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
if (data == NULL)
goto out;
if (S_ISFIFO(mode))
data->arg.ftype = NF4FIFO;
else if (S_ISBLK(mode)) {
data->arg.ftype = NF4BLK;
data->arg.u.device.specdata1 = MAJOR(rdev);
data->arg.u.device.specdata2 = MINOR(rdev);
}
else if (S_ISCHR(mode)) {
data->arg.ftype = NF4CHR;
data->arg.u.device.specdata1 = MAJOR(rdev);
data->arg.u.device.specdata2 = MINOR(rdev);
}
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, dev_t rdev)
{
struct nfs4_exception exception = { };
int err;
sattr->ia_mode &= ~current_umask();
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_mknod(dir, dentry, sattr, rdev),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsstat *fsstat)
{
struct nfs4_statfs_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_statfs_res res = {
.fsstat = fsstat,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fsstat->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_statfs(server, fhandle, fsstat),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *fsinfo)
{
struct nfs4_fsinfo_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_fsinfo_res res = {
.fsinfo = fsinfo,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_do_fsinfo(server, fhandle, fsinfo),
&exception);
} while (exception.retry);
return err;
}
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
nfs_fattr_init(fsinfo->fattr);
return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_pathconf *pathconf)
{
struct nfs4_pathconf_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_pathconf_res res = {
.pathconf = pathconf,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
.rpc_argp = &args,
.rpc_resp = &res,
};
/* None of the pathconf attributes are mandatory to implement */
if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
memset(pathconf, 0, sizeof(*pathconf));
return 0;
}
nfs_fattr_init(pathconf->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_pathconf *pathconf)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_pathconf(server, fhandle, pathconf),
&exception);
} while (exception.retry);
return err;
}
void __nfs4_read_done_cb(struct nfs_read_data *data)
{
nfs_invalidate_atime(data->inode);
}
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
__nfs4_read_done_cb(data);
if (task->tk_status > 0)
renew_lease(server, data->timestamp);
return 0;
}
static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->read_done_cb ? data->read_done_cb(task, data) :
nfs4_read_done_cb(task, data);
}
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
data->timestamp = jiffies;
data->read_done_cb = nfs4_read_done_cb;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
}
/* Reset the nfs_read_data to send the read to the MDS. */
void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
{
dprintk("%s Reset task for i/o through\n", __func__);
put_lseg(data->lseg);
data->lseg = NULL;
/* offsets will differ in the dense stripe case */
data->args.offset = data->mds_offset;
data->ds_clp = NULL;
data->args.fh = NFS_FH(data->inode);
data->read_done_cb = nfs4_read_done_cb;
task->tk_ops = data->mds_ops;
rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_read);
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
if (task->tk_status >= 0) {
renew_lease(NFS_SERVER(inode), data->timestamp);
nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
}
return 0;
}
static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->write_done_cb ? data->write_done_cb(task, data) :
nfs4_write_done_cb(task, data);
}
/* Reset the nfs_write_data to send the write to the MDS. */
void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
{
dprintk("%s Reset task for i/o through\n", __func__);
put_lseg(data->lseg);
data->lseg = NULL;
data->ds_clp = NULL;
data->write_done_cb = nfs4_write_done_cb;
data->args.fh = NFS_FH(data->inode);
data->args.bitmask = data->res.server->cache_consistency_bitmask;
data->args.offset = data->mds_offset;
data->res.fattr = &data->fattr;
task->tk_ops = data->mds_ops;
rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_write);
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (data->lseg) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
} else
data->args.bitmask = server->cache_consistency_bitmask;
if (!data->write_done_cb)
data->write_done_cb = nfs4_write_done_cb;
data->res.server = server;
data->timestamp = jiffies;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
}
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->write_done_cb(task, data);
}
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (data->lseg) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
} else
data->args.bitmask = server->cache_consistency_bitmask;
if (!data->write_done_cb)
data->write_done_cb = nfs4_commit_done_cb;
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
}
struct nfs4_renewdata {
struct nfs_client *client;
unsigned long timestamp;
};
/*
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
* standalone procedure for queueing an asynchronous RENEW.
*/
static void nfs4_renew_release(void *calldata)
{
struct nfs4_renewdata *data = calldata;
struct nfs_client *clp = data->client;
if (atomic_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
kfree(data);
}
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
struct nfs4_renewdata *data = calldata;
struct nfs_client *clp = data->client;
unsigned long timestamp = data->timestamp;
if (task->tk_status < 0) {
/* Unless we're shutting down, schedule state recovery! */
if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
return;
if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
nfs4_schedule_lease_recovery(clp);
return;
}
nfs4_schedule_path_down_recovery(clp);
}
do_renew_lease(clp, timestamp);
}
static const struct rpc_call_ops nfs4_renew_ops = {
.rpc_call_done = nfs4_renew_done,
.rpc_release = nfs4_renew_release,
};
static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
.rpc_cred = cred,
};
struct nfs4_renewdata *data;
if (renew_flags == 0)
return 0;
if (!atomic_inc_not_zero(&clp->cl_count))
return -EIO;
data = kmalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
data->client = clp;
data->timestamp = jiffies;
return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
&nfs4_renew_ops, data);
}
static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
.rpc_cred = cred,
};
unsigned long now = jiffies;
int status;
status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
if (status < 0)
return status;
do_renew_lease(clp, now);
return 0;
}
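/* ACLs are only usable if the server supports both ALLOW and DENY ACEs. */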
static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
return (server->caps & NFS_CAP_ACLS)
&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
* it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
* the stack.
*/
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
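/*
 * Copy a flat buffer into freshly allocated pages so that no slab memory
 * is handed to the RPC layer; returns the number of pages on success or
 * -ENOMEM after unwinding any partial allocation.
 */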
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
struct page *newpage, **spages;
int rc = 0;
size_t len;
spages = pages;
do {
len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
newpage = alloc_page(GFP_KERNEL);
if (newpage == NULL)
goto unwind;
memcpy(page_address(newpage), buf, len);
buf += len;
buflen -= len;
*pages++ = newpage;
rc++;
} while (buflen != 0);
return rc;
unwind:
for (; rc > 0; rc--)
__free_page(spages[rc-1]);
return -ENOMEM;
}
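/*
 * Cached ACL state: if 'cached' is set, data[] holds up to a page of ACL
 * data; otherwise only the length is known and the data must be re-fetched.
 */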
struct nfs4_cached_acl {
int cached;
size_t len;
char data[0];
};
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
kfree(nfsi->nfs4_acl);
nfsi->nfs4_acl = acl;
spin_unlock(&inode->i_lock);
}
static void nfs4_zap_acl_attr(struct inode *inode)
{
nfs4_set_cached_acl(inode, NULL);
}
static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs4_cached_acl *acl;
int ret = -ENOENT;
spin_lock(&inode->i_lock);
acl = nfsi->nfs4_acl;
if (acl == NULL)
goto out;
if (buf == NULL) /* user is just asking for length */
goto out_len;
if (acl->cached == 0)
goto out;
ret = -ERANGE; /* see getxattr(2) man page */
if (acl->len > buflen)
goto out;
memcpy(buf, acl->data, acl->len);
out_len:
ret = acl->len;
out:
spin_unlock(&inode->i_lock);
return ret;
}
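/* Cache the ACL data if it fits in a page, otherwise cache just the length. */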
static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
{
struct nfs4_cached_acl *acl;
if (buf && acl_len <= PAGE_SIZE) {
acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 1;
memcpy(acl->data, buf, acl_len);
} else {
acl = kmalloc(sizeof(*acl), GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 0;
}
acl->len = acl_len;
out:
nfs4_set_cached_acl(inode, acl);
}
/*
* The getxattr API returns the required buffer length when called with a
* NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
* the required buf. On a NULL buf, we send a page of data to the server
* guessing that the ACL request can be serviced by a page. If so, we cache
* up to the page of ACL data, and the second call to getxattr is serviced
* by the cache. If not, we throw away the page, and cache the required
* length. The next getxattr call will then produce another round trip to
* the server, this time with the input buf of the required size.
*/
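/*
 * Illustrative userspace calling pattern (a sketch only, assuming the ACL
 * is exposed through the "system.nfs4_acl" extended attribute):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	len = getxattr(path, "system.nfs4_acl", buf, len);
 */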
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
int ret = -ENOMEM, npages, i, acl_len = 0;
npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free;
}
if (npages > 1) {
/* for decoding across pages */
args.acl_scratch = alloc_page(GFP_KERNEL);
if (!args.acl_scratch)
goto out_free;
}
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
/* Let decode_getfacl know not to fail if the ACL data is larger than
* the page we send as a guess */
if (buf == NULL)
res.acl_flags |= NFS4_ACL_LEN_REQUEST;
resp_buf = page_address(pages[0]);
dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
&msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
acl_len = res.acl_len - res.acl_data_offset;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, acl_len);
else
nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
acl_len);
if (buf) {
ret = -ERANGE;
if (acl_len > buflen)
goto out_free;
_copy_from_pages(buf, pages, res.acl_data_offset,
res.acl_len);
}
ret = acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
if (args.acl_scratch)
__free_page(args.acl_scratch);
return ret;
}
static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct nfs4_exception exception = { };
ssize_t ret;
do {
ret = __nfs4_get_acl_uncached(inode, buf, buflen);
if (ret >= 0)
break;
ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
} while (exception.retry);
return ret;
}
static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
int ret;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
ret = nfs_revalidate_inode(server, inode);
if (ret < 0)
return ret;
if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
/* -ENOENT is returned if there is no ACL or if there is an ACL
* but no cached acl data, just the acl length */
return ret;
return nfs4_get_acl_uncached(inode, buf, buflen);
}
static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_setaclargs arg = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_setaclres res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int ret, i;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
if (i < 0)
return i;
nfs_inode_return_delegation(inode);
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
/*
* Free each page after tx, so the only ref left is
* held by the network stack
*/
for (; i > 0; i--)
put_page(pages[i-1]);
/*
* Acl update can result in inode attribute update.
* so mark the attribute cache invalid.
*/
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
spin_unlock(&inode->i_lock);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
return ret;
}
static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
__nfs4_proc_set_acl(inode, buf, buflen),
&exception);
} while (exception.retry);
return err;
}
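/*
 * Common error handling for asynchronous RPCs: kick off the appropriate
 * state/lease/session recovery and return -EAGAIN when the caller should
 * restart the call, or 0 when the (mapped) status should stand.
 */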
static int
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
struct nfs_client *clp = server->nfs_client;
if (task->tk_status >= 0)
return 0;
switch (task->tk_status) {
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
if (state == NULL)
break;
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
if (state != NULL)
nfs4_schedule_stateid_recovery(server, state);
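/* fall through: an expired stateid also requires lease recovery */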
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
nfs4_schedule_session_recovery(clp->cl_session);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
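/* fall through: delay the retry, as for NFS4ERR_GRACE */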
case -NFS4ERR_GRACE:
case -EKEYEXPIRED:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
task->tk_status = 0;
return -EAGAIN;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
task->tk_status = 0;
return -EAGAIN;
}
task->tk_status = nfs4_map_errors(task->tk_status);
return 0;
wait_on_recovery:
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
}
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
unsigned short port, struct rpc_cred *cred,
struct nfs4_setclientid_res *res)
{
nfs4_verifier sc_verifier;
struct nfs4_setclientid setclientid = {
.sc_verifier = &sc_verifier,
.sc_prog = program,
.sc_cb_ident = clp->cl_cb_ident,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
.rpc_argp = &setclientid,
.rpc_resp = res,
.rpc_cred = cred,
};
__be32 *p;
int loop = 0;
int status;
p = (__be32 *)sc_verifier.data;
*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
*p = htonl((u32)clp->cl_boot_time.tv_nsec);
for (;;) {
setclientid.sc_name_len = scnprintf(setclientid.sc_name,
sizeof(setclientid.sc_name), "%s/%s %s %s %u",
clp->cl_ipaddr,
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_PROTO),
clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_id_uniquifier);
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
sizeof(setclientid.sc_netid),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_NETID));
setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
sizeof(setclientid.sc_uaddr), "%s.%u.%u",
clp->cl_ipaddr, port >> 8, port & 255);
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status != -NFS4ERR_CLID_INUSE)
break;
if (loop != 0) {
++clp->cl_id_uniquifier;
break;
}
++loop;
ssleep(clp->cl_lease_time / HZ + 1);
}
return status;
}
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
struct nfs4_setclientid_res *arg,
struct rpc_cred *cred)
{
struct nfs_fsinfo fsinfo;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
.rpc_argp = arg,
.rpc_resp = &fsinfo,
.rpc_cred = cred,
};
unsigned long now;
int status;
now = jiffies;
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status == 0) {
spin_lock(&clp->cl_lock);
clp->cl_lease_time = fsinfo.lease_time * HZ;
clp->cl_last_renewal = now;
spin_unlock(&clp->cl_lock);
}
return status;
}
struct nfs4_delegreturndata {
struct nfs4_delegreturnargs args;
struct nfs4_delegreturnres res;
struct nfs_fh fh;
nfs4_stateid stateid;
unsigned long timestamp;
struct nfs_fattr fattr;
int rpc_status;
};
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
switch (task->tk_status) {
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
case 0:
renew_lease(data->res.server, data->timestamp);
break;
default:
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
-EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
data->rpc_status = task->tk_status;
}
static void nfs4_delegreturn_release(void *calldata)
{
kfree(calldata);
}
#if defined(CONFIG_NFS_V4_1)
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
struct nfs4_delegreturndata *d_data;
d_data = (struct nfs4_delegreturndata *)data;
if (nfs4_setup_sequence(d_data->res.server,
&d_data->args.seq_args,
&d_data->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs4_delegreturn_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs4_delegreturn_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs4_delegreturn_done,
.rpc_release = nfs4_delegreturn_release,
};
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
struct nfs4_delegreturndata *data;
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
.rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_delegreturn_ops,
.flags = RPC_TASK_ASYNC,
};
int status = 0;
data = kzalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
data->args.fhandle = &data->fh;
data->args.stateid = &data->stateid;
data->args.bitmask = server->attr_bitmask;
nfs_copy_fh(&data->fh, NFS_FH(inode));
memcpy(&data->stateid, stateid, sizeof(data->stateid));
data->res.fattr = &data->fattr;
data->res.server = server;
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
if (!issync)
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0)
goto out;
status = data->rpc_status;
if (status != 0)
goto out;
nfs_refresh_inode(inode, &data->fattr);
out:
rpc_put_task(task);
return status;
}
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
switch (err) {
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
case 0:
return 0;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
/*
* sleep, with exponential backoff, and retry the LOCK operation.
*/
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
schedule_timeout_killable(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
return timeout;
}
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct inode *inode = state->inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
struct nfs_lockt_args arg = {
.fh = NFS_FH(inode),
.fl = request,
};
struct nfs_lockt_res res = {
.denied = request,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
.rpc_argp = &arg,
.rpc_resp = &res,
.rpc_cred = state->owner->so_cred,
};
struct nfs4_lock_state *lsp;
int status;
arg.lock_owner.clientid = clp->cl_clientid;
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_id.id;
arg.lock_owner.s_dev = server->s_dev;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
switch (status) {
case 0:
request->fl_type = F_UNLCK;
break;
case -NFS4ERR_DENIED:
status = 0;
}
request->fl_ops->fl_release_private(request);
out:
return status;
}
static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(state->inode),
_nfs4_proc_getlk(state, cmd, request),
&exception);
} while (exception.retry);
return err;
}
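/* Apply the lock locally, dispatching on POSIX vs. flock semantics. */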
static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
int res = 0;
switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
case FL_POSIX:
res = posix_lock_file_wait(file, fl);
break;
case FL_FLOCK:
res = flock_lock_file_wait(file, fl);
break;
default:
BUG();
}
return res;
}
struct nfs4_unlockdata {
struct nfs_locku_args arg;
struct nfs_locku_res res;
struct nfs4_lock_state *lsp;
struct nfs_open_context *ctx;
struct file_lock fl;
const struct nfs_server *server;
unsigned long timestamp;
};
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs_open_context *ctx,
struct nfs4_lock_state *lsp,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *p;
struct inode *inode = lsp->ls_state->inode;
p = kzalloc(sizeof(*p), GFP_NOFS);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
p->server = NFS_SERVER(inode);
return p;
}
static void nfs4_locku_release_calldata(void *data)
{
struct nfs4_unlockdata *calldata = data;
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_lock_state(calldata->lsp);
put_nfs_open_context(calldata->ctx);
kfree(calldata);
}
static void nfs4_locku_done(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
switch (task->tk_status) {
case 0:
memcpy(calldata->lsp->ls_stateid.data,
calldata->res.stateid.data,
sizeof(calldata->lsp->ls_stateid.data));
renew_lease(calldata->server, calldata->timestamp);
break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
rpc_restart_call_prepare(task);
}
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
task->tk_action = NULL;
return;
}
calldata->timestamp = jiffies;
if (nfs4_setup_sequence(calldata->server,
&calldata->arg.seq_args,
&calldata->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
static const struct rpc_call_ops nfs4_locku_ops = {
.rpc_call_prepare = nfs4_locku_prepare,
.rpc_call_done = nfs4_locku_done,
.rpc_release = nfs4_locku_release_calldata,
};
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
struct nfs_open_context *ctx,
struct nfs4_lock_state *lsp,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
.rpc_cred = ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_locku_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
/* Ensure this is an unlock - when canceling a lock, the
* canceled lock is passed in, and it won't be an unlock.
*/
fl->fl_type = F_UNLCK;
data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
if (data == NULL) {
nfs_free_seqid(seqid);
return ERR_PTR(-ENOMEM);
}
msg.rpc_argp = &data->arg;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
return rpc_run_task(&task_setup_data);
}
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_seqid *seqid;
struct nfs4_lock_state *lsp;
struct rpc_task *task;
int status = 0;
unsigned char fl_flags = request->fl_flags;
status = nfs4_set_lock_state(state, request);
/* Unlock _before_ we do the RPC call */
request->fl_flags |= FL_EXISTS;
down_read(&nfsi->rwsem);
if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
up_read(&nfsi->rwsem);
goto out;
}
up_read(&nfsi->rwsem);
if (status != 0)
goto out;
/* Is this a delegated lock? */
if (test_bit(NFS_DELEGATED_STATE, &state->flags))
goto out;
lsp = request->fl_u.nfs4_fl.owner;
seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
if (seqid == NULL)
goto out;
task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
status = PTR_ERR(task);
if (IS_ERR(task))
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
rpc_put_task(task);
out:
request->fl_flags = fl_flags;
return status;
}
struct nfs4_lockdata {
struct nfs_lock_args arg;
struct nfs_lock_res res;
struct nfs4_lock_state *lsp;
struct nfs_open_context *ctx;
struct file_lock fl;
unsigned long timestamp;
int rpc_status;
int cancelled;
struct nfs_server *server;
};
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
gfp_t gfp_mask)
{
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
if (p->arg.open_seqid == NULL)
goto out_free;
p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
if (p->arg.lock_seqid == NULL)
goto out_free_seqid;
p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
p->arg.lock_owner.s_dev = server->s_dev;
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
nfs_free_seqid(p->arg.open_seqid);
out_free:
kfree(p);
return NULL;
}
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
struct nfs4_state *state = data->lsp->ls_state;
dprintk("%s: begin!\n", __func__);
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
return;
/* Do we need to do an open_to_lock_owner? */
if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
return;
data->arg.open_stateid = &state->stateid;
data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid;
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
if (nfs4_setup_sequence(data->server,
&data->arg.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs4_lock_prepare(task, calldata);
}
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
dprintk("%s: begin!\n", __func__);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
data->rpc_status = task->tk_status;
if (data->arg.new_lock_owner != 0) {
if (data->rpc_status == 0)
nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
else
goto out;
}
if (data->rpc_status == 0) {
memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
sizeof(data->lsp->ls_stateid.data));
data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
}
out:
dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
static void nfs4_lock_release(void *calldata)
{
struct nfs4_lockdata *data = calldata;
dprintk("%s: begin!\n", __func__);
nfs_free_seqid(data->arg.open_seqid);
if (data->cancelled != 0) {
struct rpc_task *task;
task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
data->arg.lock_seqid);
if (!IS_ERR(task))
rpc_put_task_async(task);
dprintk("%s: cancelling lock!\n", __func__);
} else
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
kfree(data);
dprintk("%s: done!\n", __func__);
}
static const struct rpc_call_ops nfs4_lock_ops = {
.rpc_call_prepare = nfs4_lock_prepare,
.rpc_call_done = nfs4_lock_done,
.rpc_release = nfs4_lock_release,
};
static const struct rpc_call_ops nfs4_recover_lock_ops = {
.rpc_call_prepare = nfs4_recover_lock_prepare,
.rpc_call_done = nfs4_lock_done,
.rpc_release = nfs4_lock_release,
};
static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
{
switch (error) {
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
if (new_lock_owner != 0 ||
(lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
nfs4_schedule_stateid_recovery(server, lsp->ls_state);
break;
case -NFS4ERR_STALE_STATEID:
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
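/* fall through: a stale stateid also requires lease recovery */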
case -NFS4ERR_EXPIRED:
nfs4_schedule_lease_recovery(server->nfs_client);
}
}
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
struct nfs4_lockdata *data;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
.rpc_cred = state->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_lock_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int ret;
dprintk("%s: begin!\n", __func__);
data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
fl->fl_u.nfs4_fl.owner,
recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
if (data == NULL)
return -ENOMEM;
if (IS_SETLKW(cmd))
data->arg.block = 1;
if (recovery_type > NFS_LOCK_NEW) {
if (recovery_type == NFS_LOCK_RECLAIM)
data->arg.reclaim = NFS_LOCK_RECLAIM;
task_setup_data.callback_ops = &nfs4_recover_lock_ops;
}
msg.rpc_argp = &data->arg;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
if (ret)
nfs4_handle_setlk_error(data->server, data->lsp,
data->arg.new_lock_owner, ret);
} else
data->cancelled = 1;
rpc_put_task(task);
dprintk("%s: done, ret = %d!\n", __func__, ret);
return ret;
}
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
/* Cache the lock if possible... */
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
err = nfs4_set_lock_state(state, request);
if (err != 0)
return err;
do {
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
switch (err) {
default:
goto out;
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
nfs4_handle_exception(server, err, &exception);
err = 0;
}
} while (exception.retry);
out:
return err;
}
#if defined(CONFIG_NFS_V4_1)
static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
int status;
struct nfs_server *server = NFS_SERVER(state->inode);
status = nfs41_test_stateid(server, state);
if (status == NFS_OK)
return 0;
nfs41_free_stateid(server, state);
return nfs4_lock_expired(state, request);
}
#endif
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
int status = -ENOLCK;
if ((fl_flags & FL_POSIX) &&
!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
goto out;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
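/* Use an FL_ACCESS request to test for a conflicting local lock first */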
request->fl_flags |= FL_ACCESS;
status = do_vfs_lock(request->fl_file, request);
if (status < 0)
goto out;
down_read(&nfsi->rwsem);
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
/* Yes: cache locks! */
/* ...but avoid races with delegation recall... */
request->fl_flags = fl_flags & ~FL_SLEEP;
status = do_vfs_lock(request->fl_file, request);
goto out_unlock;
}
status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
if (status != 0)
goto out_unlock;
/* Note: we always want to sleep here! */
request->fl_flags = fl_flags | FL_SLEEP;
if (do_vfs_lock(request->fl_file, request) < 0)
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
out_unlock:
up_read(&nfsi->rwsem);
out:
request->fl_flags = fl_flags;
return status;
}
static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_proc_setlk(state, cmd, request);
if (err == -NFS4ERR_DENIED)
err = -EAGAIN;
err = nfs4_handle_exception(NFS_SERVER(state->inode),
err, &exception);
} while (exception.retry);
return err;
}
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
struct nfs_open_context *ctx;
struct nfs4_state *state;
unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
int status;
/* verify open state */
ctx = nfs_file_open_context(filp);
state = ctx->state;
if (request->fl_start < 0 || request->fl_end < 0)
return -EINVAL;
if (IS_GETLK(cmd)) {
if (state != NULL)
return nfs4_proc_getlk(state, F_GETLK, request);
return 0;
}
if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
return -EINVAL;
if (request->fl_type == F_UNLCK) {
if (state != NULL)
return nfs4_proc_unlck(state, cmd, request);
return 0;
}
if (state == NULL)
return -ENOLCK;
do {
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
break;
timeout = nfs4_set_lock_task_retry(timeout);
status = -ERESTARTSYS;
if (signalled())
break;
} while (status < 0);
return status;
}
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
err = nfs4_set_lock_state(state, fl);
if (err != 0)
goto out;
do {
err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
switch (err) {
default:
printk(KERN_ERR "%s: unhandled error %d.\n",
__func__, err);
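/* fall through */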
case 0:
case -ESTALE:
goto out;
case -NFS4ERR_EXPIRED:
nfs4_schedule_stateid_recovery(server, state);
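/* fall through: an expired stateid also means the lease needs recovery */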
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
nfs4_schedule_lease_recovery(server->nfs_client);
goto out;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
goto out;
case -ERESTARTSYS:
/*
* The show must go on: exit, but mark the
* stateid as needing recovery.
*/
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
nfs4_schedule_stateid_recovery(server, state);
err = 0;
goto out;
case -EKEYEXPIRED:
/*
* User RPCSEC_GSS context has expired.
* We cannot recover this stateid now, so
* skip it and allow recovery thread to
* proceed.
*/
err = 0;
goto out;
case -ENOMEM:
case -NFS4ERR_DENIED:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
err = 0;
goto out;
case -NFS4ERR_DELAY:
break;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
static void nfs4_release_lockowner_release(void *calldata)
{
kfree(calldata);
}
const struct rpc_call_ops nfs4_release_lockowner_ops = {
.rpc_release = nfs4_release_lockowner_release,
};
void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
{
struct nfs_server *server = lsp->ls_state->owner->so_server;
struct nfs_release_lockowner_args *args;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
};
if (server->nfs_client->cl_mvops->minor_version != 0)
return;
args = kmalloc(sizeof(*args), GFP_NOFS);
if (!args)
return;
args->lock_owner.clientid = server->nfs_client->cl_clientid;
args->lock_owner.id = lsp->ls_id.id;
args->lock_owner.s_dev = server->s_dev;
msg.rpc_argp = args;
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
}
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
const void *buf, size_t buflen,
int flags, int type)
{
if (strcmp(key, "") != 0)
return -EINVAL;
return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
}
static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
void *buf, size_t buflen, int type)
{
if (strcmp(key, "") != 0)
return -EINVAL;
return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
}
static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
size_t list_len, const char *name,
size_t name_len, int type)
{
size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
return 0;
if (list && len <= list_len)
memcpy(list, XATTR_NAME_NFSV4_ACL, len);
return len;
}
/*
* nfs_fhget will use either the mounted_on_fileid or the fileid
*/
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
{
if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
(fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
(fattr->valid & NFS_ATTR_FATTR_FSID) &&
(fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
return;
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
NFS_ATTR_FATTR_NLINK;
fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
fattr->nlink = 2;
}
int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page)
{
struct nfs_server *server = NFS_SERVER(dir);
u32 bitmask[2] = {
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
};
struct nfs4_fs_locations_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
.page = page,
.bitmask = bitmask,
};
struct nfs4_fs_locations_res res = {
.fs_locations = fs_locations,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("%s: start\n", __func__);
/* Ask for the fileid of the absent filesystem if mounted_on_fileid
* is not supported */
if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
else
bitmask[0] |= FATTR4_WORD0_FILEID;
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
int status;
struct nfs4_secinfo_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
};
struct nfs4_secinfo_res res = {
.flavors = flavors,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
dprintk("NFS call secinfo %s\n", name->name);
status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply secinfo: %d\n", status);
return status;
}
int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_secinfo(dir, name, flavors),
&exception);
} while (exception.retry);
return err;
}
#ifdef CONFIG_NFS_V4_1
/*
* Check the exchange flags returned by the server for invalid flags, having
* both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
* DS flags set.
*/
static int nfs4_check_cl_exchange_flags(u32 flags)
{
if (flags & ~EXCHGID4_FLAG_MASK_R)
goto out_inval;
if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
(flags & EXCHGID4_FLAG_USE_NON_PNFS))
goto out_inval;
if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
goto out_inval;
return NFS_OK;
out_inval:
return -NFS4ERR_INVAL;
}
static bool
nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
{
if (a->server_scope_sz == b->server_scope_sz &&
memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
return true;
return false;
}
/*
* nfs4_proc_exchange_id()
*
* Since the clientid has expired, all compounds using sessions
* associated with the stale clientid will be returning
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
* be in some phase of session reset.
*/
int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
{
nfs4_verifier verifier;
struct nfs41_exchange_id_args args = {
.client = clp,
.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
};
struct nfs41_exchange_id_res res = {
.client = clp,
};
int status;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = cred,
};
__be32 *p;
dprintk("--> %s\n", __func__);
BUG_ON(clp == NULL);
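/* Build the EXCHANGE_ID verifier from the client's boot time */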
p = (__be32 *)verifier.data;
*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
*p = htonl((u32)clp->cl_boot_time.tv_nsec);
args.verifier = &verifier;
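/* Unique client owner id: <ip-address>/<nodename>.<domainname>/<auth flavor> */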
args.id_len = scnprintf(args.id, sizeof(args.id),
"%s/%s.%s/%u",
clp->cl_ipaddr,
init_utsname()->nodename,
init_utsname()->domainname,
clp->cl_rpcclient->cl_auth->au_flavor);
res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
if (unlikely(!res.server_scope))
return -ENOMEM;
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (!status)
status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
if (!status) {
if (clp->server_scope &&
!nfs41_same_server_scope(clp->server_scope,
res.server_scope)) {
dprintk("%s: server_scope mismatch detected\n",
__func__);
set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
kfree(clp->server_scope);
clp->server_scope = NULL;
}
if (!clp->server_scope)
clp->server_scope = res.server_scope;
else
kfree(res.server_scope);
}
dprintk("<-- %s status= %d\n", __func__, status);
return status;
}
struct nfs4_get_lease_time_data {
struct nfs4_get_lease_time_args *args;
struct nfs4_get_lease_time_res *res;
struct nfs_client *clp;
};
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
void *calldata)
{
int ret;
struct nfs4_get_lease_time_data *data =
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
/* just setup sequence, do not trigger session recovery
since we're invoked within one */
ret = nfs41_setup_sequence(data->clp->cl_session,
&data->args->la_seq_args,
&data->res->lr_seq_res, 0, task);
BUG_ON(ret == -EAGAIN);
rpc_call_start(task);
dprintk("<-- %s\n", __func__);
}
/*
* Called from nfs4_state_manager thread for session setup, so don't recover
* from sequence operation or clientid errors.
*/
static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
{
struct nfs4_get_lease_time_data *data =
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
return;
switch (task->tk_status) {
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
rpc_restart_call_prepare(task);
return;
}
dprintk("<-- %s\n", __func__);
}
struct rpc_call_ops nfs4_get_lease_time_ops = {
.rpc_call_prepare = nfs4_get_lease_time_prepare,
.rpc_call_done = nfs4_get_lease_time_done,
};
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
struct rpc_task *task;
struct nfs4_get_lease_time_args args;
struct nfs4_get_lease_time_res res = {
.lr_fsinfo = fsinfo,
};
struct nfs4_get_lease_time_data data = {
.args = &args,
.res = &res,
.clp = clp,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
.rpc_argp = &args,
.rpc_resp = &res,
};
struct rpc_task_setup task_setup = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_get_lease_time_ops,
.callback_data = &data,
.flags = RPC_TASK_TIMEOUT,
};
int status;
dprintk("--> %s\n", __func__);
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
status = PTR_ERR(task);
else {
status = task->tk_status;
rpc_put_task(task);
}
dprintk("<-- %s return %d\n", __func__, status);
return status;
}
/*
* Reset a slot table
*/
static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
int ivalue)
{
struct nfs4_slot *new = NULL;
int i;
int ret = 0;
dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
max_reqs, tbl->max_slots);
/* Does the newly negotiated max_reqs match the existing slot table? */
if (max_reqs != tbl->max_slots) {
ret = -ENOMEM;
new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
GFP_NOFS);
if (!new)
goto out;
ret = 0;
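/* Free the old table now; tbl->slots is repointed at the new one below, under the slot table lock */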
kfree(tbl->slots);
}
spin_lock(&tbl->slot_tbl_lock);
if (new) {
tbl->slots = new;
tbl->max_slots = max_reqs;
}
for (i = 0; i < tbl->max_slots; ++i)
tbl->slots[i].seq_nr = ivalue;
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
tbl, tbl->slots, tbl->max_slots);
out:
dprintk("<-- %s: return %d\n", __func__, ret);
return ret;
}
/* Destroy the slot table */
static void nfs4_destroy_slot_tables(struct nfs4_session *session)
{
if (session->fc_slot_table.slots != NULL) {
kfree(session->fc_slot_table.slots);
session->fc_slot_table.slots = NULL;
}
if (session->bc_slot_table.slots != NULL) {
kfree(session->bc_slot_table.slots);
session->bc_slot_table.slots = NULL;
}
return;
}
/*
* Initialize slot table
*/
static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
int max_slots, int ivalue)
{
struct nfs4_slot *slot;
int ret = -ENOMEM;
BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
if (!slot)
goto out;
ret = 0;
spin_lock(&tbl->slot_tbl_lock);
tbl->max_slots = max_slots;
tbl->slots = slot;
tbl->highest_used_slotid = -1; /* no slot is currently used */
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
tbl, tbl->slots, tbl->max_slots);
out:
dprintk("<-- %s: return %d\n", __func__, ret);
return ret;
}
/*
* Initialize or reset the forechannel and backchannel tables
*/
static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
struct nfs4_slot_table *tbl;
int status;
dprintk("--> %s\n", __func__);
/* Fore channel */
tbl = &ses->fc_slot_table;
if (tbl->slots == NULL) {
status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status) /* -ENOMEM */
return status;
} else {
status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status)
return status;
}
/* Back channel */
tbl = &ses->bc_slot_table;
if (tbl->slots == NULL) {
status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
if (status)
/* Fore and back channel share a connection so get
* both slot tables or neither */
nfs4_destroy_slot_tables(ses);
} else
status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
return status;
}
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
struct nfs4_session *session;
struct nfs4_slot_table *tbl;
session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
if (!session)
return NULL;
tbl = &session->fc_slot_table;
tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
init_completion(&tbl->complete);
tbl = &session->bc_slot_table;
tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
init_completion(&tbl->complete);
session->session_state = 1 << NFS4_SESSION_INITING;
session->clp = clp;
return session;
}
void nfs4_destroy_session(struct nfs4_session *session)
{
nfs4_proc_destroy_session(session);
dprintk("%s Destroy backchannel for xprt %p\n",
__func__, session->clp->cl_rpcclient->cl_xprt);
xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
NFS41_BC_MIN_CALLBACKS);
nfs4_destroy_slot_tables(session);
kfree(session);
}
/*
* Initialize the values to be used by the client in CREATE_SESSION
* If nfs4_init_session set the fore channel request and response sizes,
* use them.
*
* Set the back channel max_resp_sz_cached to zero to force the client to
* always set csa_cachethis to FALSE because the current implementation
* of the back channel DRC only supports caching the CB_SEQUENCE operation.
*/
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
struct nfs4_session *session = args->client->cl_session;
unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
mxresp_sz = session->fc_attrs.max_resp_sz;
if (mxrqst_sz == 0)
mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
if (mxresp_sz == 0)
mxresp_sz = NFS_MAX_FILE_IO_SIZE;
/* Fore channel attributes */
args->fc_attrs.max_rqst_sz = mxrqst_sz;
args->fc_attrs.max_resp_sz = mxresp_sz;
args->fc_attrs.max_ops = NFS4_MAX_OPS;
args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
"max_ops=%u max_reqs=%u\n",
__func__,
args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
/* Back channel attributes */
args->bc_attrs.max_rqst_sz = PAGE_SIZE;
args->bc_attrs.max_resp_sz = PAGE_SIZE;
args->bc_attrs.max_resp_sz_cached = 0;
args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
args->bc_attrs.max_reqs = 1;
dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
__func__,
args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
args->bc_attrs.max_reqs);
}
static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
struct nfs4_channel_attrs *sent = &args->fc_attrs;
struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
if (rcvd->max_resp_sz > sent->max_resp_sz)
return -EINVAL;
/*
* Our requested max_ops is the minimum we need; we're not
* prepared to break up compounds into smaller pieces than that.
* So, no point even trying to continue if the server won't
* cooperate:
*/
if (rcvd->max_ops < sent->max_ops)
return -EINVAL;
if (rcvd->max_reqs == 0)
return -EINVAL;
return 0;
}
static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
struct nfs4_channel_attrs *sent = &args->bc_attrs;
struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
if (rcvd->max_rqst_sz > sent->max_rqst_sz)
return -EINVAL;
if (rcvd->max_resp_sz < sent->max_resp_sz)
return -EINVAL;
if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
return -EINVAL;
/* These would render the backchannel useless: */
if (rcvd->max_ops == 0)
return -EINVAL;
if (rcvd->max_reqs == 0)
return -EINVAL;
return 0;
}
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
struct nfs4_session *session)
{
int ret;
ret = nfs4_verify_fore_channel_attrs(args, session);
if (ret)
return ret;
return nfs4_verify_back_channel_attrs(args, session);
}
static int _nfs4_proc_create_session(struct nfs_client *clp)
{
struct nfs4_session *session = clp->cl_session;
struct nfs41_create_session_args args = {
.client = clp,
.cb_program = NFS4_CALLBACK,
};
struct nfs41_create_session_res res = {
.client = clp,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
nfs4_init_channel_attrs(&args);
args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (!status)
/* Verify the session's negotiated channel_attrs values */
status = nfs4_verify_channel_attrs(&args, session);
if (!status) {
/* Increment the clientid slot sequence id */
clp->cl_seqid++;
}
return status;
}
/*
* Issues a CREATE_SESSION operation to the server.
* It is the responsibility of the caller to verify the session is
* expired before calling this routine.
*/
int nfs4_proc_create_session(struct nfs_client *clp)
{
int status;
unsigned *ptr;
struct nfs4_session *session = clp->cl_session;
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
status = _nfs4_proc_create_session(clp);
if (status)
goto out;
/* Init or reset the session slot tables */
status = nfs4_setup_session_slot_tables(session);
dprintk("slot table setup returned %d\n", status);
if (status)
goto out;
ptr = (unsigned *)&session->sess_id.data[0];
dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
out:
dprintk("<-- %s\n", __func__);
return status;
}
/*
* Issue the over-the-wire RPC DESTROY_SESSION.
* The caller must serialize access to this routine.
*/
int nfs4_proc_destroy_session(struct nfs4_session *session)
{
int status = 0;
struct rpc_message msg;
dprintk("--> nfs4_proc_destroy_session\n");
/* session is still being setup */
if (session->clp->cl_cons_state != NFS_CS_READY)
return status;
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
msg.rpc_argp = session;
msg.rpc_resp = NULL;
msg.rpc_cred = NULL;
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status)
printk(KERN_WARNING
"Got error %d from the server on DESTROY_SESSION. "
"Session has been destroyed regardless...\n", status);
dprintk("<-- nfs4_proc_destroy_session\n");
return status;
}
int nfs4_init_session(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_session *session;
unsigned int rsize, wsize;
int ret;
if (!nfs4_has_session(clp))
return 0;
session = clp->cl_session;
if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
return 0;
rsize = server->rsize;
if (rsize == 0)
rsize = NFS_MAX_FILE_IO_SIZE;
wsize = server->wsize;
if (wsize == 0)
wsize = NFS_MAX_FILE_IO_SIZE;
session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
ret = nfs4_recover_expired_lease(server);
if (!ret)
ret = nfs4_check_client_ready(clp);
return ret;
}
int nfs4_init_ds_session(struct nfs_client *clp)
{
struct nfs4_session *session = clp->cl_session;
int ret;
if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
return 0;
ret = nfs4_client_recover_expired_lease(clp);
if (!ret)
/* Test for the DS role */
if (!is_ds_client(clp))
ret = -ENODEV;
if (!ret)
ret = nfs4_check_client_ready(clp);
return ret;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
/*
* Renew the cl_session lease.
*/
struct nfs4_sequence_data {
struct nfs_client *clp;
struct nfs4_sequence_args args;
struct nfs4_sequence_res res;
};
static void nfs41_sequence_release(void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
if (atomic_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
kfree(calldata);
}
static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
switch (task->tk_status) {
case -NFS4ERR_DELAY:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return -EAGAIN;
default:
nfs4_schedule_lease_recovery(clp);
}
return 0;
}
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
return;
if (task->tk_status < 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (atomic_read(&clp->cl_count) == 1)
goto out;
if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
dprintk("<-- %s\n", __func__);
}
static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
struct nfs4_sequence_args *args;
struct nfs4_sequence_res *res;
args = task->tk_msg.rpc_argp;
res = task->tk_msg.rpc_resp;
if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task))
return;
rpc_call_start(task);
}
static const struct rpc_call_ops nfs41_sequence_ops = {
.rpc_call_done = nfs41_sequence_call_done,
.rpc_call_prepare = nfs41_sequence_prepare,
.rpc_release = nfs41_sequence_release,
};
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
struct nfs4_sequence_data *calldata;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
.rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs41_sequence_ops,
.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
};
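/* Take a reference on the client; bail out if it is already being torn down */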
if (!atomic_inc_not_zero(&clp->cl_count))
return ERR_PTR(-EIO);
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
if (calldata == NULL) {
nfs_put_client(clp);
return ERR_PTR(-ENOMEM);
}
msg.rpc_argp = &calldata->args;
msg.rpc_resp = &calldata->res;
calldata->clp = clp;
task_setup_data.callback_data = calldata;
return rpc_run_task(&task_setup_data);
}
static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
struct rpc_task *task;
int ret = 0;
if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
return 0;
task = _nfs41_proc_sequence(clp, cred);
if (IS_ERR(task))
ret = PTR_ERR(task);
else
rpc_put_task_async(task);
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_task *task;
int ret;
task = _nfs41_proc_sequence(clp, cred);
if (IS_ERR(task)) {
ret = PTR_ERR(task);
goto out;
}
ret = rpc_wait_for_completion_task(task);
if (!ret) {
struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
if (task->tk_status == 0)
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
ret = task->tk_status;
}
rpc_put_task(task);
out:
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
struct nfs4_reclaim_complete_data {
struct nfs_client *clp;
struct nfs41_reclaim_complete_args arg;
struct nfs41_reclaim_complete_res res;
};
static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
if (nfs41_setup_sequence(calldata->clp->cl_session,
&calldata->arg.seq_args,
&calldata->res.seq_res, 0, task))
return;
rpc_call_start(task);
}
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
switch (task->tk_status) {
case 0:
case -NFS4ERR_COMPLETE_ALREADY:
case -NFS4ERR_WRONG_CRED: /* What to do here? */
break;
case -NFS4ERR_DELAY:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
return -EAGAIN;
default:
nfs4_schedule_lease_recovery(clp);
}
return 0;
}
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
struct nfs_client *clp = calldata->clp;
struct nfs4_sequence_res *res = &calldata->res.seq_res;
dprintk("--> %s\n", __func__);
if (!nfs41_sequence_done(task, res))
return;
if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
dprintk("<-- %s\n", __func__);
}
static void nfs4_free_reclaim_complete_data(void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
kfree(calldata);
}
static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
.rpc_call_prepare = nfs4_reclaim_complete_prepare,
.rpc_call_done = nfs4_reclaim_complete_done,
.rpc_release = nfs4_free_reclaim_complete_data,
};
/*
* Issue a global reclaim complete.
*/
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
struct nfs4_reclaim_complete_data *calldata;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_reclaim_complete_call_ops,
.flags = RPC_TASK_ASYNC,
};
int status = -ENOMEM;
dprintk("--> %s\n", __func__);
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
if (calldata == NULL)
goto out;
calldata->clp = clp;
calldata->arg.one_fs = 0;
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task)) {
status = PTR_ERR(task);
goto out;
}
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
rpc_put_task(task);
return 0;
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
dprintk("--> %s\n", __func__);
/* Note the is a race here, where a CB_LAYOUTRECALL can come in
* right now covering the LAYOUTGET we are about to send.
* However, that is not so catastrophic, and there seems
* to be no way to prevent it completely.
*/
if (nfs4_setup_sequence(server, &lgp->args.seq_args,
&lgp->res.seq_res, 0, task))
return;
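/* A nonzero return means layoutgets are currently blocked (e.g. by a recall), so complete the task without sending anything */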
if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
NFS_I(lgp->args.inode)->layout,
lgp->args.ctx->state)) {
rpc_exit(task, NFS4_OK);
return;
}
rpc_call_start(task);
}
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &lgp->res.seq_res))
return;
switch (task->tk_status) {
case 0:
break;
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
task->tk_status = -NFS4ERR_DELAY;
/* Fall through */
default:
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
dprintk("<-- %s\n", __func__);
}
static void nfs4_layoutget_release(void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
dprintk("--> %s\n", __func__);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
static const struct rpc_call_ops nfs4_layoutget_call_ops = {
.rpc_call_prepare = nfs4_layoutget_prepare,
.rpc_call_done = nfs4_layoutget_done,
.rpc_release = nfs4_layoutget_release,
};
int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
{
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
.rpc_argp = &lgp->args,
.rpc_resp = &lgp->res,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_layoutget_call_ops,
.callback_data = lgp,
.flags = RPC_TASK_ASYNC,
};
int status = 0;
dprintk("--> %s\n", __func__);
lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
if (status == 0)
status = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
static void
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
dprintk("--> %s\n", __func__);
if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
&lrp->res.seq_res, 0, task))
return;
rpc_call_start(task);
}
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
struct nfs_server *server;
struct pnfs_layout_hdr *lo = lrp->args.layout;
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &lrp->res.seq_res))
return;
server = NFS_SERVER(lrp->args.inode);
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
spin_lock(&lo->plh_inode->i_lock);
if (task->tk_status == 0) {
if (lrp->res.lrs_present) {
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
} else
BUG_ON(!list_empty(&lo->plh_segs));
}
lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
dprintk("<-- %s\n", __func__);
}
static void nfs4_layoutreturn_release(void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
dprintk("--> %s\n", __func__);
put_layout_hdr(lrp->args.layout);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
.rpc_call_prepare = nfs4_layoutreturn_prepare,
.rpc_call_done = nfs4_layoutreturn_done,
.rpc_release = nfs4_layoutreturn_release,
};
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
.rpc_argp = &lrp->args,
.rpc_resp = &lrp->res,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = lrp->clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_layoutreturn_call_ops,
.callback_data = lrp,
};
int status;
dprintk("--> %s\n", __func__);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = task->tk_status;
dprintk("<-- %s status=%d\n", __func__, status);
rpc_put_task(task);
return status;
}
/*
* Retrieve the list of Data Server devices from the MDS.
*/
static int _nfs4_getdevicelist(struct nfs_server *server,
const struct nfs_fh *fh,
struct pnfs_devicelist *devlist)
{
struct nfs4_getdevicelist_args args = {
.fh = fh,
.layoutclass = server->pnfs_curr_ld->id,
};
struct nfs4_getdevicelist_res res = {
.devlist = devlist,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
&res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
int nfs4_proc_getdevicelist(struct nfs_server *server,
const struct nfs_fh *fh,
struct pnfs_devicelist *devlist)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_getdevicelist(server, fh, devlist),
&exception);
} while (exception.retry);
dprintk("%s: err=%d, num_devs=%u\n", __func__,
err, devlist->num_devs);
return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
struct nfs4_getdeviceinfo_args args = {
.pdev = pdev,
};
struct nfs4_getdeviceinfo_res res = {
.pdev = pdev,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_getdeviceinfo(server, pdev),
&exception);
} while (exception.retry);
return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct nfs_server *server = NFS_SERVER(data->args.inode);
if (nfs4_setup_sequence(server, &data->args.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct nfs_server *server = NFS_SERVER(data->args.inode);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
switch (task->tk_status) { /* Just ignore these failures */
case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
case -NFS4ERR_BADLAYOUT: /* no layout */
case -NFS4ERR_GRACE: /* loca_reclaim always false */
task->tk_status = 0;
}
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
if (task->tk_status == 0)
nfs_post_op_update_inode_force_wcc(data->args.inode,
data->res.fattr);
}
static void nfs4_layoutcommit_release(void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct pnfs_layout_segment *lseg, *tmp;
unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
pnfs_cleanup_layoutcommit(data);
/* Matched by references in pnfs_set_layoutcommit */
list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
list_del_init(&lseg->pls_lc_list);
if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
&lseg->pls_flags))
put_lseg(lseg);
}
clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
smp_mb__after_clear_bit();
wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
put_rpccred(data->cred);
kfree(data);
}
static const struct rpc_call_ops nfs4_layoutcommit_ops = {
.rpc_call_prepare = nfs4_layoutcommit_prepare,
.rpc_call_done = nfs4_layoutcommit_done,
.rpc_release = nfs4_layoutcommit_release,
};
int
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
.rpc_client = NFS_CLIENT(data->args.inode),
.rpc_message = &msg,
.callback_ops = &nfs4_layoutcommit_ops,
.callback_data = data,
.flags = RPC_TASK_ASYNC,
};
struct rpc_task *task;
int status = 0;
dprintk("NFS: %4d initiating layoutcommit call. sync %d "
"lbw: %llu inode %lu\n",
data->task.tk_pid, sync,
data->args.lastbytewritten,
data->args.inode->i_ino);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
if (!sync)
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0)
goto out;
status = task->tk_status;
out:
dprintk("%s: status %d\n", __func__, status);
rpc_put_task(task);
return status;
}
static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
struct nfs41_secinfo_no_name_args args = {
.style = SECINFO_STYLE_CURRENT_FH,
};
struct nfs4_secinfo_res res = {
.flavors = flavors,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
case -NFS4ERR_NOTSUPP:
break;
default:
err = nfs4_handle_exception(server, err, &exception);
}
} while (exception.retry);
return err;
}
static int
nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int err;
struct page *page;
rpc_authflavor_t flavor;
struct nfs4_secinfo_flavors *flavors;
page = alloc_page(GFP_KERNEL);
if (!page) {
err = -ENOMEM;
goto out;
}
flavors = page_address(page);
err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
/*
* Fall back on "guess and check" method if
* the server doesn't support SECINFO_NO_NAME
*/
if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
err = nfs4_find_root_sec(server, fhandle, info);
goto out_freepage;
}
if (err)
goto out_freepage;
flavor = nfs_find_best_sec(flavors);
err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
out_freepage:
put_page(page);
if (err == -EACCES)
return -EPERM;
out:
return err;
}
static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state)
{
int status;
struct nfs41_test_stateid_args args = {
.stateid = &state->stateid,
};
struct nfs41_test_stateid_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
.rpc_argp = &args,
.rpc_resp = &res,
};
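/* These stack-allocated sequence fields must start out NULL; the sequence setup fills them in */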
args.seq_args.sa_session = res.seq_res.sr_session = NULL;
status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
return status;
}
static int nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs41_test_stateid(server, state),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_free_stateid(struct nfs_server *server, struct nfs4_state *state)
{
int status;
struct nfs41_free_stateid_args args = {
.stateid = &state->stateid,
};
struct nfs41_free_stateid_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
.rpc_argp = &args,
.rpc_resp = &res,
};
args.seq_args.sa_session = res.seq_res.sr_session = NULL;
status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
return status;
}
static int nfs41_free_stateid(struct nfs_server *server, struct nfs4_state *state)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_free_stateid(server, state),
&exception);
} while (exception.retry);
return err;
}
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
.establish_clid = nfs4_init_clientid,
.get_clid_cred = nfs4_get_setclientid_cred,
};
#if defined(CONFIG_NFS_V4_1)
struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
.establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
.reclaim_complete = nfs41_proc_reclaim_complete,
};
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
.establish_clid = nfs4_init_clientid,
.get_clid_cred = nfs4_get_setclientid_cred,
};
#if defined(CONFIG_NFS_V4_1)
struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs41_open_expired,
.recover_lock = nfs41_lock_expired,
.establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
.sched_state_renewal = nfs4_proc_async_renew,
.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
.renew_lease = nfs4_proc_renew,
};
#if defined(CONFIG_NFS_V4_1)
struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
.sched_state_renewal = nfs41_proc_async_sequence,
.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
.renew_lease = nfs4_proc_sequence,
};
#endif
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.minor_version = 0,
.call_sync = _nfs4_call_sync,
.validate_stateid = nfs4_validate_delegation_stateid,
.find_root_sec = nfs4_find_root_sec,
.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
.state_renewal_ops = &nfs40_state_renewal_ops,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.minor_version = 1,
.call_sync = _nfs4_call_sync_session,
.validate_stateid = nfs41_validate_delegation_stateid,
.find_root_sec = nfs41_find_root_sec,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
.state_renewal_ops = &nfs41_state_renewal_ops,
};
#endif
const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
[1] = &nfs_v4_1_minor_ops,
#endif
};
static const struct inode_operations nfs4_file_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
.getxattr = generic_getxattr,
.setxattr = generic_setxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
};
const struct nfs_rpc_ops nfs_v4_clientops = {
.version = 4, /* protocol version */
.dentry_ops = &nfs4_dentry_operations,
.dir_inode_ops = &nfs4_dir_inode_operations,
.file_inode_ops = &nfs4_file_inode_operations,
.file_ops = &nfs4_file_operations,
.getroot = nfs4_proc_get_root,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookup = nfs4_proc_lookup,
.access = nfs4_proc_access,
.readlink = nfs4_proc_readlink,
.create = nfs4_proc_create,
.remove = nfs4_proc_remove,
.unlink_setup = nfs4_proc_unlink_setup,
.unlink_done = nfs4_proc_unlink_done,
.rename = nfs4_proc_rename,
.rename_setup = nfs4_proc_rename_setup,
.rename_done = nfs4_proc_rename_done,
.link = nfs4_proc_link,
.symlink = nfs4_proc_symlink,
.mkdir = nfs4_proc_mkdir,
.rmdir = nfs4_proc_remove,
.readdir = nfs4_proc_readdir,
.mknod = nfs4_proc_mknod,
.statfs = nfs4_proc_statfs,
.fsinfo = nfs4_proc_fsinfo,
.pathconf = nfs4_proc_pathconf,
.set_capabilities = nfs4_server_capabilities,
.decode_dirent = nfs4_decode_dirent,
.read_setup = nfs4_proc_read_setup,
.read_done = nfs4_read_done,
.write_setup = nfs4_proc_write_setup,
.write_done = nfs4_write_done,
.commit_setup = nfs4_proc_commit_setup,
.commit_done = nfs4_commit_done,
.lock = nfs4_proc_lock,
.clear_acl_cache = nfs4_zap_acl_attr,
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.init_client = nfs4_init_client,
.secinfo = nfs4_proc_secinfo,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
.prefix = XATTR_NAME_NFSV4_ACL,
.list = nfs4_xattr_list_nfs4_acl,
.get = nfs4_xattr_get_nfs4_acl,
.set = nfs4_xattr_set_nfs4_acl,
};
const struct xattr_handler *nfs4_xattr_handlers[] = {
&nfs4_xattr_nfs4_acl_handler,
NULL
};
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3527_0 |
crossvul-cpp_data_good_996_0 | // SPDX-License-Identifier: GPL-2.0
/*
* gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"
/**
* dwc3_gadget_set_test_mode - enables usb2 test modes
* @dwc: pointer to our context structure
* @mode: the mode to set (J, K SE0 NAK, Force Enable)
*
* Caller should take care of locking. This function will return 0 on
* success or -EINVAL if wrong Test Selector is passed.
*/
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
switch (mode) {
case TEST_J:
case TEST_K:
case TEST_SE0_NAK:
case TEST_PACKET:
case TEST_FORCE_EN:
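/* The test mode selector lives in the TSTCTRL field at DCTL bits 4:1, hence the shift */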
reg |= mode << 1;
break;
default:
return -EINVAL;
}
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
return 0;
}
/**
* dwc3_gadget_get_link_state - gets current state of usb link
* @dwc: pointer to our context structure
*
* Caller should take care of locking. This function will
* return the link state on success (>= 0) or -ETIMEDOUT.
*/
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
return DWC3_DSTS_USBLNKST(reg);
}
/**
* dwc3_gadget_set_link_state - sets usb link to a particular state
* @dwc: pointer to our context structure
* @state: the state to put link into
*
* Caller should take care of locking. This function will
* return 0 on success or -ETIMEDOUT.
*/
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
int retries = 10000;
u32 reg;
/*
* Wait until device controller is ready. Only applies to 1.94a and
* later RTL.
*/
if (dwc->revision >= DWC3_REVISION_194A) {
while (--retries) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
if (reg & DWC3_DSTS_DCNRD)
udelay(5);
else
break;
}
if (retries <= 0)
return -ETIMEDOUT;
}
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
/* set requested state */
reg |= DWC3_DCTL_ULSTCHNGREQ(state);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
/*
* The following code is racy when called from dwc3_gadget_wakeup,
* and is not needed, at least on newer versions
*/
if (dwc->revision >= DWC3_REVISION_194A)
return 0;
/* wait for a change in DSTS */
retries = 10000;
while (--retries) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
if (DWC3_DSTS_USBLNKST(reg) == state)
return 0;
udelay(5);
}
return -ETIMEDOUT;
}
/**
* dwc3_ep_inc_trb - increment a trb index.
* @index: Pointer to the TRB index to increment.
*
* The index should never point to the link TRB. After incrementing,
* if it is point to the link TRB, wrap around to the beginning. The
* link TRB is always at the last TRB entry.
*/
static void dwc3_ep_inc_trb(u8 *index)
{
(*index)++;
if (*index == (DWC3_TRB_NUM - 1))
*index = 0;
}
/**
* dwc3_ep_inc_enq - increment endpoint's enqueue pointer
* @dep: The endpoint whose enqueue pointer we're incrementing
*/
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
dwc3_ep_inc_trb(&dep->trb_enqueue);
}
/**
* dwc3_ep_inc_deq - increment endpoint's dequeue pointer
* @dep: The endpoint whose enqueue pointer we're incrementing
*/
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
dwc3_ep_inc_trb(&dep->trb_dequeue);
}
void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
struct dwc3_request *req, int status)
{
struct dwc3 *dwc = dep->dwc;
req->started = false;
list_del(&req->list);
req->remaining = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
if (req->trb)
usb_gadget_unmap_request_by_dev(dwc->sysdev,
&req->request, req->direction);
req->trb = NULL;
trace_dwc3_gadget_giveback(req);
if (dep->number > 1)
pm_runtime_put(dwc->dev);
}
/**
* dwc3_gadget_giveback - call struct usb_request's ->complete callback
* @dep: The endpoint to whom the request belongs to
* @req: The request we're giving back
* @status: completion code for the request
*
* Must be called with controller's lock held and interrupts disabled. This
* function will unmap @req and call its ->complete() callback to notify upper
* layers that it has completed.
*/
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status)
{
struct dwc3 *dwc = dep->dwc;
dwc3_gadget_del_and_unmap_request(dep, req, status);
spin_unlock(&dwc->lock);
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
}
/**
* dwc3_send_gadget_generic_command - issue a generic command for the controller
* @dwc: pointer to the controller context
* @cmd: the command to be issued
* @param: command parameter
*
* Caller should take care of locking. Issue @cmd with a given @param to @dwc
* and wait for its completion.
*/
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
u32 timeout = 500;
int status = 0;
int ret = 0;
u32 reg;
dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
do {
reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
if (!(reg & DWC3_DGCMD_CMDACT)) {
status = DWC3_DGCMD_STATUS(reg);
if (status)
ret = -EINVAL;
break;
}
} while (--timeout);
if (!timeout) {
ret = -ETIMEDOUT;
status = -ETIMEDOUT;
}
trace_dwc3_gadget_generic_cmd(cmd, param, status);
return ret;
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
/**
* dwc3_send_gadget_ep_cmd - issue an endpoint command
* @dep: the endpoint to which the command is going to be issued
* @cmd: the command to be issued
* @params: parameters to the command
*
* Caller should handle locking. This function will issue @cmd with given
* @params to @dep and wait for its completion.
*/
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
struct dwc3_gadget_ep_cmd_params *params)
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
u32 timeout = 1000;
u32 reg;
int cmd_status = 0;
int susphy = false;
int ret = -EINVAL;
/*
* Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
* we're issuing an endpoint command, we must check if
* GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
*
* We will also set SUSPHY bit to what it was before returning as stated
* by the same section on Synopsys databook.
*/
if (dwc->gadget.speed <= USB_SPEED_HIGH) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
susphy = true;
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
}
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
int needs_wakeup;
needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
dwc->link_state == DWC3_LINK_STATE_U2 ||
dwc->link_state == DWC3_LINK_STATE_U3);
if (unlikely(needs_wakeup)) {
ret = __dwc3_gadget_wakeup(dwc);
dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
ret);
}
}
dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
/*
* Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're
* not relying on XferNotReady, we can make use of a special "No
* Response Update Transfer" command where we should clear both CmdAct
* and CmdIOC bits.
*
* With this, we don't need to wait for command completion and can
* straight away issue further commands to the endpoint.
*
* NOTICE: We're making an assumption that control endpoints will never
* make use of Update Transfer command. This is a safe assumption
* because we can never have more than one request at a time with
* Control Endpoints. If anybody changes that assumption, this chunk
* needs to be updated accordingly.
*/
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
!usb_endpoint_xfer_isoc(desc))
cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
else
cmd |= DWC3_DEPCMD_CMDACT;
dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
do {
reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
if (!(reg & DWC3_DEPCMD_CMDACT)) {
cmd_status = DWC3_DEPCMD_STATUS(reg);
switch (cmd_status) {
case 0:
ret = 0;
break;
case DEPEVT_TRANSFER_NO_RESOURCE:
ret = -EINVAL;
break;
case DEPEVT_TRANSFER_BUS_EXPIRY:
/*
* SW issues START TRANSFER command to
* isochronous ep with future frame interval. If
* future interval time has already passed when
* core receives the command, it will respond
* with an error status of 'Bus Expiry'.
*
* Instead of always returning -EINVAL, let's
* give a hint to the gadget driver that this is
* the case by returning -EAGAIN.
*/
ret = -EAGAIN;
break;
default:
dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
}
break;
}
} while (--timeout);
if (timeout == 0) {
ret = -ETIMEDOUT;
cmd_status = -ETIMEDOUT;
}
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
if (ret == 0) {
switch (DWC3_DEPCMD_CMD(cmd)) {
case DWC3_DEPCMD_STARTTRANSFER:
dep->flags |= DWC3_EP_TRANSFER_STARTED;
break;
case DWC3_DEPCMD_ENDTRANSFER:
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
break;
default:
/* nothing */
break;
}
}
if (unlikely(susphy)) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
return ret;
}
static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd = DWC3_DEPCMD_CLEARSTALL;
/*
* As of core revision 2.60a the recommended programming model
* is to set the ClearPendIN bit when issuing a Clear Stall EP
* command for IN endpoints. This is to prevent an issue where
* some (non-compliant) hosts may not send ACK TPs for pending
* IN transfers due to a mishandled error condition. Synopsys
* STAR 9000614252.
*/
if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
(dwc->gadget.speed >= USB_SPEED_SUPER))
cmd |= DWC3_DEPCMD_CLEARPENDIN;
memset(&params, 0, sizeof(params));
return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
struct dwc3_trb *trb)
{
u32 offset = (char *) trb - (char *) dep->trb_pool;
return dep->trb_pool_dma + offset;
}
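/*
 * Worked example (illustrative only): a struct dwc3_trb is four 32-bit
 * words, i.e. 16 bytes, so &dep->trb_pool[4] sits at byte offset
 * 4 * 16 == 64 and its bus address is dep->trb_pool_dma + 64. These are
 * the addresses written into the link TRB's bpl/bph fields and into
 * START TRANSFER's parameters.
 */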
static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
if (dep->trb_pool)
return 0;
dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
&dep->trb_pool_dma, GFP_KERNEL);
if (!dep->trb_pool) {
dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
dep->name);
return -ENOMEM;
}
return 0;
}
static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
dep->trb_pool, dep->trb_pool_dma);
dep->trb_pool = NULL;
dep->trb_pool_dma = 0;
}
static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
/**
* dwc3_gadget_start_config - configure ep resources
* @dwc: pointer to our controller context structure
* @dep: endpoint that is being enabled
*
* Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
* completion, it will set Transfer Resource for all available endpoints.
*
* The assignment of transfer resources cannot perfectly follow the data book
* due to the fact that the controller driver does not have all knowledge of the
* configuration in advance. It is given this information piecemeal by the
* composite gadget framework after every SET_CONFIGURATION and
* SET_INTERFACE. Trying to follow the databook programming model in this
* scenario can cause errors, for two reasons:
*
* 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
* %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
* incorrect in the scenario of multiple interfaces.
*
* 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
* endpoint on alt setting (8.1.6).
*
* The following simplified method is used instead:
*
* All hardware endpoints can be assigned a transfer resource and this setting
* will stay persistent until either a core reset or hibernation. So whenever we
* do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
* %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
* guaranteed that there are as many transfer resources as endpoints.
*
* This function is called for each endpoint when it is being enabled but is
* triggered only when called for EP0-out, which always happens first, and which
* should only happen in one of the above conditions.
*/
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int i;
int ret;
if (dep->number)
return 0;
memset(&params, 0x00, sizeof(params));
cmd = DWC3_DEPCMD_DEPSTARTCFG;
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret)
return ret;
for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
struct dwc3_ep *dep = dwc->eps[i];
if (!dep)
continue;
ret = dwc3_gadget_set_xfer_resource(dwc, dep);
if (ret)
return ret;
}
return 0;
}
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
bool modify, bool restore)
{
const struct usb_ss_ep_comp_descriptor *comp_desc;
const struct usb_endpoint_descriptor *desc;
struct dwc3_gadget_ep_cmd_params params;
if (dev_WARN_ONCE(dwc->dev, modify && restore,
"Can't modify and restore\n"))
return -EINVAL;
comp_desc = dep->endpoint.comp_desc;
desc = dep->endpoint.desc;
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
/* Burst size is only needed in SuperSpeed mode */
if (dwc->gadget.speed >= USB_SPEED_SUPER) {
u32 burst = dep->endpoint.maxburst;
params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
}
if (modify) {
params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
} else if (restore) {
params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
params.param2 |= dep->saved_state;
} else {
params.param0 |= DWC3_DEPCFG_ACTION_INIT;
}
if (usb_endpoint_xfer_control(desc))
params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
| DWC3_DEPCFG_STREAM_EVENT_EN;
dep->stream_capable = true;
}
if (!usb_endpoint_xfer_control(desc))
params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
/*
* We are doing 1:1 mapping for endpoints, meaning
* Physical Endpoint 2 maps to Logical Endpoint 2 and
* so on. We consider the direction bit as part of the physical
* endpoint number. So USB endpoint 0x81 is 0x03.
*/
params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
/*
* We must use the lower 16 TX FIFOs even though
* HW might have more
*/
if (dep->direction)
params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
if (desc->bInterval) {
params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
dep->interval = 1 << (desc->bInterval - 1);
}
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
		&params);
}
/**
* __dwc3_gadget_ep_enable - initializes a hw endpoint
* @dep: endpoint to be initialized
* @modify: if true, modify existing endpoint configuration
* @restore: if true, restore endpoint configuration from scratch buffer
*
* Caller should take care of locking. Execute all necessary commands to
* initialize a HW endpoint so it can be used by a gadget driver.
*/
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
bool modify, bool restore)
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
u32 reg;
int ret;
if (!(dep->flags & DWC3_EP_ENABLED)) {
ret = dwc3_gadget_start_config(dwc, dep);
if (ret)
return ret;
}
ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore);
if (ret)
return ret;
if (!(dep->flags & DWC3_EP_ENABLED)) {
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
reg |= DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
init_waitqueue_head(&dep->wait_end_transfer);
if (usb_endpoint_xfer_control(desc))
goto out;
/* Initialize the TRB ring */
dep->trb_dequeue = 0;
dep->trb_enqueue = 0;
memset(dep->trb_pool, 0,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
/* Link TRB. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
/*
* Issue StartTransfer here with no-op TRB so we can always rely on No
* Response Update Transfer command.
*/
if (usb_endpoint_xfer_bulk(desc)) {
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
u32 cmd;
memset(&params, 0, sizeof(params));
trb = &dep->trb_pool[0];
trb_dma = dwc3_trb_dma_offset(dep, trb);
params.param0 = upper_32_bits(trb_dma);
params.param1 = lower_32_bits(trb_dma);
cmd = DWC3_DEPCMD_STARTTRANSFER;
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0)
return ret;
dep->flags |= DWC3_EP_BUSY;
dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
WARN_ON_ONCE(!dep->resource_index);
}
out:
trace_dwc3_gadget_ep_enable(dep);
return 0;
}
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
dwc3_stop_active_transfer(dwc, dep->number, true);
/* giveback all requests to gadget driver */
while (!list_empty(&dep->started_list)) {
req = next_request(&dep->started_list);
dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
}
while (!list_empty(&dep->pending_list)) {
req = next_request(&dep->pending_list);
dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
}
}
/**
* __dwc3_gadget_ep_disable - disables a hw endpoint
* @dep: the endpoint to disable
*
* This function undoes what __dwc3_gadget_ep_enable did and also removes
* requests which are currently being processed by the hardware and those which
* are not yet scheduled.
*
* Caller should take care of locking.
*/
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
trace_dwc3_gadget_ep_disable(dep);
dwc3_remove_requests(dwc, dep);
/* make sure HW endpoint isn't stalled */
if (dep->flags & DWC3_EP_STALL)
__dwc3_gadget_ep_set_halt(dep, 0, false);
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
reg &= ~DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
dep->stream_capable = false;
dep->type = 0;
dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
/* Clear out the ep descriptors for non-ep0 */
if (dep->number > 1) {
dep->endpoint.comp_desc = NULL;
dep->endpoint.desc = NULL;
}
return 0;
}
/* -------------------------------------------------------------------------- */
static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
return -EINVAL;
}
static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
return -EINVAL;
}
/* -------------------------------------------------------------------------- */
static int dwc3_gadget_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct dwc3_ep *dep;
struct dwc3 *dwc;
unsigned long flags;
int ret;
if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
pr_debug("dwc3: invalid parameters\n");
return -EINVAL;
}
if (!desc->wMaxPacketSize) {
pr_debug("dwc3: missing wMaxPacketSize\n");
return -EINVAL;
}
dep = to_dwc3_ep(ep);
dwc = dep->dwc;
if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
"%s is already enabled\n",
dep->name))
return 0;
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_enable(dep, false, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
struct dwc3_ep *dep;
struct dwc3 *dwc;
unsigned long flags;
int ret;
if (!ep) {
pr_debug("dwc3: invalid parameters\n");
return -EINVAL;
}
dep = to_dwc3_ep(ep);
dwc = dep->dwc;
if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
"%s is already disabled\n",
dep->name))
return 0;
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_disable(dep);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
gfp_t gfp_flags)
{
struct dwc3_request *req;
struct dwc3_ep *dep = to_dwc3_ep(ep);
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
return NULL;
req->epnum = dep->number;
req->dep = dep;
dep->allocated_requests++;
trace_dwc3_alloc_request(req);
return &req->request;
}
static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
struct usb_request *request)
{
struct dwc3_request *req = to_dwc3_request(request);
struct dwc3_ep *dep = to_dwc3_ep(ep);
dep->allocated_requests--;
trace_dwc3_free_request(req);
kfree(req);
}
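/*
 * Illustrative sketch only (compiled out): the two ops above are not
 * called directly; function drivers reach them through the generic
 * usb_ep API. The buffer size and completion handler below are made up
 * for the example.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status holds the completion code, req->actual the bytes moved */
}

static int example_queue_one(struct usb_ep *ep)
{
	struct usb_request *req;

	/* lands in dwc3_gadget_ep_alloc_request() */
	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = kmalloc(512, GFP_ATOMIC);
	if (!req->buf) {
		/* lands in dwc3_gadget_ep_free_request() */
		usb_ep_free_request(ep, req);
		return -ENOMEM;
	}

	req->length = 512;
	req->complete = example_complete;

	/* lands in dwc3_gadget_ep_queue() */
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif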
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep);
static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
{
struct dwc3 *dwc = dep->dwc;
struct usb_gadget *gadget = &dwc->gadget;
enum usb_device_speed speed = gadget->speed;
dwc3_ep_inc_enq(dep);
trb->size = DWC3_TRB_SIZE_LENGTH(length);
trb->bpl = lower_32_bits(dma);
trb->bph = upper_32_bits(dma);
switch (usb_endpoint_type(dep->endpoint.desc)) {
case USB_ENDPOINT_XFER_CONTROL:
trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
break;
case USB_ENDPOINT_XFER_ISOC:
if (!node) {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
/*
* USB Specification 2.0 Section 5.9.2 states that: "If
* there is only a single transaction in the microframe,
* only a DATA0 data packet PID is used. If there are
* two transactions per microframe, DATA1 is used for
* the first transaction data packet and DATA0 is used
* for the second transaction data packet. If there are
* three transactions per microframe, DATA2 is used for
* the first transaction data packet, DATA1 is used for
* the second, and DATA0 is used for the third."
*
* IOW, we should satisfy the following cases:
*
* 1) length <= maxpacket
* - DATA0
*
* 2) maxpacket < length <= (2 * maxpacket)
* - DATA1, DATA0
*
* 3) (2 * maxpacket) < length <= (3 * maxpacket)
* - DATA2, DATA1, DATA0
*/
if (speed == USB_SPEED_HIGH) {
struct usb_ep *ep = &dep->endpoint;
unsigned int mult = 2;
unsigned int maxp = usb_endpoint_maxp(ep->desc);
if (length <= (2 * maxp))
mult--;
if (length <= maxp)
mult--;
trb->size |= DWC3_TRB_SIZE_PCM1(mult);
}
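/*
 * Worked example (illustrative only): a 900-byte high-speed
 * isoc request with maxp == 512 starts at mult == 2, drops
 * to 1 because 900 <= 2 * 512, and stays there because
 * 900 > 512; PCM1 == 1 then selects the DATA1/DATA0
 * sequence described above.
 */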
} else {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
}
/* always enable Interrupt on Missed ISOC */
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
break;
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
trb->ctrl = DWC3_TRBCTL_NORMAL;
break;
default:
/*
* This is only possible with faulty memory because we
* checked it already :)
*/
dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
usb_endpoint_type(dep->endpoint.desc));
}
/* always enable Continue on Short Packet */
if (usb_endpoint_dir_out(dep->endpoint.desc)) {
trb->ctrl |= DWC3_TRB_CTRL_CSP;
if (short_not_ok)
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
}
if ((!no_interrupt && !chain) ||
(dwc3_calc_trbs_left(dep) == 0))
trb->ctrl |= DWC3_TRB_CTRL_IOC;
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
trb->ctrl |= DWC3_TRB_CTRL_HWO;
trace_dwc3_prepare_trb(dep, trb);
}
/**
* dwc3_prepare_one_trb - setup one TRB from one request
* @dep: endpoint for which this request is prepared
* @req: dwc3_request pointer
* @chain: should this TRB be chained to the next?
* @node: only for isochronous endpoints. First TRB needs different type.
*/
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
struct dwc3_request *req, unsigned chain, unsigned node)
{
struct dwc3_trb *trb;
unsigned length = req->request.length;
unsigned stream_id = req->request.stream_id;
unsigned short_not_ok = req->request.short_not_ok;
unsigned no_interrupt = req->request.no_interrupt;
dma_addr_t dma = req->request.dma;
trb = &dep->trb_pool[dep->trb_enqueue];
if (!req->trb) {
dwc3_gadget_move_started_request(req);
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
dep->queued_requests++;
}
__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
stream_id, short_not_ok, no_interrupt);
}
/**
* dwc3_ep_prev_trb - returns the previous TRB in the ring
* @dep: The endpoint with the TRB ring
* @index: The index of the current TRB in the ring
*
* Returns the TRB prior to the one pointed to by the index. If the
* index is 0, we will wrap backwards, skip the link TRB, and return
* the one just before that.
*/
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
u8 tmp = index;
if (!tmp)
tmp = DWC3_TRB_NUM - 1;
return &dep->trb_pool[tmp - 1];
}
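/*
 * Worked example (illustrative only), assuming DWC3_TRB_NUM == 256:
 * index 255 is the link TRB, so for index 5 this returns
 * &trb_pool[4], while for index 0 it wraps to 255 and returns
 * &trb_pool[254], the last usable TRB before the link TRB.
 */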
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
struct dwc3_trb *tmp;
u8 trbs_left;
/*
* If enqueue & dequeue are equal, then the ring is either full or empty.
*
* One way to know for sure is if the TRB right before us has HWO bit
* set or not. If it has, then we're definitely full and can't fit any
* more transfers in our ring.
*/
if (dep->trb_enqueue == dep->trb_dequeue) {
tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
return 0;
return DWC3_TRB_NUM - 1;
}
trbs_left = dep->trb_dequeue - dep->trb_enqueue;
trbs_left &= (DWC3_TRB_NUM - 1);
if (dep->trb_dequeue < dep->trb_enqueue)
trbs_left--;
return trbs_left;
}
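/*
 * Worked example (illustrative only), assuming DWC3_TRB_NUM == 256:
 * with trb_dequeue == 3 and trb_enqueue == 5, trbs_left starts as
 * (3 - 5) & 255 == 254 and, because dequeue < enqueue, one more is
 * subtracted for the link TRB, leaving 253 usable TRBs. When
 * enqueue == dequeue, the previous TRB's HWO bit disambiguates full
 * (HWO set, 0 left) from empty (255 left).
 */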
static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
struct dwc3_request *req)
{
struct scatterlist *sg = req->sg;
struct scatterlist *s;
int i;
for_each_sg(sg, s, req->num_pending_sgs, i) {
unsigned int length = req->request.length;
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
unsigned int rem = length % maxp;
unsigned chain = true;
if (sg_is_last(s))
chain = false;
if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
req->unaligned = true;
/* prepare normal TRB */
dwc3_prepare_one_trb(dep, req, true, i);
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
maxp - rem, false, 0,
req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
} else {
dwc3_prepare_one_trb(dep, req, chain, i);
}
if (!dwc3_calc_trbs_left(dep))
break;
}
}
static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
struct dwc3_request *req)
{
unsigned int length = req->request.length;
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
unsigned int rem = length % maxp;
if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
req->unaligned = true;
/* prepare normal TRB */
dwc3_prepare_one_trb(dep, req, true, 0);
/* Now prepare one extra TRB to align transfer size */
trb = &dep->trb_pool[dep->trb_enqueue];
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
false, 0, req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
} else if (req->request.zero && req->request.length &&
(IS_ALIGNED(req->request.length, dep->endpoint.maxpacket))) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
req->zero = true;
/* prepare normal TRB */
dwc3_prepare_one_trb(dep, req, true, 0);
/* Now prepare one extra TRB to handle ZLP */
trb = &dep->trb_pool[dep->trb_enqueue];
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
false, 0, req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
} else {
dwc3_prepare_one_trb(dep, req, false, 0);
}
}
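/*
 * Worked example (illustrative only): a 1000-byte OUT request on an
 * endpoint with maxp == 512 gives rem == 1000 % 512 == 488, so a
 * second TRB pointing at dwc->bounce_addr is chained in for
 * 512 - 488 == 24 bytes, rounding the total transfer size up to a
 * multiple of wMaxPacketSize as the controller requires for OUT
 * endpoints.
 */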
/*
* dwc3_prepare_trbs - setup TRBs from requests
* @dep: endpoint for which requests are being prepared
*
* The function goes through the requests list and sets up TRBs for the
* transfers. The function returns once there are no more TRBs available or
* it runs out of requests.
*/
static void dwc3_prepare_trbs(struct dwc3_ep *dep)
{
struct dwc3_request *req, *n;
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
/*
* We can get in a situation where there's a request in the started list
* but there weren't enough TRBs to fully kick it in the first time
* around, so it has been waiting for more TRBs to be freed up.
*
* In that case, we should check if we have a request with pending_sgs
* in the started list and prepare TRBs for that request first,
* otherwise we will prepare TRBs completely out of order and that will
* break things.
*/
list_for_each_entry(req, &dep->started_list, list) {
if (req->num_pending_sgs > 0)
dwc3_prepare_one_trb_sg(dep, req);
if (!dwc3_calc_trbs_left(dep))
return;
}
list_for_each_entry_safe(req, n, &dep->pending_list, list) {
struct dwc3 *dwc = dep->dwc;
int ret;
ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
dep->direction);
if (ret)
return;
req->sg = req->request.sg;
req->num_pending_sgs = req->request.num_mapped_sgs;
if (req->num_pending_sgs > 0)
dwc3_prepare_one_trb_sg(dep, req);
else
dwc3_prepare_one_trb_linear(dep, req);
if (!dwc3_calc_trbs_left(dep))
return;
}
}
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
int starting;
int ret;
u32 cmd;
if (!dwc3_calc_trbs_left(dep))
return 0;
starting = !(dep->flags & DWC3_EP_BUSY);
dwc3_prepare_trbs(dep);
req = next_request(&dep->started_list);
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
return 0;
}
memset(&params, 0, sizeof(params));
if (starting) {
params.param0 = upper_32_bits(req->trb_dma);
params.param1 = lower_32_bits(req->trb_dma);
cmd = DWC3_DEPCMD_STARTTRANSFER;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
} else {
cmd = DWC3_DEPCMD_UPDATETRANSFER |
DWC3_DEPCMD_PARAM(dep->resource_index);
}
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0) {
/*
* FIXME we need to iterate over the list of requests
* here and stop, unmap, free and del each of the linked
* requests instead of what we do now.
*/
if (req->trb)
memset(req->trb, 0, sizeof(struct dwc3_trb));
dep->queued_requests--;
dwc3_gadget_del_and_unmap_request(dep, req, ret);
return ret;
}
dep->flags |= DWC3_EP_BUSY;
if (starting) {
dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
WARN_ON_ONCE(!dep->resource_index);
}
return 0;
}
static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
{
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
return DWC3_DSTS_SOFFN(reg);
}
static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, u32 cur_uf)
{
if (list_empty(&dep->pending_list)) {
dev_info(dwc->dev, "%s: ran out of requests\n",
dep->name);
dep->flags |= DWC3_EP_PENDING_REQUEST;
return;
}
/*
* Schedule the first TRB for one interval in the future or at
* least 4 microframes.
*/
dep->frame_number = cur_uf + max_t(u32, 4, dep->interval);
__dwc3_gadget_kick_transfer(dep);
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
u32 cur_uf, mask;
mask = ~(dep->interval - 1);
cur_uf = event->parameters & mask;
__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
if (!dep->endpoint.desc) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
dep->name);
return -ESHUTDOWN;
}
if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
&req->request, req->dep->name))
return -EINVAL;
pm_runtime_get(dwc->dev);
req->request.actual = 0;
req->request.status = -EINPROGRESS;
req->direction = dep->direction;
req->epnum = dep->number;
trace_dwc3_ep_queue(req);
list_add_tail(&req->list, &dep->pending_list);
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for an XferNotReady event so we will know what the current
* (micro-)frame number is.
*
* Without this trick, we are very, very likely gonna get Bus Expiry
* errors which will force us to issue the EndTransfer command.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
} else {
u32 cur_uf;
cur_uf = __dwc3_gadget_get_frame(dwc);
__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
dep->flags &= ~DWC3_EP_PENDING_REQUEST;
}
return 0;
}
if ((dep->flags & DWC3_EP_BUSY) &&
!(dep->flags & DWC3_EP_MISSED_ISOC))
goto out;
return 0;
}
out:
return __dwc3_gadget_kick_transfer(dep);
}
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags)
{
struct dwc3_request *req = to_dwc3_request(request);
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int ret;
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_queue(dep, req);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
struct usb_request *request)
{
struct dwc3_request *req = to_dwc3_request(request);
struct dwc3_request *r = NULL;
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int ret = 0;
trace_dwc3_ep_dequeue(req);
spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(r, &dep->pending_list, list) {
if (r == req)
break;
}
if (r != req) {
list_for_each_entry(r, &dep->started_list, list) {
if (r == req)
break;
}
if (r == req) {
/* wait until it is processed */
dwc3_stop_active_transfer(dwc, dep->number, true);
/*
* If request was already started, this means we had to
* stop the transfer. With that we also need to ignore
* all TRBs used by the request, however TRBs can only
* be modified after completion of END_TRANSFER
* command. So what we do here is that we wait for
* END_TRANSFER completion and only after that, we jump
* over TRBs by clearing HWO and incrementing dequeue
* pointer.
*
* Note that we have 2 possible types of transfers here:
*
* i) Linear buffer request
* ii) SG-list based request
*
* SG-list based requests will have r->num_pending_sgs
* set to a valid number (> 0). Linear requests,
* normally use a single TRB.
*
* For each of these two cases, if r->unaligned flag is
* set, one extra TRB has been used to align transfer
* size to wMaxPacketSize.
*
* All of these cases need to be taken into
* consideration so we don't mess up our TRB ring
* pointers.
*/
wait_event_lock_irq(dep->wait_end_transfer,
!(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
dwc->lock);
if (!r->trb)
goto out1;
if (r->num_pending_sgs) {
struct dwc3_trb *trb;
int i = 0;
for (i = 0; i < r->num_pending_sgs; i++) {
trb = r->trb + i;
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
}
if (r->unaligned || r->zero) {
trb = r->trb + r->num_pending_sgs + 1;
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
}
} else {
struct dwc3_trb *trb = r->trb;
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
if (r->unaligned || r->zero) {
trb = r->trb + 1;
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
}
}
goto out1;
}
dev_err(dwc->dev, "request %pK was not queued to %s\n",
request, ep->name);
ret = -EINVAL;
goto out0;
}
out1:
/* giveback the request */
dep->queued_requests--;
dwc3_gadget_giveback(dep, req, -ECONNRESET);
out0:
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3 *dwc = dep->dwc;
int ret;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
return -EINVAL;
}
memset(&params, 0x00, sizeof(params));
if (value) {
struct dwc3_trb *trb;
unsigned transfer_in_flight;
unsigned started;
if (dep->flags & DWC3_EP_STALL)
return 0;
if (dep->number > 1)
trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
else
trb = &dwc->ep0_trb[dep->trb_enqueue];
transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
started = !list_empty(&dep->started_list);
if (!protocol && ((dep->direction && transfer_in_flight) ||
(!dep->direction && started))) {
return -EAGAIN;
}
ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
&params);
if (ret)
dev_err(dwc->dev, "failed to set STALL on %s\n",
dep->name);
else
dep->flags |= DWC3_EP_STALL;
} else {
if (!(dep->flags & DWC3_EP_STALL))
return 0;
ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret)
dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
else
dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
}
return ret;
}
static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int ret;
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_set_halt(dep, value, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int ret;
spin_lock_irqsave(&dwc->lock, flags);
dep->flags |= DWC3_EP_WEDGE;
if (dep->number == 0 || dep->number == 1)
ret = __dwc3_gadget_ep0_set_halt(ep, 1);
else
ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
/* -------------------------------------------------------------------------- */
static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};
static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
.enable = dwc3_gadget_ep0_enable,
.disable = dwc3_gadget_ep0_disable,
.alloc_request = dwc3_gadget_ep_alloc_request,
.free_request = dwc3_gadget_ep_free_request,
.queue = dwc3_gadget_ep0_queue,
.dequeue = dwc3_gadget_ep_dequeue,
.set_halt = dwc3_gadget_ep0_set_halt,
.set_wedge = dwc3_gadget_ep_set_wedge,
};
static const struct usb_ep_ops dwc3_gadget_ep_ops = {
.enable = dwc3_gadget_ep_enable,
.disable = dwc3_gadget_ep_disable,
.alloc_request = dwc3_gadget_ep_alloc_request,
.free_request = dwc3_gadget_ep_free_request,
.queue = dwc3_gadget_ep_queue,
.dequeue = dwc3_gadget_ep_dequeue,
.set_halt = dwc3_gadget_ep_set_halt,
.set_wedge = dwc3_gadget_ep_set_wedge,
};
/* -------------------------------------------------------------------------- */
static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
return __dwc3_gadget_get_frame(dwc);
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
int retries;
int ret;
u32 reg;
u8 link_state;
u8 speed;
/*
* According to the Databook, a Remote Wakeup request should
* be issued only when the device is in the early suspend state.
*
* We can check that via the USB Link State bits in the DSTS register.
*/
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
speed = reg & DWC3_DSTS_CONNECTSPD;
if ((speed == DWC3_DSTS_SUPERSPEED) ||
(speed == DWC3_DSTS_SUPERSPEED_PLUS))
return 0;
link_state = DWC3_DSTS_USBLNKST(reg);
switch (link_state) {
case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
break;
default:
return -EINVAL;
}
ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
if (ret < 0) {
dev_err(dwc->dev, "failed to put link in Recovery\n");
return ret;
}
/* Recent versions do this automatically */
if (dwc->revision < DWC3_REVISION_194A) {
/* write zeroes to Link Change Request */
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
}
/* poll until Link State changes to ON */
retries = 20000;
while (retries--) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
/* in HS, means ON */
if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
break;
}
if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
dev_err(dwc->dev, "failed to send remote wakeup\n");
return -EINVAL;
}
return 0;
}
static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
int ret;
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_wakeup(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
int is_selfpowered)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
g->is_selfpowered = !!is_selfpowered;
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
u32 timeout = 500;
if (pm_runtime_suspended(dwc->dev))
return 0;
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
if (dwc->revision <= DWC3_REVISION_187A) {
reg &= ~DWC3_DCTL_TRGTULST_MASK;
reg |= DWC3_DCTL_TRGTULST_RX_DET;
}
if (dwc->revision >= DWC3_REVISION_194A)
reg &= ~DWC3_DCTL_KEEP_CONNECT;
reg |= DWC3_DCTL_RUN_STOP;
if (dwc->has_hibernation)
reg |= DWC3_DCTL_KEEP_CONNECT;
dwc->pullups_connected = true;
} else {
reg &= ~DWC3_DCTL_RUN_STOP;
if (dwc->has_hibernation && !suspend)
reg &= ~DWC3_DCTL_KEEP_CONNECT;
dwc->pullups_connected = false;
}
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
do {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
reg &= DWC3_DSTS_DEVCTRLHLT;
} while (--timeout && !(!is_on ^ !reg));
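/*
 * Note on the loop condition above: it polls until DSTS.DEVCTRLHLT
 * agrees with the requested state -- cleared when starting (is_on),
 * set when stopping -- or until the 500-iteration budget runs out.
 */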
if (!timeout)
return -ETIMEDOUT;
return 0;
}
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
int ret;
is_on = !!is_on;
/*
* Per databook, when we want to stop the gadget, if a control transfer
* is still in progress, complete it and get the core into setup phase.
*/
if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
reinit_completion(&dwc->ep0_in_setup);
ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
if (ret == 0) {
dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
return -ETIMEDOUT;
}
}
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
u32 reg;
/* Enable all but Start and End of Frame IRQs */
reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
DWC3_DEVTEN_EVNTOVERFLOWEN |
DWC3_DEVTEN_CMDCMPLTEN |
DWC3_DEVTEN_ERRTICERREN |
DWC3_DEVTEN_WKUPEVTEN |
DWC3_DEVTEN_CONNECTDONEEN |
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
if (dwc->revision < DWC3_REVISION_250A)
reg |= DWC3_DEVTEN_ULSTCNGEN;
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
/* mask all interrupts */
dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}
static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
/**
* dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
* @dwc: pointer to our context structure
*
* The following looks complex but is actually very simple. In order to
* calculate the number of packets we can burst at once on OUT transfers, we're
* gonna use the RxFIFO size.
*
* To calculate RxFIFO size we need two numbers:
* MDWIDTH = size, in bits, of the internal memory bus
* RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
*
* Given these two numbers, the formula is simple:
*
* RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
*
* 24 bytes is for 3x SETUP packets
* 16 bytes is a clock domain crossing tolerance
*
* Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
*/
static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
u32 ram2_depth;
u32 mdwidth;
u32 nump;
u32 reg;
ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
nump = min_t(u32, nump, 16);
/* update NumP */
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~DWC3_DCFG_NUMP_MASK;
reg |= nump << DWC3_DCFG_NUMP_SHIFT;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
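/*
 * Worked example (illustrative only): with a hypothetical MDWIDTH of
 * 64 bits and RAM2_DEPTH of 1024, the RxFIFO works out to
 * (1024 * 64 / 8) - 24 - 16 == 8152 bytes, so NUMP == 8152 / 1024 == 7
 * packets may be bursted before ACK TPs start throttling the host.
 */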
static int __dwc3_gadget_start(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
int ret = 0;
u32 reg;
/*
* Use IMOD if enabled via dwc->imod_interval. Otherwise, if
* the core supports IMOD, disable it.
*/
if (dwc->imod_interval) {
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
} else if (dwc3_has_imod(dwc)) {
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
}
/*
* We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
* field instead of letting dwc3 itself calculate that automatically.
*
* This way, we maximize the chances that we'll be able to get several
* bursts of data without going through any sort of endpoint throttling.
*/
reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
if (dwc3_is_usb31(dwc))
reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
else
reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
dwc3_gadget_setup_nump(dwc);
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
ret = __dwc3_gadget_ep_enable(dep, false, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err0;
}
dep = dwc->eps[1];
ret = __dwc3_gadget_ep_enable(dep, false, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err1;
}
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
dwc3_gadget_enable_irq(dwc);
return 0;
err1:
__dwc3_gadget_ep_disable(dwc->eps[0]);
err0:
return ret;
}
static int dwc3_gadget_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
int ret = 0;
int irq;
irq = dwc->irq_gadget;
ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
IRQF_SHARED, "dwc3", dwc->ev_buf);
if (ret) {
dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
irq, ret);
goto err0;
}
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->gadget_driver) {
dev_err(dwc->dev, "%s is already bound to %s\n",
dwc->gadget.name,
dwc->gadget_driver->driver.name);
ret = -EBUSY;
goto err1;
}
dwc->gadget_driver = driver;
if (pm_runtime_active(dwc->dev))
__dwc3_gadget_start(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
err1:
spin_unlock_irqrestore(&dwc->lock, flags);
free_irq(irq, dwc);
err0:
return ret;
}
static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
dwc3_gadget_disable_irq(dwc);
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
}
static int dwc3_gadget_stop(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
int epnum;
u32 tmo_eps = 0;
spin_lock_irqsave(&dwc->lock, flags);
if (pm_runtime_suspended(dwc->dev))
goto out;
__dwc3_gadget_stop(dwc);
for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
struct dwc3_ep *dep = dwc->eps[epnum];
int ret;
if (!dep)
continue;
if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
continue;
ret = wait_event_interruptible_lock_irq_timeout(dep->wait_end_transfer,
!(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
dwc->lock, msecs_to_jiffies(5));
if (ret <= 0) {
/* Timed out or interrupted! There's nothing much
* we can do so we just log here and print which
* endpoints timed out at the end.
*/
tmo_eps |= 1 << epnum;
dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
}
}
if (tmo_eps) {
dev_err(dwc->dev,
"end transfer timed out on endpoints 0x%x [bitmap]\n",
tmo_eps);
}
out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
free_irq(dwc->irq_gadget, dwc->ev_buf);
return 0;
}
static void dwc3_gadget_set_speed(struct usb_gadget *g,
enum usb_device_speed speed)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
u32 reg;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
/*
* WORKAROUND: DWC3 revision < 2.20a have an issue
* which would cause metastability state on Run/Stop
* bit if we try to force the IP to USB2-only mode.
*
* Because of that, we cannot configure the IP to any
* speed other than the SuperSpeed
*
* Refers to:
*
* STAR#9000525659: Clock Domain Crossing on DCTL in
* USB 2.0 Mode
*/
if (dwc->revision < DWC3_REVISION_220A &&
!dwc->dis_metastability_quirk) {
reg |= DWC3_DCFG_SUPERSPEED;
} else {
switch (speed) {
case USB_SPEED_LOW:
reg |= DWC3_DCFG_LOWSPEED;
break;
case USB_SPEED_FULL:
reg |= DWC3_DCFG_FULLSPEED;
break;
case USB_SPEED_HIGH:
reg |= DWC3_DCFG_HIGHSPEED;
break;
case USB_SPEED_SUPER:
reg |= DWC3_DCFG_SUPERSPEED;
break;
case USB_SPEED_SUPER_PLUS:
if (dwc3_is_usb31(dwc))
reg |= DWC3_DCFG_SUPERSPEED_PLUS;
else
reg |= DWC3_DCFG_SUPERSPEED;
break;
default:
dev_err(dwc->dev, "invalid speed (%d)\n", speed);
if (dwc->revision & DWC3_REVISION_IS_DWC31)
reg |= DWC3_DCFG_SUPERSPEED_PLUS;
else
reg |= DWC3_DCFG_SUPERSPEED;
}
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
spin_unlock_irqrestore(&dwc->lock, flags);
}
static const struct usb_gadget_ops dwc3_gadget_ops = {
.get_frame = dwc3_gadget_get_frame,
.wakeup = dwc3_gadget_wakeup,
.set_selfpowered = dwc3_gadget_set_selfpowered,
.pullup = dwc3_gadget_pullup,
.udc_start = dwc3_gadget_start,
.udc_stop = dwc3_gadget_stop,
.udc_set_speed = dwc3_gadget_set_speed,
};
/* -------------------------------------------------------------------------- */
static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
struct dwc3_ep *dep;
u8 epnum;
INIT_LIST_HEAD(&dwc->gadget.ep_list);
for (epnum = 0; epnum < total; epnum++) {
bool direction = epnum & 1;
u8 num = epnum >> 1;
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
return -ENOMEM;
dep->dwc = dwc;
dep->number = epnum;
dep->direction = direction;
dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
dwc->eps[epnum] = dep;
snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
direction ? "in" : "out");
dep->endpoint.name = dep->name;
if (!(dep->number > 1)) {
dep->endpoint.desc = &dwc3_gadget_ep0_desc;
dep->endpoint.comp_desc = NULL;
}
spin_lock_init(&dep->lock);
if (num == 0) {
usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
dep->endpoint.maxburst = 1;
dep->endpoint.ops = &dwc3_gadget_ep0_ops;
if (!direction)
dwc->gadget.ep0 = &dep->endpoint;
} else if (direction) {
int mdwidth;
int kbytes;
int size;
int ret;
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
/* MDWIDTH is represented in bits, we need it in bytes */
mdwidth /= 8;
size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num));
if (dwc3_is_usb31(dwc))
size = DWC31_GTXFIFOSIZ_TXFDEF(size);
else
size = DWC3_GTXFIFOSIZ_TXFDEF(size);
/* FIFO depth is in MDWIDTH bytes. Multiply */
size *= mdwidth;
kbytes = size / 1024;
if (kbytes == 0)
kbytes = 1;
/*
* FIFO sizes account for an extra MDWIDTH * (kbytes + 1) bytes of
* internal overhead. We don't really know how these bytes are used,
* but the documentation says they exist.
*/
size -= mdwidth * (kbytes + 1);
size /= kbytes;
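/*
 * Worked example (illustrative only): a hypothetical MDWIDTH of
 * 64 bits (8 bytes) and TXFDEF == 258 give a raw FIFO of
 * 258 * 8 == 2064 bytes, so kbytes == 2; subtracting the
 * 8 * (2 + 1) == 24 bytes of overhead and dividing by kbytes
 * leaves a 1020-byte maxpacket limit for this IN endpoint.
 */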
usb_ep_set_maxpacket_limit(&dep->endpoint, size);
dep->endpoint.max_streams = 15;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
&dwc->gadget.ep_list);
ret = dwc3_alloc_trb_pool(dep);
if (ret)
return ret;
} else {
int ret;
usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
dep->endpoint.max_streams = 15;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
&dwc->gadget.ep_list);
ret = dwc3_alloc_trb_pool(dep);
if (ret)
return ret;
}
if (num == 0) {
dep->endpoint.caps.type_control = true;
} else {
dep->endpoint.caps.type_iso = true;
dep->endpoint.caps.type_bulk = true;
dep->endpoint.caps.type_int = true;
}
dep->endpoint.caps.dir_in = direction;
dep->endpoint.caps.dir_out = !direction;
INIT_LIST_HEAD(&dep->pending_list);
INIT_LIST_HEAD(&dep->started_list);
}
return 0;
}
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
u8 epnum;
for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
dep = dwc->eps[epnum];
if (!dep)
continue;
/*
* Physical endpoints 0 and 1 are special; they form the
* bi-directional USB endpoint 0.
*
* For those two physical endpoints, we don't allocate a TRB
* pool nor do we add them to the endpoints list. Due to that, we
* shouldn't do these two operations on them either; otherwise we
* would end up with all sorts of bugs when removing dwc3.ko.
*/
if (epnum != 0 && epnum != 1) {
dwc3_free_trb_pool(dep);
list_del(&dep->endpoint.ep_list);
}
kfree(dep);
}
}
/* -------------------------------------------------------------------------- */
static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_request *req, struct dwc3_trb *trb,
const struct dwc3_event_depevt *event, int status,
int chain)
{
unsigned int count;
unsigned int s_pkt = 0;
unsigned int trb_status;
dwc3_ep_inc_deq(dep);
if (req->trb == trb)
dep->queued_requests--;
trace_dwc3_complete_trb(dep, trb);
/*
* If we're in the middle of a series of chained TRBs and we
* receive a short transfer along the way, DWC3 will skip
* through all TRBs including the last TRB in the chain (the
* one where the CHN bit is zero). DWC3 will also avoid clearing
* the HWO bit and SW has to do it manually.
*
* We're going to do that here to avoid problems of HW trying
* to use bogus TRBs for transfers.
*/
if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
/*
* If we're dealing with unaligned size OUT transfer, we will be left
* with one TRB pending in the ring. We need to manually clear HWO bit
* from that TRB.
*/
if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
return 1;
}
count = trb->size & DWC3_TRB_SIZE_MASK;
req->remaining += count;
if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
return 1;
if (dep->direction) {
if (count) {
trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
/*
* If a missed isoc occurred and there is
* no request queued, then issue END
* TRANSFER, so that the core generates
* the next xfernotready and we will issue
* a fresh START TRANSFER.
* If there are still queued requests,
* then wait; do not issue either END
* or UPDATE TRANSFER, just attach the next
* request in pending_list during
* giveback. If any future queued request
* is successfully transferred, then we
* will issue UPDATE TRANSFER for all
* requests in the pending_list.
*/
dep->flags |= DWC3_EP_MISSED_ISOC;
} else {
dev_err(dwc->dev, "incomplete IN transfer %s\n",
dep->name);
status = -ECONNRESET;
}
} else {
dep->flags &= ~DWC3_EP_MISSED_ISOC;
}
} else {
if (count && (event->status & DEPEVT_STATUS_SHORT))
s_pkt = 1;
}
if (s_pkt && !chain)
return 1;
if ((event->status & DEPEVT_STATUS_IOC) &&
(trb->ctrl & DWC3_TRB_CTRL_IOC))
return 1;
return 0;
}
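/*
 * Worked example (illustrative only): on completion, the TRB's BUFSIZ
 * field holds the bytes NOT transferred. A 512-byte TRB that received
 * only 200 bytes completes with count == 312, so req->remaining grows
 * by 312 and dwc3_cleanup_done_reqs() later reports
 * req->request.actual == length - remaining == 200.
 */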
static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
const struct dwc3_event_depevt *event, int status)
{
struct dwc3_request *req, *n;
struct dwc3_trb *trb;
bool ioc = false;
int ret = 0;
list_for_each_entry_safe(req, n, &dep->started_list, list) {
unsigned length;
int chain;
length = req->request.length;
chain = req->num_pending_sgs > 0;
if (chain) {
struct scatterlist *sg = req->sg;
struct scatterlist *s;
unsigned int pending = req->num_pending_sgs;
unsigned int i;
for_each_sg(sg, s, pending, i) {
trb = &dep->trb_pool[dep->trb_dequeue];
if (trb->ctrl & DWC3_TRB_CTRL_HWO)
break;
req->sg = sg_next(s);
req->num_pending_sgs--;
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
event, status, chain);
if (ret)
break;
}
} else {
trb = &dep->trb_pool[dep->trb_dequeue];
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
event, status, chain);
}
if (req->unaligned || req->zero) {
trb = &dep->trb_pool[dep->trb_dequeue];
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
event, status, false);
req->unaligned = false;
req->zero = false;
}
req->request.actual = length - req->remaining;
if ((req->request.actual < length) && req->num_pending_sgs)
return __dwc3_gadget_kick_transfer(dep);
dwc3_gadget_giveback(dep, req, status);
if (ret) {
if ((event->status & DEPEVT_STATUS_IOC) &&
(trb->ctrl & DWC3_TRB_CTRL_IOC))
ioc = true;
break;
}
}
/*
* Our endpoint might get disabled by another thread during
* dwc3_gadget_giveback(). If that happens, we're just gonna return 1
* early on so that the DWC3_EP_BUSY flag gets cleared.
*/
if (!dep->endpoint.desc)
return 1;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->started_list)) {
if (list_empty(&dep->pending_list)) {
/*
* If there is no entry in the request list then do
* not issue END TRANSFER now. Just set the PENDING
* flag, so that END TRANSFER is issued when an
* entry is added into the request list.
*/
dep->flags = DWC3_EP_PENDING_REQUEST;
} else {
dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
return 1;
}
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc)
return 0;
return 1;
}
static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
unsigned status = 0;
int clean_busy;
u32 is_xfer_complete;
is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
if (event->status & DEPEVT_STATUS_BUSERR)
status = -ECONNRESET;
clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
usb_endpoint_xfer_isoc(dep->endpoint.desc)))
dep->flags &= ~DWC3_EP_BUSY;
/*
* WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
* See dwc3_gadget_linksts_change_interrupt() for 1st half.
*/
if (dwc->revision < DWC3_REVISION_183A) {
u32 reg;
int i;
for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
dep = dwc->eps[i];
if (!(dep->flags & DWC3_EP_ENABLED))
continue;
if (!list_empty(&dep->started_list))
return;
}
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= dwc->u1u2;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc->u1u2 = 0;
}
/*
* Our endpoint might get disabled by another thread during
* dwc3_gadget_giveback(). If that happens, we're just gonna return
* early on so that the DWC3_EP_BUSY flag gets cleared.
*/
if (!dep->endpoint.desc)
return;
if (!usb_endpoint_xfer_isoc(dep->endpoint.desc))
__dwc3_gadget_kick_transfer(dep);
}
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
struct dwc3_ep *dep;
u8 epnum = event->endpoint_number;
u8 cmd;
dep = dwc->eps[epnum];
if (!(dep->flags & DWC3_EP_ENABLED)) {
if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
return;
/* Handle only EPCMDCMPLT when EP disabled */
if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
return;
}
if (epnum == 0 || epnum == 1) {
dwc3_ep0_interrupt(dwc, event);
return;
}
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
dep->resource_index = 0;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n");
return;
}
dwc3_endpoint_transfer_complete(dwc, dep, event);
break;
case DWC3_DEPEVT_XFERINPROGRESS:
dwc3_endpoint_transfer_complete(dwc, dep, event);
break;
case DWC3_DEPEVT_XFERNOTREADY:
if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
dwc3_gadget_start_isoc(dwc, dep, event);
else
__dwc3_gadget_kick_transfer(dep);
break;
case DWC3_DEPEVT_STREAMEVT:
if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
dep->name);
return;
}
break;
case DWC3_DEPEVT_EPCMDCMPLT:
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
wake_up(&dep->wait_end_transfer);
}
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
break;
}
}
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->disconnect(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->suspend(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_reset_gadget(struct dwc3 *dwc)
{
if (!dwc->gadget_driver)
return;
if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
spin_unlock(&dwc->lock);
usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
spin_lock(&dwc->lock);
}
}
static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
{
struct dwc3_ep *dep;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret;
dep = dwc->eps[epnum];
if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
!dep->resource_index)
return;
/*
* NOTICE: We are violating what the Databook says about the
* EndTransfer command. Ideally we would _always_ wait for the
* EndTransfer Command Completion IRQ, but that's causing too
* much trouble synchronizing between us and gadget driver.
*
* We have discussed this with the IP Provider and it was
* suggested to giveback all requests here, but give HW some
* extra time to synchronize with the interconnect. We're using
* an arbitrary 100us delay for that.
*
* Note also that a similar handling was tested by Synopsys
* (thanks a lot Paul) and nothing bad has come out of it.
* In short, what we're doing is:
*
* - Issue EndTransfer WITH CMDIOC bit set
* - Wait 100us
*
* As of IP version 3.10a of the DWC_usb3 IP, the controller
* supports a mode to work around the above limitation. The
* software can poll the CMDACT bit in the DEPCMD register
* after issuing an EndTransfer command. This mode is enabled
* by writing GUCTL2[14]. This polling is already done in the
* dwc3_send_gadget_ep_cmd() function so if the mode is
* enabled, the EndTransfer command will have completed upon
* returning from this function and we don't need to delay for
* 100us.
*
* This mode is NOT available on the DWC_usb31 IP.
*/
cmd = DWC3_DEPCMD_ENDTRANSFER;
cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
WARN_ON_ONCE(ret);
dep->resource_index = 0;
dep->flags &= ~DWC3_EP_BUSY;
if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
udelay(100);
}
}
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
u32 epnum;
for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
struct dwc3_ep *dep;
int ret;
dep = dwc->eps[epnum];
if (!dep)
continue;
if (!(dep->flags & DWC3_EP_STALL))
continue;
dep->flags &= ~DWC3_EP_STALL;
ret = dwc3_send_clear_stall_ep_cmd(dep);
WARN_ON_ONCE(ret);
}
}
static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
int reg;
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_INITU1ENA;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
reg &= ~DWC3_DCTL_INITU2ENA;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc3_disconnect_gadget(dwc);
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
dwc->connected = false;
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
u32 reg;
dwc->connected = true;
/*
* WORKAROUND: DWC3 revisions <1.88a have an issue which
* would cause a missing Disconnect Event if there's a
* pending Setup Packet in the FIFO.
*
* There's no suggested workaround on the official Bug
* report, which states that "unless the driver/application
* is doing any special handling of a disconnect event,
* there is no functional issue".
*
* Unfortunately, it turns out that we _do_ some special
* handling of a disconnect event, namely complete all
* pending transfers, notify gadget driver of the
* disconnection, and so on.
*
* Our suggested workaround is to follow the Disconnect
* Event steps here, instead, based on a setup_packet_pending
* flag. Such flag gets set whenever we have a SETUP_PENDING
* status for EP0 TRBs and gets cleared on XferComplete for the
* same endpoint.
*
* Refers to:
*
* STAR#9000466709: RTL: Device : Disconnect event not
* generated if setup packet pending in FIFO
*/
if (dwc->revision < DWC3_REVISION_188A) {
if (dwc->setup_packet_pending)
dwc3_gadget_disconnect_interrupt(dwc);
}
dwc3_reset_gadget(dwc);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc->test_mode = false;
dwc3_clear_stall_all_ep(dwc);
/* Reset device address to zero */
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_DEVADDR_MASK);
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
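/*
 * Sketch only: the read-modify-write pattern used above to clear fields
 * such as DCTL's test-control bits or DCFG's device address. These helpers
 * are illustrative; the driver open-codes the same steps around
 * dwc3_readl()/dwc3_writel().
 */
#include <stdint.h>
static inline uint32_t reg_clear_bits(uint32_t reg, uint32_t mask)
{
	return reg & ~mask; /* drop the field, keep everything else */
}
static inline uint32_t reg_set_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask); /* replace the field in the local copy */
}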
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
int ret;
u32 reg;
u8 speed;
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
/*
* RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
* each time on Connect Done.
*
* Currently we always use the reset value. If any platform
* wants to set this to a different value, we need to add a
* setting and update GCTL.RAMCLKSEL here.
*/
switch (speed) {
case DWC3_DSTS_SUPERSPEED_PLUS:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
break;
case DWC3_DSTS_SUPERSPEED:
/*
* WORKAROUND: DWC3 revisions <1.90a have an issue which
* would cause a missing USB3 Reset event.
*
* In such situations, we should force a USB3 Reset
* event by calling our dwc3_gadget_reset_interrupt()
* routine.
*
* Refers to:
*
* STAR#9000483510: RTL: SS : USB3 reset event may
* not be generated always when the link enters poll
*/
if (dwc->revision < DWC3_REVISION_190A)
dwc3_gadget_reset_interrupt(dwc);
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER;
break;
case DWC3_DSTS_HIGHSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_HIGH;
break;
case DWC3_DSTS_FULLSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_FULL;
break;
case DWC3_DSTS_LOWSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
dwc->gadget.ep0->maxpacket = 8;
dwc->gadget.speed = USB_SPEED_LOW;
break;
}
dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
/* Enable USB2 LPM Capability */
if ((dwc->revision > DWC3_REVISION_194A) &&
(speed != DWC3_DSTS_SUPERSPEED) &&
(speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg |= DWC3_DCFG_LPM_CAP;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
/*
* When dwc3 revisions >= 2.40a, the LPM Erratum is enabled, and
* DCFG.LPMCap is set, the core responds with an ACK when the
* BESL value in the LPM token is less than or equal to the LPM
* NYET threshold.
*/
WARN_ONCE(dwc->revision < DWC3_REVISION_240A
&& dwc->has_lpm_erratum,
"LPM Erratum not available on dwc3 revisions < 2.40a\n");
if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
} else {
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
}
dep = dwc->eps[0];
ret = __dwc3_gadget_ep_enable(dep, true, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
ret = __dwc3_gadget_ep_enable(dep, true, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
/*
* Configure PHY via GUSB3PIPECTLn if required.
*
* Update GTXFIFOSIZn
*
* In both cases reset values should be sufficient.
*/
}
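/*
 * Sketch only: the speed-to-ep0-maxpacket mapping implemented by the switch
 * above, expressed as a lookup table instead. The enum values here are
 * illustrative and do not correspond to the DWC3_DSTS_* encodings.
 */
enum sketch_speed { SK_LOW, SK_FULL, SK_HIGH, SK_SUPER, SK_SUPER_PLUS };
static const unsigned int sketch_ep0_maxpacket[] = {
	[SK_LOW] = 8, /* low-speed ep0 */
	[SK_FULL] = 64, /* full-speed ep0 */
	[SK_HIGH] = 64, /* high-speed ep0 */
	[SK_SUPER] = 512, /* SuperSpeed ep0 */
	[SK_SUPER_PLUS] = 512, /* SuperSpeedPlus ep0 */
};
static inline unsigned int ep0_maxpacket_for(enum sketch_speed s)
{
	return sketch_ep0_maxpacket[s];
}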
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
/*
* TODO take core out of low power mode when that's
* implemented.
*/
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
unsigned int pwropt;
/*
* WORKAROUND: DWC3 < 2.50a have an issue when configured without
* Hibernation mode enabled which would show up when device detects
* host-initiated U3 exit.
*
* In that case, device will generate a Link State Change Interrupt
* from U3 to RESUME which is only necessary if Hibernation is
* configured in.
*
* There are no functional changes due to such spurious event and we
* just need to ignore it.
*
* Refers to:
*
* STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
* operational mode
*/
pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
if ((dwc->revision < DWC3_REVISION_250A) &&
(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
(next == DWC3_LINK_STATE_RESUME)) {
return;
}
}
/*
* WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
* on the link partner, the USB session might do multiple entries/exits
* of low power states before a transfer takes place.
*
* Due to this problem, we might experience lower throughput. The
* suggested workaround is to disable DCTL[12:9] bits if we're
* transitioning from U1/U2 to U0 and enable those bits again
* after a transfer completes and there are no pending transfers
* on any of the enabled endpoints.
*
* This is the first half of that workaround.
*
* Refers to:
*
* STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
* core send LGO_Ux entering U0
*/
if (dwc->revision < DWC3_REVISION_183A) {
if (next == DWC3_LINK_STATE_U0) {
u32 u1u2;
u32 reg;
switch (dwc->link_state) {
case DWC3_LINK_STATE_U1:
case DWC3_LINK_STATE_U2:
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
u1u2 = reg & (DWC3_DCTL_INITU2ENA
| DWC3_DCTL_ACCEPTU2ENA
| DWC3_DCTL_INITU1ENA
| DWC3_DCTL_ACCEPTU1ENA);
if (!dwc->u1u2)
dwc->u1u2 = reg & u1u2;
reg &= ~u1u2;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
break;
default:
/* do nothing */
break;
}
}
}
switch (next) {
case DWC3_LINK_STATE_U1:
if (dwc->speed == USB_SPEED_SUPER)
dwc3_suspend_gadget(dwc);
break;
case DWC3_LINK_STATE_U2:
case DWC3_LINK_STATE_U3:
dwc3_suspend_gadget(dwc);
break;
case DWC3_LINK_STATE_RESUME:
dwc3_resume_gadget(dwc);
break;
default:
/* do nothing */
break;
}
dwc->link_state = next;
}
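/*
 * Sketch only: the save-then-clear step of the <1.83a workaround above.
 * Remember which of a set of enable bits were set so that a later path (the
 * second half of the workaround) can restore exactly those bits.
 */
#include <stdint.h>
struct u1u2_state {
	uint32_t saved; /* bits to restore once transfers quiesce */
};
static inline uint32_t u1u2_save_and_clear(struct u1u2_state *st,
		uint32_t reg, uint32_t enable_mask)
{
	if (!st->saved)
		st->saved = reg & enable_mask; /* remember only on first entry */
	return reg & ~enable_mask; /* value to write back */
}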
static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
dwc3_suspend_gadget(dwc);
dwc->link_state = next;
}
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
unsigned int is_ss = evtinfo & BIT(4);
/*
* WORKAROUND: DWC3 revision 2.20a with hibernation support
* has a known issue which can cause USB CV TD.9.23 to fail
* randomly.
*
* Because of this issue, core could generate bogus hibernation
* events which SW needs to ignore.
*
* Refers to:
*
* STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
* Device Fallback from SuperSpeed
*/
if (!!is_ss ^ (dwc->speed == USB_SPEED_SUPER))
return;
/* enter hibernation here */
}
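/*
 * Sketch only: the test above is a boolean-inequality check between a raw
 * hardware flag (bit 4 of the event info) and a software state. Normalizing
 * both sides with !! makes that intent explicit.
 */
static inline int ss_states_disagree(unsigned int hw_is_ss, int sw_is_ss)
{
	return !!hw_is_ss != !!sw_is_ss; /* true when exactly one side claims SuperSpeed */
}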
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
const struct dwc3_event_devt *event)
{
switch (event->type) {
case DWC3_DEVICE_EVENT_DISCONNECT:
dwc3_gadget_disconnect_interrupt(dwc);
break;
case DWC3_DEVICE_EVENT_RESET:
dwc3_gadget_reset_interrupt(dwc);
break;
case DWC3_DEVICE_EVENT_CONNECT_DONE:
dwc3_gadget_conndone_interrupt(dwc);
break;
case DWC3_DEVICE_EVENT_WAKEUP:
dwc3_gadget_wakeup_interrupt(dwc);
break;
case DWC3_DEVICE_EVENT_HIBER_REQ:
if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
"unexpected hibernation event\n"))
break;
dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
break;
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
case DWC3_DEVICE_EVENT_EOPF:
/* It changed to be the suspend event for version 2.30a and above */
if (dwc->revision >= DWC3_REVISION_230A) {
/*
* Ignore suspend event until the gadget enters
* USB_STATE_CONFIGURED state.
*/
if (dwc->gadget.state >= USB_STATE_CONFIGURED)
dwc3_gadget_suspend_interrupt(dwc,
event->event_info);
}
break;
case DWC3_DEVICE_EVENT_SOF:
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
case DWC3_DEVICE_EVENT_CMD_CMPL:
case DWC3_DEVICE_EVENT_OVERFLOW:
break;
default:
dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
}
}
static void dwc3_process_event_entry(struct dwc3 *dwc,
const union dwc3_event *event)
{
trace_dwc3_event(event->raw, dwc);
if (!event->type.is_devspec)
dwc3_endpoint_interrupt(dwc, &event->depevt);
else if (event->type.type == DWC3_EVENT_TYPE_DEV)
dwc3_gadget_interrupt(dwc, &event->devt);
else
dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
}
static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
struct dwc3 *dwc = evt->dwc;
irqreturn_t ret = IRQ_NONE;
int left;
u32 reg;
left = evt->count;
if (!(evt->flags & DWC3_EVENT_PENDING))
return IRQ_NONE;
while (left > 0) {
union dwc3_event event;
event.raw = *(u32 *) (evt->cache + evt->lpos);
dwc3_process_event_entry(dwc, &event);
/*
* FIXME we wrap around correctly to the next entry as
* almost all entries are 4 bytes in size. There is one
* entry which has 12 bytes: a regular entry followed by
* 8 bytes of data. ATM I don't know how things are
* organized if such an entry straddles the buffer
* boundary, so worry about that once we try to handle
* it.
*/
evt->lpos = (evt->lpos + 4) % evt->length;
left -= 4;
}
evt->count = 0;
evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
/* Unmask interrupt */
reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
reg &= ~DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
if (dwc->imod_interval) {
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
return ret;
}
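/*
 * Standalone sketch of the drain loop in dwc3_process_event_buf() above
 * (assuming fixed 4-byte entries and a length that is a multiple of 4):
 * consume entries from a circular cache, advancing the read position with a
 * modulo wrap.
 */
#include <stdint.h>
#include <string.h>
struct evt_ring {
	uint8_t *cache; /* snapshot of the hardware event buffer */
	unsigned int lpos; /* current read position, in bytes */
	unsigned int length; /* total buffer size, in bytes */
};
static void evt_ring_drain(struct evt_ring *r, unsigned int count,
		void (*handle)(uint32_t entry))
{
	while (count > 0) {
		uint32_t entry;
		memcpy(&entry, r->cache + r->lpos, sizeof(entry));
		handle(entry);
		r->lpos = (r->lpos + 4) % r->length; /* wrap at end of buffer */
		count -= 4;
	}
}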
static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
{
struct dwc3_event_buffer *evt = _evt;
struct dwc3 *dwc = evt->dwc;
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_process_event_buf(evt);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
struct dwc3 *dwc = evt->dwc;
u32 amount;
u32 count;
u32 reg;
if (pm_runtime_suspended(dwc->dev)) {
pm_runtime_get(dwc->dev);
disable_irq_nosync(dwc->irq_gadget);
dwc->pending_events = true;
return IRQ_HANDLED;
}
/*
* With PCIe legacy interrupts, testing shows that the top-half irq
* handler can be called again after HW interrupt deassertion. Check
* that the bottom-half irq event handler has completed before caching
* a new event, to prevent losing events.
*/
if (evt->flags & DWC3_EVENT_PENDING)
return IRQ_HANDLED;
count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
count &= DWC3_GEVNTCOUNT_MASK;
if (!count)
return IRQ_NONE;
evt->count = count;
evt->flags |= DWC3_EVENT_PENDING;
/* Mask interrupt */
reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
reg |= DWC3_GEVNTSIZ_INTMASK;
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
amount = min(count, evt->length - evt->lpos);
memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
if (amount < count)
memcpy(evt->cache, evt->buf, count - amount);
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
return IRQ_WAKE_THREAD;
}
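/*
 * Standalone sketch of the wrap-aware copy in dwc3_check_event_buf() above:
 * snapshot `count` bytes of a circular buffer starting at `lpos` into a
 * cache, using at most two memcpy calls when the region wraps.
 */
#include <string.h>
static void ring_snapshot(unsigned char *cache, const unsigned char *buf,
		unsigned int lpos, unsigned int length, unsigned int count)
{
	unsigned int amount = count < length - lpos ? count : length - lpos;
	memcpy(cache + lpos, buf + lpos, amount); /* tail, up to end of ring */
	if (amount < count)
		memcpy(cache, buf, count - amount); /* wrapped head at start */
}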
static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
struct dwc3_event_buffer *evt = _evt;
return dwc3_check_event_buf(evt);
}
static int dwc3_gadget_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq(dwc3_pdev, 0);
if (irq > 0)
goto out;
if (irq != -EPROBE_DEFER)
dev_err(dwc->dev, "missing peripheral IRQ\n");
if (!irq)
irq = -EINVAL;
out:
return irq;
}
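/*
 * Sketch only: the fallback chain in dwc3_gadget_get_irq() above, written as
 * a loop. lookup() is a hypothetical stand-in for platform_get_irq_byname(),
 * and the EPROBE_DEFER/EINVAL values are defined here only for the sketch.
 */
#define SK_EPROBE_DEFER 517 /* assumed kernel value, for the sketch only */
#define SK_EINVAL 22
static int irq_lookup_fallback(int (*lookup)(const char *name),
		const char *const names[], int n)
{
	int i, irq = -SK_EINVAL;
	for (i = 0; i < n; i++) {
		irq = lookup(names[i]);
		/* first hit wins; a probe deferral must propagate immediately */
		if (irq > 0 || irq == -SK_EPROBE_DEFER)
			return irq;
	}
	return irq ? irq : -SK_EINVAL; /* treat 0 as "no IRQ found" */
}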
/**
* dwc3_gadget_init - initializes gadget related registers
* @dwc: pointer to our controller context structure
*
* Returns 0 on success, otherwise negative errno.
*/
int dwc3_gadget_init(struct dwc3 *dwc)
{
int ret;
int irq;
irq = dwc3_gadget_get_irq(dwc);
if (irq < 0) {
ret = irq;
goto err0;
}
dwc->irq_gadget = irq;
dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
sizeof(*dwc->ep0_trb) * 2,
&dwc->ep0_trb_addr, GFP_KERNEL);
if (!dwc->ep0_trb) {
dev_err(dwc->dev, "failed to allocate ep0 trb\n");
ret = -ENOMEM;
goto err0;
}
dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
if (!dwc->setup_buf) {
ret = -ENOMEM;
goto err1;
}
dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
&dwc->bounce_addr, GFP_KERNEL);
if (!dwc->bounce) {
ret = -ENOMEM;
goto err2;
}
init_completion(&dwc->ep0_in_setup);
dwc->gadget.ops = &dwc3_gadget_ops;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.sg_supported = true;
dwc->gadget.name = "dwc3-gadget";
dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
/*
* FIXME We might be setting max_speed to <SUPER, however versions
* <2.20a of dwc3 have an issue with metastability (documented
* elsewhere in this driver) which tells us we can't set max speed to
* anything lower than SUPER.
*
* Because gadget.max_speed is only used by composite.c and function
* drivers (i.e. it won't go into dwc3's registers) we are allowing this
* to happen so we avoid sending a SuperSpeed Capability descriptor
* together with our BOS descriptor, as that could confuse the host into
* thinking we can handle super speed.
*
* Note that, in fact, we won't even support GetBOS requests when speed
* is less than super speed because we don't have means, yet, to tell
* composite.c that we are USB 2.0 + LPM ECN.
*/
if (dwc->revision < DWC3_REVISION_220A &&
!dwc->dis_metastability_quirk)
dev_info(dwc->dev, "changing max_speed on rev %08x\n",
dwc->revision);
dwc->gadget.max_speed = dwc->maximum_speed;
/*
* REVISIT: Here we should clear all pending IRQs to be
* sure we're starting from a well known location.
*/
ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
if (ret)
goto err3;
ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
if (ret) {
dev_err(dwc->dev, "failed to register udc\n");
goto err4;
}
return 0;
err4:
dwc3_gadget_free_endpoints(dwc);
err3:
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
err2:
kfree(dwc->setup_buf);
err1:
dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
err0:
return ret;
}
/* -------------------------------------------------------------------------- */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
usb_del_gadget_udc(&dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
kfree(dwc->setup_buf);
dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
}
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
if (!dwc->gadget_driver)
return 0;
dwc3_gadget_run_stop(dwc, false, false);
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
return 0;
}
int dwc3_gadget_resume(struct dwc3 *dwc)
{
int ret;
if (!dwc->gadget_driver)
return 0;
ret = __dwc3_gadget_start(dwc);
if (ret < 0)
goto err0;
ret = dwc3_gadget_run_stop(dwc, true, false);
if (ret < 0)
goto err1;
return 0;
err1:
__dwc3_gadget_stop(dwc);
err0:
return ret;
}
void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
if (dwc->pending_events) {
dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
dwc->pending_events = false;
enable_irq(dwc->irq_gadget);
}
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_996_0 |
crossvul-cpp_data_bad_3497_0 | /*
* fs/cifs/cifssmb.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Contains the routines for constructing the SMB PDUs themselves
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* SMB/CIFS PDU handling routines here - except for leftovers in connect.c */
/* These are mostly routines that operate on a pathname, or on a tree id */
/* (mounted volume), but there are eight handle based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/posix_acl_xattr.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#ifdef CONFIG_CIFS_POSIX
static struct {
int index;
char *name;
} protocols[] = {
#ifdef CONFIG_CIFS_WEAK_PW_HASH
{LANMAN_PROT, "\2LM1.2X002"},
{LANMAN2_PROT, "\2LANMAN2.1"},
#endif /* weak password hashing for legacy clients */
{CIFS_PROT, "\2NT LM 0.12"},
{POSIX_PROT, "\2POSIX 2"},
{BAD_PROT, "\2"}
};
#else
static struct {
int index;
char *name;
} protocols[] = {
#ifdef CONFIG_CIFS_WEAK_PW_HASH
{LANMAN_PROT, "\2LM1.2X002"},
{LANMAN2_PROT, "\2LANMAN2.1"},
#endif /* weak password hashing for legacy clients */
{CIFS_PROT, "\2NT LM 0.12"},
{BAD_PROT, "\2"}
};
#endif
/* define the number of elements in the cifs dialect array */
#ifdef CONFIG_CIFS_POSIX
#ifdef CONFIG_CIFS_WEAK_PW_HASH
#define CIFS_NUM_PROT 4
#else
#define CIFS_NUM_PROT 2
#endif /* CIFS_WEAK_PW_HASH */
#else /* not posix */
#ifdef CONFIG_CIFS_WEAK_PW_HASH
#define CIFS_NUM_PROT 3
#else
#define CIFS_NUM_PROT 1
#endif /* CONFIG_CIFS_WEAK_PW_HASH */
#endif /* CIFS_POSIX */
/* Mark as invalid all open files on tree connections, since they
were closed when the session to the server was lost */
static void mark_open_files_invalid(struct cifs_tcon *pTcon)
{
struct cifsFileInfo *open_file = NULL;
struct list_head *tmp;
struct list_head *tmp1;
/* list all files open on tree connection and mark them invalid */
spin_lock(&cifs_file_list_lock);
list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
open_file = list_entry(tmp, struct cifsFileInfo, tlist);
open_file->invalidHandle = true;
open_file->oplock_break_cancelled = true;
}
spin_unlock(&cifs_file_list_lock);
/* BB Add call to invalidate_inodes(sb) for all superblocks mounted
to this tcon */
}
/* reconnect the socket, tcon, and smb session if needed */
static int
cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
{
int rc;
struct cifs_ses *ses;
struct TCP_Server_Info *server;
struct nls_table *nls_codepage;
/*
* SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for
* tcp and smb session status done differently for those three - in the
* calling routine
*/
if (!tcon)
return 0;
ses = tcon->ses;
server = ses->server;
/*
* only tree disconnect, open, and write (and ulogoff, which does not
* have a tcon) are allowed as we start a forced umount
*/
if (tcon->tidStatus == CifsExiting) {
if (smb_command != SMB_COM_WRITE_ANDX &&
smb_command != SMB_COM_OPEN_ANDX &&
smb_command != SMB_COM_TREE_DISCONNECT) {
cFYI(1, "can not send cmd %d while umounting",
smb_command);
return -ENODEV;
}
}
/*
* Give the demultiplex thread up to 10 seconds to reconnect; this should
* be greater than the cifs socket timeout, which is 7 seconds
*/
while (server->tcpStatus == CifsNeedReconnect) {
wait_event_interruptible_timeout(server->response_q,
(server->tcpStatus != CifsNeedReconnect), 10 * HZ);
/* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
break;
/*
* on "soft" mounts we wait once. Hard mounts keep
* retrying until process is killed or server comes
* back on-line
*/
if (!tcon->retry) {
cFYI(1, "gave up waiting on reconnect in smb_init");
return -EHOSTDOWN;
}
}
if (!ses->need_reconnect && !tcon->need_reconnect)
return 0;
nls_codepage = load_nls_default();
/*
* need to prevent multiple threads trying to simultaneously
* reconnect the same SMB session
*/
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(0, ses);
if (rc == 0 && ses->need_reconnect)
rc = cifs_setup_session(0, ses, nls_codepage);
/* do we need to reconnect tcon? */
if (rc || !tcon->need_reconnect) {
mutex_unlock(&ses->session_mutex);
goto out;
}
mark_open_files_invalid(tcon);
rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
mutex_unlock(&ses->session_mutex);
cFYI(1, "reconnect tcon rc = %d", rc);
if (rc)
goto out;
/*
* FIXME: check if wsize needs updated due to negotiated smb buffer
* size shrinking
*/
atomic_inc(&tconInfoReconnectCount);
/* tell server Unix caps we support */
if (ses->capabilities & CAP_UNIX)
reset_cifs_unix_caps(0, tcon, NULL, NULL);
/*
* Removed call to reopen open files here. It is safer (and faster) to
* reopen files one at a time as needed in read and write.
*
* FIXME: what about file locks? don't we need to reclaim them ASAP?
*/
out:
/*
* Check if handle based operation so we know whether we can continue
* or not without returning to caller to reset file handle
*/
switch (smb_command) {
case SMB_COM_READ_ANDX:
case SMB_COM_WRITE_ANDX:
case SMB_COM_CLOSE:
case SMB_COM_FIND_CLOSE2:
case SMB_COM_LOCKING_ANDX:
rc = -EAGAIN;
}
unload_nls(nls_codepage);
return rc;
}
/* Allocate and return pointer to an SMB request buffer, and set basic
SMB information in the SMB header. If the return code is zero, this
function must have filled in request_buf pointer */
static int
small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf)
{
int rc;
rc = cifs_reconnect_tcon(tcon, smb_command);
if (rc)
return rc;
*request_buf = cifs_small_buf_get();
if (*request_buf == NULL) {
/* BB should we add a retry in here if not a writepage? */
return -ENOMEM;
}
header_assemble((struct smb_hdr *) *request_buf, smb_command,
tcon, wct);
if (tcon != NULL)
cifs_stats_inc(&tcon->num_smbs_sent);
return 0;
}
int
small_smb_init_no_tc(const int smb_command, const int wct,
struct cifs_ses *ses, void **request_buf)
{
int rc;
struct smb_hdr *buffer;
rc = small_smb_init(smb_command, wct, NULL, request_buf);
if (rc)
return rc;
buffer = (struct smb_hdr *)*request_buf;
buffer->Mid = GetNextMid(ses->server);
if (ses->capabilities & CAP_UNICODE)
buffer->Flags2 |= SMBFLG2_UNICODE;
if (ses->capabilities & CAP_STATUS32)
buffer->Flags2 |= SMBFLG2_ERR_STATUS;
/* uid, tid can stay at zero as set in header assemble */
/* BB add support for turning on the signing when
this function is used after 1st of session setup requests */
return rc;
}
/* If the return code is zero, this function must fill in request_buf pointer */
static int
__smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
*request_buf = cifs_buf_get();
if (*request_buf == NULL) {
/* BB should we add a retry in here if not a writepage? */
return -ENOMEM;
}
/* Although the original thought was we needed the response buf for */
/* potential retries of smb operations, it turns out we can determine */
/* from the mid flags when the request buffer can be resent without */
/* having to use a second distinct buffer for the response */
if (response_buf)
*response_buf = *request_buf;
header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
wct);
if (tcon != NULL)
cifs_stats_inc(&tcon->num_smbs_sent);
return 0;
}
/* If the return code is zero, this function must fill in request_buf pointer */
static int
smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
int rc;
rc = cifs_reconnect_tcon(tcon, smb_command);
if (rc)
return rc;
return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
}
static int
smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
if (tcon->ses->need_reconnect || tcon->need_reconnect)
return -EHOSTDOWN;
return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
}
static int validate_t2(struct smb_t2_rsp *pSMB)
{
unsigned int total_size;
/* check for plausible wct */
if (pSMB->hdr.WordCount < 10)
goto vt2_err;
/* check for parm and data offset going beyond end of smb */
if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 ||
get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024)
goto vt2_err;
total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount);
if (total_size >= 512)
goto vt2_err;
/* check that bcc is at least as big as parms + data, and that it is
* less than negotiated smb buffer
*/
total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount);
if (total_size > get_bcc(&pSMB->hdr) ||
total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)
goto vt2_err;
return 0;
vt2_err:
cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
sizeof(struct smb_t2_rsp) + 16);
return -EINVAL;
}
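/*
 * Standalone sketch of the style of bounds checking done by validate_t2()
 * above: widen before adding so the parameter/data sum cannot wrap, then
 * reject anything that would point past the received buffer. The 1024 limit
 * mirrors the driver's plausibility check; max_buf is a caller-supplied
 * ceiling standing in for the negotiated buffer size.
 */
#include <stdint.h>
static int t2_counts_ok(uint16_t param_off, uint16_t data_off,
		uint16_t param_cnt, uint16_t data_cnt,
		uint32_t bcc, uint32_t max_buf)
{
	/* u16 + u16 cannot overflow once widened to u32 */
	uint32_t total = (uint32_t)param_cnt + data_cnt;
	if (param_off > 1024 || data_off > 1024)
		return 0;
	if (total > bcc || total >= max_buf)
		return 0;
	return 1;
}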
static inline void inc_rfc1001_len(void *pSMB, int count)
{
struct smb_hdr *hdr = (struct smb_hdr *)pSMB;
be32_add_cpu(&hdr->smb_buf_length, count);
}
int
CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
{
NEGOTIATE_REQ *pSMB;
NEGOTIATE_RSP *pSMBr;
int rc = 0;
int bytes_returned;
int i;
struct TCP_Server_Info *server;
u16 count;
unsigned int secFlags;
if (ses->server)
server = ses->server;
else {
rc = -EIO;
return rc;
}
rc = smb_init(SMB_COM_NEGOTIATE, 0, NULL /* no tcon yet */ ,
(void **) &pSMB, (void **) &pSMBr);
if (rc)
return rc;
/* if any of the auth flags (i.e. not sign or seal) are overridden, use them */
if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */
else /* if override flags set only sign/seal OR them with global auth */
secFlags = global_secflags | ses->overrideSecFlg;
cFYI(1, "secFlags 0x%x", secFlags);
pSMB->hdr.Mid = GetNextMid(server);
pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) {
cFYI(1, "Kerberos only mechanism, enable extended security");
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
} else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) {
cFYI(1, "NTLMSSP only mechanism, enable extended security");
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
}
count = 0;
for (i = 0; i < CIFS_NUM_PROT; i++) {
strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
count += strlen(protocols[i].name) + 1;
/* null at end of source and target buffers anyway */
}
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc != 0)
goto neg_err_exit;
server->dialect = le16_to_cpu(pSMBr->DialectIndex);
cFYI(1, "Dialect: %d", server->dialect);
/* Check wct = 1 error case */
if ((pSMBr->hdr.WordCount < 13) || (server->dialect == BAD_PROT)) {
/* core returns wct = 1, but we do not ask for core - otherwise
a small wct only comes back when the dialect index is -1,
indicating we could not negotiate a common dialect */
rc = -EOPNOTSUPP;
goto neg_err_exit;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
} else if ((pSMBr->hdr.WordCount == 13)
&& ((server->dialect == LANMAN_PROT)
|| (server->dialect == LANMAN2_PROT))) {
__s16 tmp;
struct lanman_neg_rsp *rsp = (struct lanman_neg_rsp *)pSMBr;
if ((secFlags & CIFSSEC_MAY_LANMAN) ||
(secFlags & CIFSSEC_MAY_PLNTXT))
server->secType = LANMAN;
else {
cERROR(1, "mount failed weak security disabled"
" in /proc/fs/cifs/SecurityFlags");
rc = -EOPNOTSUPP;
goto neg_err_exit;
}
server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
/* even though we do not use raw we might as well set this
accurately, in case we ever find a need for it */
if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
server->max_rw = 0xFF00;
server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
} else {
server->max_rw = 0;/* do not need to use raw anyway */
server->capabilities = CAP_MPX_MODE;
}
tmp = (__s16)le16_to_cpu(rsp->ServerTimeZone);
if (tmp == -1) {
/* OS/2 often does not set timezone therefore
* we must use server time to calc time zone.
* Could deviate slightly from the right zone.
* Smallest defined timezone difference is 15 minutes
* (i.e. Nepal). Rounding up/down is done to match
* this requirement.
*/
int val, seconds, remain, result;
struct timespec ts, utc;
utc = CURRENT_TIME;
ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
rsp->SrvTime.Time, 0);
cFYI(1, "SrvTime %d sec since 1970 (utc: %d) diff: %d",
(int)ts.tv_sec, (int)utc.tv_sec,
(int)(utc.tv_sec - ts.tv_sec));
val = (int)(utc.tv_sec - ts.tv_sec);
seconds = abs(val);
result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
remain = seconds % MIN_TZ_ADJ;
if (remain >= (MIN_TZ_ADJ / 2))
result += MIN_TZ_ADJ;
if (val < 0)
result = -result;
server->timeAdj = result;
} else {
server->timeAdj = (int)tmp;
server->timeAdj *= 60; /* also in seconds */
}
cFYI(1, "server->timeAdj: %d seconds", server->timeAdj);
/* BB get server time for time conversions and add
code to use it and timezone since this is not UTC */
if (rsp->EncryptionKeyLength ==
cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
memcpy(ses->server->cryptkey, rsp->EncryptionKey,
CIFS_CRYPTO_KEY_SIZE);
} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
rc = -EIO; /* need cryptkey unless plain text */
goto neg_err_exit;
}
cFYI(1, "LANMAN negotiated");
/* we will not end up setting signing flags - as there was no
signing in LANMAN and the server did not turn the flags on */
goto signing_check;
#else /* weak security disabled */
} else if (pSMBr->hdr.WordCount == 13) {
cERROR(1, "mount failed, cifs module not built "
"with CIFS_WEAK_PW_HASH support");
rc = -EOPNOTSUPP;
#endif /* WEAK_PW_HASH */
goto neg_err_exit;
} else if (pSMBr->hdr.WordCount != 17) {
/* unknown wct */
rc = -EOPNOTSUPP;
goto neg_err_exit;
}
/* else wct == 17 NTLM */
server->sec_mode = pSMBr->SecurityMode;
if ((server->sec_mode & SECMODE_USER) == 0)
cFYI(1, "share mode security");
if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0)
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
#endif /* CIFS_WEAK_PW_HASH */
cERROR(1, "Server requests plain text password"
" but client support disabled");
if ((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
server->secType = NTLMv2;
else if (secFlags & CIFSSEC_MAY_NTLM)
server->secType = NTLM;
else if (secFlags & CIFSSEC_MAY_NTLMV2)
server->secType = NTLMv2;
else if (secFlags & CIFSSEC_MAY_KRB5)
server->secType = Kerberos;
else if (secFlags & CIFSSEC_MAY_NTLMSSP)
server->secType = RawNTLMSSP;
else if (secFlags & CIFSSEC_MAY_LANMAN)
server->secType = LANMAN;
else {
rc = -EOPNOTSUPP;
cERROR(1, "Invalid security type");
goto neg_err_exit;
}
/* else ... any others ...? */
/* one byte, so no need to convert this or EncryptionKeyLen from
little endian */
server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
/* probably no need to store and check maxvcs */
server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
server->capabilities = le32_to_cpu(pSMBr->Capabilities);
server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
server->timeAdj *= 60;
if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
CIFS_CRYPTO_KEY_SIZE);
} else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
server->capabilities & CAP_EXTENDED_SECURITY) &&
(pSMBr->EncryptionKeyLength == 0)) {
/* decode security blob */
count = get_bcc(&pSMBr->hdr);
if (count < 16) {
rc = -EIO;
goto neg_err_exit;
}
spin_lock(&cifs_tcp_ses_lock);
if (server->srv_count > 1) {
spin_unlock(&cifs_tcp_ses_lock);
if (memcmp(server->server_GUID,
pSMBr->u.extended_response.
GUID, 16) != 0) {
cFYI(1, "server UID changed");
memcpy(server->server_GUID,
pSMBr->u.extended_response.GUID,
16);
}
} else {
spin_unlock(&cifs_tcp_ses_lock);
memcpy(server->server_GUID,
pSMBr->u.extended_response.GUID, 16);
}
if (count == 16) {
server->secType = RawNTLMSSP;
} else {
rc = decode_negTokenInit(pSMBr->u.extended_response.
SecurityBlob, count - 16,
server);
if (rc == 1)
rc = 0;
else
rc = -EINVAL;
if (server->secType == Kerberos) {
if (!server->sec_kerberos &&
!server->sec_mskerberos)
rc = -EOPNOTSUPP;
} else if (server->secType == RawNTLMSSP) {
if (!server->sec_ntlmssp)
rc = -EOPNOTSUPP;
} else
rc = -EOPNOTSUPP;
}
} else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
rc = -EIO; /* no crypt key only if plain text pwd */
goto neg_err_exit;
} else
server->capabilities &= ~CAP_EXTENDED_SECURITY;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
signing_check:
#endif
if ((secFlags & CIFSSEC_MAY_SIGN) == 0) {
/* MUST_SIGN already includes the MAY_SIGN FLAG
so if this is zero it means that signing is disabled */
cFYI(1, "Signing disabled");
if (server->sec_mode & SECMODE_SIGN_REQUIRED) {
cERROR(1, "Server requires "
"packet signing to be enabled in "
"/proc/fs/cifs/SecurityFlags.");
rc = -EOPNOTSUPP;
}
server->sec_mode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
} else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
/* signing required */
cFYI(1, "Must sign - secFlags 0x%x", secFlags);
if ((server->sec_mode &
(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
cERROR(1, "signing required but server lacks support");
rc = -EOPNOTSUPP;
} else
server->sec_mode |= SECMODE_SIGN_REQUIRED;
} else {
/* signing optional ie CIFSSEC_MAY_SIGN */
if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0)
server->sec_mode &=
~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
}
neg_err_exit:
cifs_buf_release(pSMB);
cFYI(1, "negprot rc %d", rc);
return rc;
}
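/*
 * Standalone sketch of the timezone rounding in the LANMAN branch of
 * CIFSSMBNegotiate() above: round a signed UTC offset in seconds to the
 * nearest multiple of the smallest defined timezone step (15 minutes),
 * preserving the sign. For example, -950 seconds rounds to -900.
 */
#include <stdlib.h>
#define SK_MIN_TZ_ADJ (15 * 60) /* mirrors MIN_TZ_ADJ: 15 minutes in seconds */
static int round_tz_offset(int val)
{
	int seconds = abs(val);
	int result = (seconds / SK_MIN_TZ_ADJ) * SK_MIN_TZ_ADJ;
	if (seconds % SK_MIN_TZ_ADJ >= SK_MIN_TZ_ADJ / 2)
		result += SK_MIN_TZ_ADJ; /* round up to the next step */
	return val < 0 ? -result : result;
}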
int
CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
{
struct smb_hdr *smb_buffer;
int rc = 0;
cFYI(1, "In tree disconnect");
/* BB: do we need to check this? These should never be NULL. */
if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
return -EIO;
/*
* No need to return error on this operation if tid invalidated and
* closed on server already e.g. due to tcp session crashing. Also,
* the tcon is no longer on the list, so no need to take lock before
* checking this.
*/
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
(void **)&smb_buffer);
if (rc)
return rc;
rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
if (rc)
cFYI(1, "Tree disconnect failed %d", rc);
/* No need to return error on this operation if tid invalidated and
closed on server already e.g. due to tcp session crashing */
if (rc == -EAGAIN)
rc = 0;
return rc;
}
/*
* This is a no-op for now. We're not really interested in the reply, but
* rather in the fact that the server sent one and that server->lstrp
* gets updated.
*
* FIXME: maybe we should consider checking that the reply matches request?
*/
static void
cifs_echo_callback(struct mid_q_entry *mid)
{
struct TCP_Server_Info *server = mid->callback_data;
DeleteMidQEntry(mid);
atomic_dec(&server->inFlight);
wake_up(&server->request_q);
}
int
CIFSSMBEcho(struct TCP_Server_Info *server)
{
ECHO_REQ *smb;
int rc = 0;
struct kvec iov;
cFYI(1, "In echo request");
rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
if (rc)
return rc;
/* set up echo request */
smb->hdr.Tid = 0xffff;
smb->hdr.WordCount = 1;
put_unaligned_le16(1, &smb->EchoCount);
put_bcc(1, &smb->hdr);
smb->Data[0] = 'a';
inc_rfc1001_len(smb, 3);
iov.iov_base = smb;
iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true);
if (rc)
cFYI(1, "Echo request failed: %d", rc);
cifs_small_buf_release(smb);
return rc;
}
int
CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
{
LOGOFF_ANDX_REQ *pSMB;
int rc = 0;
cFYI(1, "In SMBLogoff for session disconnect");
/*
* BB: do we need to check validity of ses and server? They should
* always be valid since we have an active reference. If not, that
* should probably be a BUG()
*/
if (!ses || !ses->server)
return -EIO;
mutex_lock(&ses->session_mutex);
if (ses->need_reconnect)
goto session_already_dead; /* no need to send SMBlogoff if uid
already closed due to reconnect */
rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
if (rc) {
mutex_unlock(&ses->session_mutex);
return rc;
}
pSMB->hdr.Mid = GetNextMid(ses->server);
if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
pSMB->hdr.Uid = ses->Suid;
pSMB->AndXCommand = 0xFF;
rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
session_already_dead:
mutex_unlock(&ses->session_mutex);
/* if session dead then we do not need to do ulogoff,
since server closed smb session, no sense reporting
error */
if (rc == -EAGAIN)
rc = 0;
return rc;
}
int
CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
__u16 type, const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
struct unlink_psx_rq *pRqD;
int name_len;
int rc = 0;
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
cFYI(1, "In POSIX delete");
PsxDelete:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB add path length overrun check */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
}
params = 6 + name_len;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = 0; /* BB double check this with jra */
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
/* Setup pointer to Request Data (inode type) */
pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset);
pRqD->type = cpu_to_le16(type);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
byte_count = 3 /* pad */ + params + sizeof(struct unlink_psx_rq);
pSMB->DataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
pSMB->TotalDataCount = cpu_to_le16(sizeof(struct unlink_psx_rq));
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_UNLINK);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
cFYI(1, "Posix delete returned %d", rc);
cifs_buf_release(pSMB);
cifs_stats_inc(&tcon->num_deletes);
if (rc == -EAGAIN)
goto PsxDelete;
return rc;
}
int
CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
const struct nls_table *nls_codepage, int remap)
{
DELETE_FILE_REQ *pSMB = NULL;
DELETE_FILE_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int name_len;
DelFileRetry:
rc = smb_init(SMB_COM_DELETE, 1, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->fileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->fileName, fileName, name_len);
}
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
pSMB->BufferFormat = 0x04;
inc_rfc1001_len(pSMB, name_len + 1);
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_deletes);
if (rc)
cFYI(1, "Error in RMFile = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto DelFileRetry;
return rc;
}
int
CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName,
const struct nls_table *nls_codepage, int remap)
{
DELETE_DIRECTORY_REQ *pSMB = NULL;
DELETE_DIRECTORY_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int name_len;
cFYI(1, "In CIFSSMBRmDir");
RmDirRetry:
rc = smb_init(SMB_COM_DELETE_DIRECTORY, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, dirName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve check for buffer overruns BB */
name_len = strnlen(dirName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->DirName, dirName, name_len);
}
pSMB->BufferFormat = 0x04;
inc_rfc1001_len(pSMB, name_len + 1);
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_rmdirs);
if (rc)
cFYI(1, "Error in RMDir = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto RmDirRetry;
return rc;
}
int
CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
const char *name, const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
CREATE_DIRECTORY_REQ *pSMB = NULL;
CREATE_DIRECTORY_RSP *pSMBr = NULL;
int bytes_returned;
int name_len;
cFYI(1, "In CIFSSMBMkDir");
MkDirRetry:
rc = smb_init(SMB_COM_CREATE_DIRECTORY, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve check for buffer overruns BB */
name_len = strnlen(name, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->DirName, name, name_len);
}
pSMB->BufferFormat = 0x04;
inc_rfc1001_len(pSMB, name_len + 1);
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_mkdirs);
if (rc)
cFYI(1, "Error in Mkdir = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto MkDirRetry;
return rc;
}
int
CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags,
__u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
__u32 *pOplock, const char *name,
const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
int name_len;
int rc = 0;
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count, count;
OPEN_PSX_REQ *pdata;
OPEN_PSX_RSP *psx_rsp;
cFYI(1, "In POSIX Create");
PsxCreat:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, name,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(name, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, name, name_len);
}
params = 6 + name_len;
count = sizeof(OPEN_PSX_REQ);
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(1000); /* large enough */
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
pdata->Permissions = cpu_to_le64(mode);
pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
pdata->OpenFlags = cpu_to_le32(*pOplock);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_POSIX_OPEN);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Posix create returned %d", rc);
goto psx_create_err;
}
cFYI(1, "copying inode info");
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)) {
rc = -EIO; /* bad smb */
goto psx_create_err;
}
/* copy return information to pRetData */
psx_rsp = (OPEN_PSX_RSP *)((char *) &pSMBr->hdr.Protocol
+ le16_to_cpu(pSMBr->t2.DataOffset));
*pOplock = le16_to_cpu(psx_rsp->OplockFlags);
if (netfid)
*netfid = psx_rsp->Fid; /* cifs fid stays in le */
/* Let caller know file was created so we can set the mode. */
/* Do we care about the CreateAction in any other cases? */
if (cpu_to_le32(FILE_CREATE) == psx_rsp->CreateAction)
*pOplock |= CIFS_CREATE_ACTION;
/* check to make sure response data is there */
if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
pRetData->Type = cpu_to_le32(-1); /* unknown */
cFYI(DBG2, "unknown type");
} else {
if (get_bcc(&pSMBr->hdr) < sizeof(OPEN_PSX_RSP)
+ sizeof(FILE_UNIX_BASIC_INFO)) {
cERROR(1, "Open response data too small");
pRetData->Type = cpu_to_le32(-1);
goto psx_create_err;
}
memcpy((char *) pRetData,
(char *)psx_rsp + sizeof(OPEN_PSX_RSP),
sizeof(FILE_UNIX_BASIC_INFO));
}
psx_create_err:
cifs_buf_release(pSMB);
if (posix_flags & SMB_O_DIRECTORY)
cifs_stats_inc(&tcon->num_posixmkdirs);
else
cifs_stats_inc(&tcon->num_posixopens);
if (rc == -EAGAIN)
goto PsxCreat;
return rc;
}
static __u16 convert_disposition(int disposition)
{
__u16 ofun = 0;
switch (disposition) {
case FILE_SUPERSEDE:
ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
break;
case FILE_OPEN:
ofun = SMBOPEN_OAPPEND;
break;
case FILE_CREATE:
ofun = SMBOPEN_OCREATE;
break;
case FILE_OPEN_IF:
ofun = SMBOPEN_OCREATE | SMBOPEN_OAPPEND;
break;
case FILE_OVERWRITE:
ofun = SMBOPEN_OTRUNC;
break;
case FILE_OVERWRITE_IF:
ofun = SMBOPEN_OCREATE | SMBOPEN_OTRUNC;
break;
default:
cFYI(1, "unknown disposition %d", disposition);
ofun = SMBOPEN_OAPPEND; /* regular open */
}
return ofun;
}
static int
access_flags_to_smbopen_mode(const int access_flags)
{
int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE);
if (masked_flags == GENERIC_READ)
return SMBOPEN_READ;
else if (masked_flags == GENERIC_WRITE)
return SMBOPEN_WRITE;
/* just go for read/write */
return SMBOPEN_READWRITE;
}
int
SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 *netfid,
int *pOplock, FILE_ALL_INFO *pfile_info,
const struct nls_table *nls_codepage, int remap)
{
int rc = -EACCES;
OPENX_REQ *pSMB = NULL;
OPENX_RSP *pSMBr = NULL;
int bytes_returned;
int name_len;
__u16 count;
OldOpenRetry:
rc = smb_init(SMB_COM_OPEN_ANDX, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->AndXCommand = 0xFF; /* none */
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
count = 1; /* account for one byte pad to word boundary */
name_len =
cifsConvertToUCS((__le16 *) (pSMB->fileName + 1),
fileName, PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve check for buffer overruns BB */
count = 0; /* no pad */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->fileName, fileName, name_len);
}
if (*pOplock & REQ_OPLOCK)
pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
else if (*pOplock & REQ_BATCHOPLOCK)
pSMB->OpenFlags = cpu_to_le16(REQ_BATCHOPLOCK);
pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO);
pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags));
pSMB->Mode |= cpu_to_le16(0x40); /* deny none */
/* set file as system file if it is a special file such
as a fifo, the server expects SFU style, and there are
no Unix extensions */
if (create_options & CREATE_OPTION_SPECIAL)
pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
else /* BB FIXME BB */
pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/);
if (create_options & CREATE_OPTION_READONLY)
pSMB->FileAttributes |= cpu_to_le16(ATTR_READONLY);
/* BB FIXME BB */
/* pSMB->CreateOptions = cpu_to_le32(create_options &
CREATE_OPTIONS_MASK); */
/* BB FIXME END BB */
pSMB->Sattr = cpu_to_le16(ATTR_HIDDEN | ATTR_SYSTEM | ATTR_DIRECTORY);
pSMB->OpenFunction = cpu_to_le16(convert_disposition(openDisposition));
count += name_len;
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *)pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
} else {
/* BB verify if wct == 15 */
/* *pOplock = pSMBr->OplockLevel; */ /* BB take from action field*/
*netfid = pSMBr->Fid; /* cifs fid stays in le */
/* Let caller know file was created so we can set the mode. */
/* Do we care about the CreateAction in any other cases? */
/* BB FIXME BB */
/* if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
*pOplock |= CIFS_CREATE_ACTION; */
/* BB FIXME END */
if (pfile_info) {
pfile_info->CreationTime = 0; /* BB convert CreateTime*/
pfile_info->LastAccessTime = 0; /* BB fixme */
pfile_info->LastWriteTime = 0; /* BB fixme */
pfile_info->ChangeTime = 0; /* BB fixme */
pfile_info->Attributes =
cpu_to_le32(le16_to_cpu(pSMBr->FileAttributes));
/* the file_info buf is endian converted by caller */
pfile_info->AllocationSize =
cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile));
pfile_info->EndOfFile = pfile_info->AllocationSize;
pfile_info->NumberOfLinks = cpu_to_le32(1);
pfile_info->DeletePending = 0;
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto OldOpenRetry;
return rc;
}
int
CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 *netfid,
int *pOplock, FILE_ALL_INFO *pfile_info,
const struct nls_table *nls_codepage, int remap)
{
int rc = -EACCES;
OPEN_REQ *pSMB = NULL;
OPEN_RSP *pSMBr = NULL;
int bytes_returned;
int name_len;
__u16 count;
openRetry:
rc = smb_init(SMB_COM_NT_CREATE_ANDX, 24, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->AndXCommand = 0xFF; /* none */
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
count = 1; /* account for one byte pad to word boundary */
name_len =
cifsConvertToUCS((__le16 *) (pSMB->fileName + 1),
fileName, PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
pSMB->NameLength = cpu_to_le16(name_len);
} else { /* BB improve check for buffer overruns BB */
count = 0; /* no pad */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
pSMB->NameLength = cpu_to_le16(name_len);
strncpy(pSMB->fileName, fileName, name_len);
}
if (*pOplock & REQ_OPLOCK)
pSMB->OpenFlags = cpu_to_le32(REQ_OPLOCK);
else if (*pOplock & REQ_BATCHOPLOCK)
pSMB->OpenFlags = cpu_to_le32(REQ_BATCHOPLOCK);
pSMB->DesiredAccess = cpu_to_le32(access_flags);
pSMB->AllocationSize = 0;
/* set file as system file if it is a special file such
as a fifo, the server expects SFU style, and there are
no Unix extensions */
if (create_options & CREATE_OPTION_SPECIAL)
pSMB->FileAttributes = cpu_to_le32(ATTR_SYSTEM);
else
pSMB->FileAttributes = cpu_to_le32(ATTR_NORMAL);
/* XP does not handle ATTR_POSIX_SEMANTICS */
/* but it helps speed up case sensitive checks for other
servers such as Samba */
if (tcon->ses->capabilities & CAP_UNIX)
pSMB->FileAttributes |= cpu_to_le32(ATTR_POSIX_SEMANTICS);
if (create_options & CREATE_OPTION_READONLY)
pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY);
pSMB->ShareAccess = cpu_to_le32(FILE_SHARE_ALL);
pSMB->CreateDisposition = cpu_to_le32(openDisposition);
pSMB->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
/* BB Experiment with various impersonation levels and verify */
pSMB->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
pSMB->SecurityFlags =
SECURITY_CONTEXT_TRACKING | SECURITY_EFFECTIVE_ONLY;
count += name_len;
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *)pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
} else {
*pOplock = pSMBr->OplockLevel; /* 1 byte no need to le_to_cpu */
*netfid = pSMBr->Fid; /* cifs fid stays in le */
/* Let caller know file was created so we can set the mode. */
/* Do we care about the CreateAction in any other cases? */
if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction)
*pOplock |= CIFS_CREATE_ACTION;
if (pfile_info) {
memcpy((char *)pfile_info, (char *)&pSMBr->CreationTime,
36 /* CreationTime to Attributes */);
/* the file_info buf is endian converted by caller */
pfile_info->AllocationSize = pSMBr->AllocationSize;
pfile_info->EndOfFile = pSMBr->EndOfFile;
pfile_info->NumberOfLinks = cpu_to_le32(1);
pfile_info->DeletePending = 0;
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto openRetry;
return rc;
}
int
CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
char **buf, int *pbuf_type)
{
int rc = -EACCES;
READ_REQ *pSMB = NULL;
READ_RSP *pSMBr = NULL;
char *pReadData = NULL;
int wct;
int resp_buf_type = 0;
struct kvec iov[1];
__u32 pid = io_parms->pid;
__u16 netfid = io_parms->netfid;
__u64 offset = io_parms->offset;
struct cifs_tcon *tcon = io_parms->tcon;
unsigned int count = io_parms->length;
cFYI(1, "Reading %d bytes on fid %d", count, netfid);
if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 12;
else {
wct = 10; /* old style read */
if ((offset >> 32) > 0) {
/* can not handle this big offset for old */
return -EIO;
}
}
*nbytes = 0;
rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
/* tcon and ses pointer are checked in smb_init */
if (tcon->ses->server == NULL)
return -ECONNABORTED;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
if (wct == 12)
pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
pSMB->Remaining = 0;
pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
pSMB->MaxCountHigh = cpu_to_le32(count >> 16);
if (wct == 12)
pSMB->ByteCount = 0; /* no need to do le conversion since 0 */
else {
/* old style read */
struct smb_com_readx_req *pSMBW =
(struct smb_com_readx_req *)pSMB;
pSMBW->ByteCount = 0;
}
iov[0].iov_base = (char *)pSMB;
iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
&resp_buf_type, CIFS_LOG_ERROR);
cifs_stats_inc(&tcon->num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
cERROR(1, "Send error in read = %d", rc);
} else {
int data_length = le16_to_cpu(pSMBr->DataLengthHigh);
data_length = data_length << 16;
data_length += le16_to_cpu(pSMBr->DataLength);
*nbytes = data_length;
/* check that DataLength would not go beyond end of SMB */
if ((data_length > CIFSMaxBufSize)
|| (data_length > count)) {
cFYI(1, "bad length %d for count %d",
data_length, count);
rc = -EIO;
*nbytes = 0;
} else {
pReadData = (char *) (&pSMBr->hdr.Protocol) +
le16_to_cpu(pSMBr->DataOffset);
/* if (rc = copy_to_user(buf, pReadData, data_length)) {
cERROR(1, "Faulting on read rc = %d",rc);
rc = -EFAULT;
}*/ /* can not use copy_to_user when using page cache*/
if (*buf)
memcpy(*buf, pReadData, data_length);
}
}
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
if (*buf) {
if (resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
} else if (resp_buf_type != CIFS_NO_BUFFER) {
/* return buffer to caller to free */
*buf = iov[0].iov_base;
if (resp_buf_type == CIFS_SMALL_BUFFER)
*pbuf_type = CIFS_SMALL_BUFFER;
else if (resp_buf_type == CIFS_LARGE_BUFFER)
*pbuf_type = CIFS_LARGE_BUFFER;
} /* else no valid buffer on return - leave as null */
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
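/*
 * Standalone sketch of the length validation in CIFSSMBRead() above:
 * reassemble a 32-bit byte count from the two 16-bit wire fields and reject
 * anything larger than the request or the maximum buffer, which keeps the
 * later memcpy within bounds.
 */
#include <stdint.h>
static int read_len_ok(uint16_t len_hi, uint16_t len_lo,
		uint32_t requested, uint32_t max_buf, uint32_t *out_len)
{
	uint32_t len = ((uint32_t)len_hi << 16) | len_lo;
	if (len > max_buf || len > requested)
		return 0; /* caller should fail with -EIO */
	*out_len = len;
	return 1;
}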
int
CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, const char *buf,
const char __user *ubuf, const int long_op)
{
int rc = -EACCES;
WRITE_REQ *pSMB = NULL;
WRITE_RSP *pSMBr = NULL;
int bytes_returned, wct;
__u32 bytes_sent;
__u16 byte_count;
__u32 pid = io_parms->pid;
__u16 netfid = io_parms->netfid;
__u64 offset = io_parms->offset;
struct cifs_tcon *tcon = io_parms->tcon;
unsigned int count = io_parms->length;
*nbytes = 0;
/* cFYI(1, "write at %lld %d bytes", offset, count);*/
if (tcon->ses == NULL)
return -ECONNABORTED;
if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 14;
else {
wct = 12;
if ((offset >> 32) > 0) {
/* can not handle big offset for old srv */
return -EIO;
}
}
rc = smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
/* tcon and ses pointer are checked in smb_init */
if (tcon->ses->server == NULL) {
	cifs_buf_release(pSMB);
	return -ECONNABORTED;
}
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
if (wct == 14)
pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
pSMB->Reserved = 0xFFFFFFFF;
pSMB->WriteMode = 0;
pSMB->Remaining = 0;
/* We could send more data per request in some cases: if the server
   returned the LARGE_WRITE_X capability and our buffer is big enough,
   or if we converted to iovecs on socket writes and thereby eliminated
   the copy into the CIFS buffer */
if (tcon->ses->capabilities & CAP_LARGE_WRITE_X) {
bytes_sent = min_t(const unsigned int, CIFSMaxBufSize, count);
} else {
bytes_sent = (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)
& ~0xFF;
}
if (bytes_sent > count)
bytes_sent = count;
pSMB->DataOffset =
cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
if (buf)
memcpy(pSMB->Data, buf, bytes_sent);
else if (ubuf) {
if (copy_from_user(pSMB->Data, ubuf, bytes_sent)) {
cifs_buf_release(pSMB);
return -EFAULT;
}
} else if (count != 0) {
/* No buffer */
cifs_buf_release(pSMB);
return -EINVAL;
} /* else setting file size with write of zero bytes */
if (wct == 14)
byte_count = bytes_sent + 1; /* pad */
else /* wct == 12 */
byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */
pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF);
pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
inc_rfc1001_len(pSMB, byte_count);
if (wct == 14)
pSMB->ByteCount = cpu_to_le16(byte_count);
else { /* old style write has byte count 4 bytes earlier
so 4 bytes pad */
struct smb_com_writex_req *pSMBW =
(struct smb_com_writex_req *)pSMB;
pSMBW->ByteCount = cpu_to_le16(byte_count);
}
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, long_op);
cifs_stats_inc(&tcon->num_writes);
if (rc) {
cFYI(1, "Send error in write = %d", rc);
} else {
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
/*
* Mask off high 16 bits when bytes written as returned by the
* server is greater than bytes requested by the client. Some
* OS/2 servers are known to set incorrect CountHigh values.
*/
if (*nbytes > count)
*nbytes &= 0xFFFF;
}
cifs_buf_release(pSMB);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
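/*
 * Sketch only: the reverse operation, splitting a 32-bit length into the
 * DataLengthLow/DataLengthHigh wire fields as CIFSSMBWrite does above.
 * The helper name is hypothetical.
 */
static inline void smb_split_count16(unsigned int len, __le16 *low, __le16 *high)
{
	*low = cpu_to_le16(len & 0xFFFF);	/* bits 0..15 */
	*high = cpu_to_le16(len >> 16);		/* bits 16..31 */
}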
void
cifs_writedata_release(struct kref *refcount)
{
struct cifs_writedata *wdata = container_of(refcount,
struct cifs_writedata, refcount);
if (wdata->cfile)
cifsFileInfo_put(wdata->cfile);
kfree(wdata);
}
/*
* Write failed with a retryable error. Resend the write request. It's also
* possible that the page was redirtied so re-clean the page.
*/
static void
cifs_writev_requeue(struct cifs_writedata *wdata)
{
int i, rc;
struct inode *inode = wdata->cfile->dentry->d_inode;
for (i = 0; i < wdata->nr_pages; i++) {
lock_page(wdata->pages[i]);
clear_page_dirty_for_io(wdata->pages[i]);
}
do {
rc = cifs_async_writev(wdata);
} while (rc == -EAGAIN);
for (i = 0; i < wdata->nr_pages; i++) {
if (rc != 0)
SetPageError(wdata->pages[i]);
unlock_page(wdata->pages[i]);
}
mapping_set_error(inode->i_mapping, rc);
kref_put(&wdata->refcount, cifs_writedata_release);
}
static void
cifs_writev_complete(struct work_struct *work)
{
struct cifs_writedata *wdata = container_of(work,
struct cifs_writedata, work);
struct inode *inode = wdata->cfile->dentry->d_inode;
int i = 0;
if (wdata->result == 0) {
cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
wdata->bytes);
} else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
return cifs_writev_requeue(wdata);
for (i = 0; i < wdata->nr_pages; i++) {
struct page *page = wdata->pages[i];
if (wdata->result == -EAGAIN)
__set_page_dirty_nobuffers(page);
else if (wdata->result < 0)
SetPageError(page);
end_page_writeback(page);
page_cache_release(page);
}
if (wdata->result != -EAGAIN)
mapping_set_error(inode->i_mapping, wdata->result);
kref_put(&wdata->refcount, cifs_writedata_release);
}
struct cifs_writedata *
cifs_writedata_alloc(unsigned int nr_pages)
{
struct cifs_writedata *wdata;
/* nr_pages - 1 in the size calculation below would wrap */
if (nr_pages == 0) {
cERROR(1, "%s: called with nr_pages == 0!", __func__);
return NULL;
}
/* writedata + number of page pointers */
wdata = kzalloc(sizeof(*wdata) +
sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
if (wdata != NULL) {
INIT_WORK(&wdata->work, cifs_writev_complete);
kref_init(&wdata->refcount);
}
return wdata;
}
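/*
 * Illustration of the allocation sizing in cifs_writedata_alloc: the struct
 * already embeds one page pointer (assuming the trailing pages[1] array
 * convention), so only nr_pages - 1 extra pointers are appended, and
 * nr_pages == 0 would make the subtraction wrap, hence the guard above.
 * Sketch only, not called anywhere.
 */
static inline size_t cifs_writedata_size(unsigned int nr_pages)
{
	return sizeof(struct cifs_writedata) +
		sizeof(struct page *) * (nr_pages - 1);
}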
/*
* Check the midState and signature on received buffer (if any), and queue the
* workqueue completion task.
*/
static void
cifs_writev_callback(struct mid_q_entry *mid)
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
unsigned int written;
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
switch (mid->midState) {
case MID_RESPONSE_RECEIVED:
wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
if (wdata->result != 0)
break;
written = le16_to_cpu(smb->CountHigh);
written <<= 16;
written += le16_to_cpu(smb->Count);
/*
* Mask off high 16 bits when bytes written as returned
* by the server is greater than bytes requested by the
* client. OS/2 servers are known to set incorrect
* CountHigh values.
*/
if (written > wdata->bytes)
written &= 0xFFFF;
if (written < wdata->bytes)
wdata->result = -ENOSPC;
else
wdata->bytes = written;
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
wdata->result = -EAGAIN;
break;
default:
wdata->result = -EIO;
break;
}
queue_work(system_nrt_wq, &wdata->work);
DeleteMidQEntry(mid);
atomic_dec(&tcon->ses->server->inFlight);
wake_up(&tcon->ses->server->request_q);
}
/* cifs_async_writev - send an async write, and set up mid to handle result */
int
cifs_async_writev(struct cifs_writedata *wdata)
{
int i, rc = -EACCES;
WRITE_REQ *smb = NULL;
int wct;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct inode *inode = wdata->cfile->dentry->d_inode;
struct kvec *iov = NULL;
if (tcon->ses->capabilities & CAP_LARGE_FILES) {
wct = 14;
} else {
wct = 12;
if (wdata->offset >> 32 > 0) {
/* can not handle big offset for old srv */
return -EIO;
}
}
rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb);
if (rc)
goto async_writev_out;
/* 1 iov per page + 1 for header */
iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS);
if (iov == NULL) {
rc = -ENOMEM;
goto async_writev_out;
}
smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid);
smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
smb->Fid = wdata->cfile->netfid;
smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF);
if (wct == 14)
smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32);
smb->Reserved = 0xFFFFFFFF;
smb->WriteMode = 0;
smb->Remaining = 0;
smb->DataOffset =
cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
/* 4 for RFC1001 length + 1 for BCC */
iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
iov[0].iov_base = smb;
/* marshal up the pages into iov array */
wdata->bytes = 0;
for (i = 0; i < wdata->nr_pages; i++) {
iov[i + 1].iov_len = min(inode->i_size -
page_offset(wdata->pages[i]),
(loff_t)PAGE_CACHE_SIZE);
iov[i + 1].iov_base = kmap(wdata->pages[i]);
wdata->bytes += iov[i + 1].iov_len;
}
cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF);
smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16);
if (wct == 14) {
inc_rfc1001_len(&smb->hdr, wdata->bytes + 1);
put_bcc(wdata->bytes + 1, &smb->hdr);
} else {
/* wct == 12 */
struct smb_com_writex_req *smbw =
(struct smb_com_writex_req *)smb;
inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5);
put_bcc(wdata->bytes + 5, &smbw->hdr);
iov[0].iov_len += 4; /* pad bigger by four bytes */
}
kref_get(&wdata->refcount);
rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
cifs_writev_callback, wdata, false);
if (rc == 0)
cifs_stats_inc(&tcon->num_writes);
else
kref_put(&wdata->refcount, cifs_writedata_release);
/* send is done, unmap pages */
for (i = 0; i < wdata->nr_pages; i++)
kunmap(wdata->pages[i]);
async_writev_out:
cifs_small_buf_release(smb);
kfree(iov);
return rc;
}
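/*
 * Sketch of the per-page length rule used when marshalling pages in
 * cifs_async_writev above: each page contributes a full PAGE_CACHE_SIZE
 * except the page containing EOF, which is trimmed to i_size.
 * Hypothetical helper, not wired into the code.
 */
static inline loff_t cifs_page_write_len(loff_t i_size, loff_t page_start)
{
	return min(i_size - page_start, (loff_t)PAGE_CACHE_SIZE);
}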
int
CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec,
const int long_op)
{
int rc = -EACCES;
WRITE_REQ *pSMB = NULL;
int wct;
int smb_hdr_len;
int resp_buf_type = 0;
__u32 pid = io_parms->pid;
__u16 netfid = io_parms->netfid;
__u64 offset = io_parms->offset;
struct cifs_tcon *tcon = io_parms->tcon;
unsigned int count = io_parms->length;
*nbytes = 0;
cFYI(1, "write2 at %lld %d bytes", (long long)offset, count);
if (tcon->ses->capabilities & CAP_LARGE_FILES) {
wct = 14;
} else {
wct = 12;
if ((offset >> 32) > 0) {
/* can not handle big offset for old srv */
return -EIO;
}
}
rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
/* tcon and ses pointer are checked in smb_init */
if (tcon->ses->server == NULL) {
	cifs_small_buf_release(pSMB);
	return -ECONNABORTED;
}
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid;
pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
if (wct == 14)
pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
pSMB->Reserved = 0xFFFFFFFF;
pSMB->WriteMode = 0;
pSMB->Remaining = 0;
pSMB->DataOffset =
cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
pSMB->DataLengthLow = cpu_to_le16(count & 0xFFFF);
pSMB->DataLengthHigh = cpu_to_le16(count >> 16);
/* header + 1 byte pad */
smb_hdr_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 1;
if (wct == 14)
inc_rfc1001_len(pSMB, count + 1);
else /* wct == 12 */
inc_rfc1001_len(pSMB, count + 5); /* smb data starts later */
if (wct == 14)
pSMB->ByteCount = cpu_to_le16(count + 1);
else /* wct == 12 */ /* bigger pad, smaller smb hdr, keep offset ok */ {
struct smb_com_writex_req *pSMBW =
(struct smb_com_writex_req *)pSMB;
pSMBW->ByteCount = cpu_to_le16(count + 5);
}
iov[0].iov_base = pSMB;
if (wct == 14)
iov[0].iov_len = smb_hdr_len + 4;
else /* wct == 12 pad bigger by four bytes */
iov[0].iov_len = smb_hdr_len + 8;
rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
long_op);
cifs_stats_inc(&tcon->num_writes);
if (rc) {
cFYI(1, "Send error Write2 = %d", rc);
} else if (resp_buf_type == 0) {
/* presumably this can not happen, but best to be safe */
rc = -EIO;
} else {
WRITE_RSP *pSMBr = (WRITE_RSP *)iov[0].iov_base;
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
/*
* Mask off high 16 bits when bytes written as returned by the
* server is greater than bytes requested by the client. OS/2
* servers are known to set incorrect CountHigh values.
*/
if (*nbytes > count)
*nbytes &= 0xFFFF;
}
/* request buffer (pSMB) was already freed by SendReceive2 */
if (resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
int
CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
const bool waitFlag, const __u8 oplock_level)
{
int rc = 0;
LOCK_REQ *pSMB = NULL;
/* no LOCK_RSP needed; there is no response data other than rc to parse */
int bytes_returned;
int timeout = 0;
__u16 count;
cFYI(1, "CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock);
rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
if (rc)
return rc;
if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
timeout = CIFS_ASYNC_OP; /* no response expected */
pSMB->Timeout = 0;
} else if (waitFlag) {
timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */
} else {
pSMB->Timeout = 0;
}
pSMB->NumberOfLocks = cpu_to_le16(numLock);
pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
pSMB->LockType = lockType;
pSMB->OplockLevel = oplock_level;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = smb_file_id; /* netfid stays le */
if ((numLock != 0) || (numUnlock != 0)) {
pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
/* BB where to store pid high? */
pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32));
pSMB->Locks[0].OffsetLow = cpu_to_le32((u32)offset);
pSMB->Locks[0].OffsetHigh = cpu_to_le32((u32)(offset>>32));
count = sizeof(LOCKING_ANDX_RANGE);
} else {
/* oplock break */
count = 0;
}
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
if (waitFlag) {
rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMB, &bytes_returned);
cifs_small_buf_release(pSMB);
} else {
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB,
timeout);
/* SMB buffer freed by function above */
}
cifs_stats_inc(&tcon->num_locks);
if (rc)
cFYI(1, "Send error in Lock = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
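/*
 * LOCKING_ANDX ranges carry 64-bit offsets and lengths as two little-endian
 * 32-bit halves.  A hypothetical equivalent of the field-by-field
 * assignments in CIFSSMBLock above (sketch only):
 */
static inline void smb_split_u64(__u64 val, __le32 *low, __le32 *high)
{
	*low = cpu_to_le32((u32)val);
	*high = cpu_to_le32((u32)(val >> 32));
}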
int
CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const int get_flag, const __u64 len,
struct file_lock *pLockData, const __u16 lock_type,
const bool waitFlag)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
struct cifs_posix_lock *parm_data;
int rc = 0;
int timeout = 0;
int bytes_returned = 0;
int resp_buf_type = 0;
__u16 params, param_offset, offset, byte_count, count;
struct kvec iov[1];
cFYI(1, "Posix Lock");
if (pLockData == NULL)
return -EINVAL;
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
count = sizeof(struct cifs_posix_lock);
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
if (get_flag)
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
else
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
parm_data = (struct cifs_posix_lock *)
(((char *) &pSMB->hdr.Protocol) + offset);
parm_data->lock_type = cpu_to_le16(lock_type);
if (waitFlag) {
timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
parm_data->lock_flags = cpu_to_le16(1);
pSMB->Timeout = cpu_to_le32(-1);
} else
pSMB->Timeout = 0;
parm_data->pid = cpu_to_le32(current->tgid);
parm_data->start = cpu_to_le64(pLockData->fl_start);
parm_data->length = cpu_to_le64(len); /* normalize negative numbers */
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->Fid = smb_file_id;
pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
if (waitFlag) {
rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned);
} else {
iov[0].iov_base = (char *)pSMB;
iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
&resp_buf_type, timeout);
pSMB = NULL; /* request buf already freed by SendReceive2. Do
not try to free it twice below on exit */
pSMBr = (struct smb_com_transaction2_sfi_rsp *)iov[0].iov_base;
}
if (rc) {
cFYI(1, "Send error in Posix Lock = %d", rc);
} else if (get_flag) {
/* lock structure can be returned on get */
__u16 data_offset;
__u16 data_count;
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < sizeof(*parm_data)) {
rc = -EIO; /* bad smb */
goto plk_err_exit;
}
data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
data_count = le16_to_cpu(pSMBr->t2.DataCount);
if (data_count < sizeof(struct cifs_posix_lock)) {
rc = -EIO;
goto plk_err_exit;
}
parm_data = (struct cifs_posix_lock *)
((char *)&pSMBr->hdr.Protocol + data_offset);
if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_UNLCK))
pLockData->fl_type = F_UNLCK;
else {
if (parm_data->lock_type ==
__constant_cpu_to_le16(CIFS_RDLCK))
pLockData->fl_type = F_RDLCK;
else if (parm_data->lock_type ==
__constant_cpu_to_le16(CIFS_WRLCK))
pLockData->fl_type = F_WRLCK;
pLockData->fl_start = le64_to_cpu(parm_data->start);
pLockData->fl_end = pLockData->fl_start +
le64_to_cpu(parm_data->length) - 1;
pLockData->fl_pid = le32_to_cpu(parm_data->pid);
}
}
plk_err_exit:
if (pSMB)
cifs_small_buf_release(pSMB);
if (resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
else if (resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
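/*
 * The lock-type decode performed by CIFSSMBPosixLock above, restated as a
 * hypothetical helper (sketch; unlike the code above, unknown values fall
 * through to F_WRLCK here for brevity):
 */
static inline int cifs_posix_lock_to_fl_type(__le16 lock_type)
{
	if (lock_type == __constant_cpu_to_le16(CIFS_UNLCK))
		return F_UNLCK;
	if (lock_type == __constant_cpu_to_le16(CIFS_RDLCK))
		return F_RDLCK;
	return F_WRLCK;
}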
int
CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
{
int rc = 0;
CLOSE_REQ *pSMB = NULL;
cFYI(1, "In CIFSSMBClose");
/* do not retry on dead session on close */
rc = small_smb_init(SMB_COM_CLOSE, 3, tcon, (void **) &pSMB);
if (rc == -EAGAIN)
return 0;
if (rc)
return rc;
pSMB->FileID = (__u16) smb_file_id;
pSMB->LastWriteTime = 0xFFFFFFFF;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
cifs_stats_inc(&tcon->num_closes);
if (rc) {
if (rc != -EINTR) {
/* EINTR is expected when user ctl-c to kill app */
cERROR(1, "Send error in Close = %d", rc);
}
}
/* Since session is dead, file will be closed on server already */
if (rc == -EAGAIN)
rc = 0;
return rc;
}
int
CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
{
int rc = 0;
FLUSH_REQ *pSMB = NULL;
cFYI(1, "In CIFSSMBFlush");
rc = small_smb_init(SMB_COM_FLUSH, 1, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->FileID = (__u16) smb_file_id;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
cifs_stats_inc(&tcon->num_flushes);
if (rc)
cERROR(1, "Send error in Flush = %d", rc);
return rc;
}
int
CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
RENAME_REQ *pSMB = NULL;
RENAME_RSP *pSMBr = NULL;
int bytes_returned;
int name_len, name_len2;
__u16 count;
cFYI(1, "In CIFSSMBRename");
renameRetry:
rc = smb_init(SMB_COM_RENAME, 1, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->BufferFormat = 0x04;
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
ATTR_DIRECTORY);
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
pSMB->OldFileName[name_len] = 0x04; /* pad */
/* protocol requires ASCII signature byte on Unicode string */
pSMB->OldFileName[name_len + 1] = 0x00;
name_len2 =
cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fromName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->OldFileName, fromName, name_len);
name_len2 = strnlen(toName, PATH_MAX);
name_len2++; /* trailing null */
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
name_len2++; /* trailing null */
name_len2++; /* signature byte */
}
count = 1 /* 1st signature byte */ + name_len + name_len2;
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_renames);
if (rc)
cFYI(1, "Send error in rename = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto renameRetry;
return rc;
}
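/*
 * On-the-wire name layout built by CIFSSMBRename (and by the other
 * two-name calls below): an ASCII buffer-format byte 0x04 precedes each
 * name, even when the names themselves are Unicode:
 *
 *   0x04 <old name> <nul> 0x04 <new name> <nul>
 *
 * hence count = 1 (leading 0x04) + name_len + name_len2, where each
 * name_len already includes its terminator and, for the second name,
 * its signature byte.
 */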
int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
int netfid, const char *target_name,
const struct nls_table *nls_codepage, int remap)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
struct set_file_rename *rename_info;
char *data_offset;
char dummy_string[30];
int rc = 0;
int bytes_returned = 0;
int len_of_str;
__u16 params, param_offset, offset, count, byte_count;
cFYI(1, "Rename to File by handle");
rc = smb_init(SMB_COM_TRANSACTION2, 15, pTcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
rename_info = (struct set_file_rename *) data_offset;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
byte_count = 3 /* pad */ + params;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
/* if no target name was supplied, a dummy "cifs<mid>" name is built below */
rename_info->overwrite = cpu_to_le32(1);
rename_info->root_fid = 0;
/* unicode only call */
if (target_name == NULL) {
sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
dummy_string, 24, nls_codepage, remap);
} else {
len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
target_name, PATH_MAX, nls_codepage,
remap);
}
rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str);
byte_count += count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->Fid = netfid;
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_RENAME_INFORMATION);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&pTcon->num_t2renames);
if (rc)
cFYI(1, "Send error in Rename (by file handle) = %d", rc);
cifs_buf_release(pSMB);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
int
CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName,
const __u16 target_tid, const char *toName, const int flags,
const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
COPY_REQ *pSMB = NULL;
COPY_RSP *pSMBr = NULL;
int bytes_returned;
int name_len, name_len2;
__u16 count;
cFYI(1, "In CIFSSMBCopy");
copyRetry:
rc = smb_init(SMB_COM_COPY, 1, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->BufferFormat = 0x04;
pSMB->Tid2 = target_tid;
pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName,
fromName, PATH_MAX, nls_codepage,
remap);
name_len++; /* trailing null */
name_len *= 2;
pSMB->OldFileName[name_len] = 0x04; /* pad */
/* protocol requires ASCII signature byte on Unicode string */
pSMB->OldFileName[name_len + 1] = 0x00;
name_len2 =
cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fromName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->OldFileName, fromName, name_len);
name_len2 = strnlen(toName, PATH_MAX);
name_len2++; /* trailing null */
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
name_len2++; /* trailing null */
name_len2++; /* signature byte */
}
count = 1 /* 1st signature byte */ + name_len + name_len2;
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in copy = %d with %d files copied",
rc, le16_to_cpu(pSMBr->CopyCount));
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto copyRetry;
return rc;
}
int
CIFSUnixCreateSymLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
char *data_offset;
int name_len;
int name_len_target;
int rc = 0;
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
cFYI(1, "In Symlink Unix style");
createSymLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifs_strtoUCS((__le16 *) pSMB->FileName, fromName, PATH_MAX
/* find define for this maxpathcomponent */
, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fromName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fromName, name_len);
}
params = 6 + name_len;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len_target =
cifs_strtoUCS((__le16 *) data_offset, toName, PATH_MAX
/* find define for this maxpathcomponent */
, nls_codepage);
name_len_target++; /* trailing null */
name_len_target *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len_target = strnlen(toName, PATH_MAX);
name_len_target++; /* trailing null */
strncpy(data_offset, toName, name_len_target);
}
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max on data count below from sess */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
byte_count = 3 /* pad */ + params + name_len_target;
pSMB->DataCount = cpu_to_le16(name_len_target);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_LINK);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_symlinks);
if (rc)
cFYI(1, "Send error in SetPathInfo create symlink = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto createSymLinkRetry;
return rc;
}
int
CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
char *data_offset;
int name_len;
int name_len_target;
int rc = 0;
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
cFYI(1, "In Create Hard link Unix style");
createHardLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, toName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(toName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, toName, name_len);
}
params = 6 + name_len;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len_target =
cifsConvertToUCS((__le16 *) data_offset, fromName, PATH_MAX,
nls_codepage, remap);
name_len_target++; /* trailing null */
name_len_target *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len_target = strnlen(fromName, PATH_MAX);
name_len_target++; /* trailing null */
strncpy(data_offset, fromName, name_len_target);
}
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max on data count below from sess*/
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
byte_count = 3 /* pad */ + params + name_len_target;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->DataCount = cpu_to_le16(name_len_target);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_HLINK);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_hardlinks);
if (rc)
cFYI(1, "Send error in SetPathInfo (hard link) = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto createHardLinkRetry;
return rc;
}
int
CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
NT_RENAME_REQ *pSMB = NULL;
RENAME_RSP *pSMBr = NULL;
int bytes_returned;
int name_len, name_len2;
__u16 count;
cFYI(1, "In CIFSCreateHardLink");
winCreateHardLinkRetry:
rc = smb_init(SMB_COM_NT_RENAME, 4, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
ATTR_DIRECTORY);
pSMB->Flags = cpu_to_le16(CREATE_HARD_LINK);
pSMB->ClusterCount = 0;
pSMB->BufferFormat = 0x04;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
/* protocol specifies ASCII buffer format (0x04) for unicode */
pSMB->OldFileName[name_len] = 0x04;
pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
name_len2 =
cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fromName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->OldFileName, fromName, name_len);
name_len2 = strnlen(toName, PATH_MAX);
name_len2++; /* trailing null */
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
name_len2++; /* trailing null */
name_len2++; /* signature byte */
}
count = 1 /* string type byte */ + name_len + name_len2;
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_hardlinks);
if (rc)
cFYI(1, "Send error in hard link (NT rename) = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto winCreateHardLinkRetry;
return rc;
}
int
CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, char **symlinkinfo,
const struct nls_table *nls_codepage)
{
/* SMB_QUERY_FILE_UNIX_LINK */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int name_len;
__u16 params, byte_count;
char *data_start;
cFYI(1, "In QPathSymLinkInfo (Unix) for path %s", searchName);
querySymLinkRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifs_strtoUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_LINK);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QuerySymLinkInfo = %d", rc);
} else {
/* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
/* BB also check enough total bytes returned */
if (rc || get_bcc(&pSMBr->hdr) < 2)
rc = -EIO;
else {
bool is_unicode;
u16 count = le16_to_cpu(pSMBr->t2.DataCount);
data_start = ((char *) &pSMBr->hdr.Protocol) +
le16_to_cpu(pSMBr->t2.DataOffset);
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
is_unicode = true;
else
is_unicode = false;
/* BB FIXME investigate remapping reserved chars here */
*symlinkinfo = cifs_strndup_from_ucs(data_start, count,
is_unicode, nls_codepage);
if (!*symlinkinfo)
rc = -ENOMEM;
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto querySymLinkRetry;
return rc;
}
#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
/*
* Recent Windows versions now create symlinks more frequently
* and they use the "reparse point" mechanism below. We can of course
* do symlinks nicely to Samba and other servers which support the
* CIFS Unix Extensions and we can also do SFU symlinks and "client only"
* "MF" symlinks optionally, but for recent Windows we really need to
* reenable the code below and fix the cifs_symlink callers to handle this.
 * In the interim this code has been moved to its own config option so
 * it is not compiled in by default until the callers are fixed up and
 * it is better tested.
*/
int
CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *symlinkinfo, const int buflen, __u16 fid,
const struct nls_table *nls_codepage)
{
int rc = 0;
int bytes_returned;
struct smb_com_transaction_ioctl_req *pSMB;
struct smb_com_transaction_ioctl_rsp *pSMBr;
cFYI(1, "In Windows reparse style QueryLink for path %s", searchName);
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->TotalParameterCount = 0 ;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le32(2);
/* BB find exact data count max from sess structure BB */
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
pSMB->MaxSetupCount = 4;
pSMB->Reserved = 0;
pSMB->ParameterOffset = 0;
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 4;
pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT);
pSMB->IsFsctl = 1; /* FSCTL */
pSMB->IsRootFlag = 0;
pSMB->Fid = fid; /* file handle always le */
pSMB->ByteCount = 0;
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QueryReparseLinkInfo = %d", rc);
} else { /* decode response */
__u32 data_offset = le32_to_cpu(pSMBr->DataOffset);
__u32 data_count = le32_to_cpu(pSMBr->DataCount);
if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) {
/* BB also check enough total bytes returned */
rc = -EIO; /* bad smb */
goto qreparse_out;
}
if (data_count && (data_count < 2048)) {
char *end_of_smb = 2 /* sizeof byte count */ +
get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
struct reparse_data *reparse_buf =
(struct reparse_data *)
((char *)&pSMBr->hdr.Protocol
+ data_offset);
if ((char *)reparse_buf >= end_of_smb) {
rc = -EIO;
goto qreparse_out;
}
if ((reparse_buf->LinkNamesBuf +
reparse_buf->TargetNameOffset +
reparse_buf->TargetNameLen) > end_of_smb) {
cFYI(1, "reparse buf beyond SMB");
rc = -EIO;
goto qreparse_out;
}
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
cifs_from_ucs2(symlinkinfo, (__le16 *)
(reparse_buf->LinkNamesBuf +
reparse_buf->TargetNameOffset),
buflen,
reparse_buf->TargetNameLen,
nls_codepage, 0);
} else { /* ASCII names */
strncpy(symlinkinfo,
reparse_buf->LinkNamesBuf +
reparse_buf->TargetNameOffset,
min_t(const int, buflen,
reparse_buf->TargetNameLen));
}
} else {
rc = -EIO;
cFYI(1, "Invalid return data count on "
"get reparse info ioctl");
}
/* NUL terminate in case the target filled the whole buffer; this
   assumes the caller's buffer holds buflen + 1 bytes */
symlinkinfo[buflen] = 0;
cFYI(1, "readlink result - %s", symlinkinfo);
}
qreparse_out:
cifs_buf_release(pSMB);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
#endif /* CIFS_SYMLINK_EXPERIMENTAL */ /* BB temporarily unused */
#ifdef CONFIG_CIFS_POSIX
/*Convert an Access Control Entry from wire format to local POSIX xattr format*/
static void cifs_convert_ace(posix_acl_xattr_entry *ace,
struct cifs_posix_ace *cifs_ace)
{
/* u8 cifs fields do not need le conversion */
ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag);
ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
/* cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id); */
return;
}
/* Convert ACL from CIFS POSIX wire format to local Linux POSIX ACL xattr */
static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
const int acl_type, const int size_of_data_area)
{
int size = 0;
int i;
__u16 count;
struct cifs_posix_ace *pACE;
struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)trgt;
if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
return -EOPNOTSUPP;
if (acl_type & ACL_TYPE_ACCESS) {
count = le16_to_cpu(cifs_acl->access_entry_count);
pACE = &cifs_acl->ace_array[0];
size = sizeof(struct cifs_posix_acl);
size += sizeof(struct cifs_posix_ace) * count;
/* check if we would go beyond end of SMB */
if (size_of_data_area < size) {
cFYI(1, "bad CIFS POSIX ACL size %d vs. %d",
size_of_data_area, size);
return -EINVAL;
}
} else if (acl_type & ACL_TYPE_DEFAULT) {
count = le16_to_cpu(cifs_acl->access_entry_count);
size = sizeof(struct cifs_posix_acl);
size += sizeof(struct cifs_posix_ace) * count;
/* skip past access ACEs to get to default ACEs */
pACE = &cifs_acl->ace_array[count];
count = le16_to_cpu(cifs_acl->default_entry_count);
size += sizeof(struct cifs_posix_ace) * count;
/* check if we would go beyond end of SMB */
if (size_of_data_area < size)
return -EINVAL;
} else {
/* illegal type */
return -EINVAL;
}
size = posix_acl_xattr_size(count);
if ((buflen == 0) || (local_acl == NULL)) {
/* used to query ACL EA size */
} else if (size > buflen) {
return -ERANGE;
} else /* buffer big enough */ {
local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
for (i = 0; i < count ; i++) {
cifs_convert_ace(&local_acl->a_entries[i], pACE);
pACE++;
}
}
return size;
}
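/*
 * Worked example of the bounds math in cifs_copy_posix_acl above: for a
 * default-ACL query the access ACEs must be skipped first, so the minimum
 * acceptable wire size is the header plus both ACE arrays.  Hypothetical
 * helper, not used by the code:
 */
static inline int cifs_posix_acl_wire_size(__u16 access_count,
					   __u16 default_count)
{
	return sizeof(struct cifs_posix_acl) +
		sizeof(struct cifs_posix_ace) * (access_count + default_count);
}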
static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
const posix_acl_xattr_entry *local_ace)
{
__u16 rc = 0; /* 0 = ACL converted ok */
cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
cifs_ace->cifs_e_tag = le16_to_cpu(local_ace->e_tag);
/* BB is there a better way to handle the large uid? */
if (local_ace->e_id == cpu_to_le32(-1)) {
/* le conversion of -1 is a no-op on any arch, but it can not hurt */
cifs_ace->cifs_uid = cpu_to_le64(-1);
} else
cifs_ace->cifs_uid = cpu_to_le64(le32_to_cpu(local_ace->e_id));
/*cFYI(1, "perm %d tag %d id %d",ace->e_perm,ace->e_tag,ace->e_id);*/
return rc;
}
/* Convert ACL from local Linux POSIX xattr to CIFS POSIX ACL wire format */
static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
const int buflen, const int acl_type)
{
__u16 rc = 0;
struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
posix_acl_xattr_header *local_acl = (posix_acl_xattr_header *)pACL;
int count;
int i;
if ((buflen == 0) || (pACL == NULL) || (cifs_acl == NULL))
return 0;
count = posix_acl_xattr_count((size_t)buflen);
cFYI(1, "setting acl with %d entries from buf of length %d and "
"version of %d",
count, buflen, le32_to_cpu(local_acl->a_version));
if (le32_to_cpu(local_acl->a_version) != 2) {
cFYI(1, "unknown POSIX ACL version %d",
le32_to_cpu(local_acl->a_version));
return 0;
}
cifs_acl->version = cpu_to_le16(CIFS_ACL_VERSION);
if (acl_type == ACL_TYPE_ACCESS)
cifs_acl->access_entry_count = cpu_to_le16(count);
else if (acl_type == ACL_TYPE_DEFAULT)
cifs_acl->default_entry_count = cpu_to_le16(count);
else {
cFYI(1, "unknown ACL type %d", acl_type);
return 0;
}
for (i = 0; i < count; i++) {
rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
&local_acl->a_entries[i]);
if (rc != 0) {
/* ACE not converted */
break;
}
}
if (rc == 0) {
rc = (__u16)(count * sizeof(struct cifs_posix_ace));
rc += sizeof(struct cifs_posix_acl);
/* BB add check to make sure ACL does not overflow SMB */
}
return rc;
}
int
CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *acl_inf, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_POSIX_ACL */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int name_len;
__u16 params, byte_count;
cFYI(1, "In GetPosixACL (Unix) for path %s", searchName);
queryAclRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
pSMB->FileName[name_len] = 0;
pSMB->FileName[name_len+1] = 0;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max data count below from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(
offsetof(struct smb_com_transaction2_qpi_req,
InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_ACL);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_acl_get);
if (rc) {
cFYI(1, "Send error in Query POSIX ACL = %d", rc);
} else {
/* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
/* BB also check enough total bytes returned */
if (rc || get_bcc(&pSMBr->hdr) < 2)
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
rc = cifs_copy_posix_acl(acl_inf,
(char *)&pSMBr->hdr.Protocol+data_offset,
buflen, acl_type, count);
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto queryAclRetry;
return rc;
}
int
CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *fileName,
const char *local_acl, const int buflen,
const int acl_type,
const struct nls_table *nls_codepage, int remap)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
char *parm_data;
int name_len;
int rc = 0;
int bytes_returned = 0;
__u16 params, byte_count, data_count, param_offset, offset;
cFYI(1, "In SetPosixACL (Unix) for path %s", fileName);
setAclRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
}
params = 6 + name_len;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find max SMB size from sess */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
parm_data = ((char *) &pSMB->hdr.Protocol) + offset;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
/* convert to on the wire format for POSIX ACL */
data_count = ACL_to_cifs_posix(parm_data, local_acl, buflen, acl_type);
if (data_count == 0) {
rc = -EOPNOTSUPP;
goto setACLerrorExit;
}
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_ACL);
byte_count = 3 /* pad */ + params + data_count;
pSMB->DataCount = cpu_to_le16(data_count);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
cFYI(1, "Set POSIX ACL returned %d", rc);
setACLerrorExit:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto setAclRetry;
return rc;
}
/* BB fix tabs in this function FIXME BB */
int
CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
{
int rc = 0;
struct smb_t2_qfi_req *pSMB = NULL;
struct smb_t2_qfi_rsp *pSMBr = NULL;
int bytes_returned;
__u16 params, byte_count;
cFYI(1, "In GetExtAttr");
if (tcon == NULL)
return -ENODEV;
GetExtAttrRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2 /* level */ + 2 /* fid */;
pSMB->t2.TotalDataCount = 0;
pSMB->t2.MaxParameterCount = cpu_to_le16(4);
/* BB find exact max data count below from sess structure BB */
pSMB->t2.MaxDataCount = cpu_to_le16(4000);
pSMB->t2.MaxSetupCount = 0;
pSMB->t2.Reserved = 0;
pSMB->t2.Flags = 0;
pSMB->t2.Timeout = 0;
pSMB->t2.Reserved2 = 0;
pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
Fid) - 4);
pSMB->t2.DataCount = 0;
pSMB->t2.DataOffset = 0;
pSMB->t2.SetupCount = 1;
pSMB->t2.Reserved3 = 0;
pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->t2.TotalParameterCount = cpu_to_le16(params);
pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_ATTR_FLAGS);
pSMB->Pad = 0;
pSMB->Fid = netfid;
inc_rfc1001_len(pSMB, byte_count);
pSMB->t2.ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "error %d in GetExtAttr", rc);
} else {
/* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
/* BB also check enough total bytes returned */
if (rc || get_bcc(&pSMBr->hdr) < 2)
/* if rc, should we check for EOPNOTSUPP and
disable the srvino flag? or in caller? */
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
struct file_chattr_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count != 16) {
cFYI(1, "Illegal size ret in GetExtAttr");
rc = -EIO;
goto GetExtAttrOut;
}
pfinfo = (struct file_chattr_info *)
(data_offset + (char *) &pSMBr->hdr.Protocol);
*pExtAttrBits = le64_to_cpu(pfinfo->mode);
*pMask = le64_to_cpu(pfinfo->mask);
}
}
GetExtAttrOut:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto GetExtAttrRetry;
return rc;
}
#endif /* CONFIG_POSIX */
#ifdef CONFIG_CIFS_ACL
/*
 * Initialize NT TRANSACT SMB into small smb request buffer.  This assumes
 * that all NT TRANSACTS that we init here have total parm and data under
 * about 400 bytes (to fit in the small cifs buffer size), which has been
 * the case so far.  NB: the setup words themselves, ByteCount, MaxSetupCount
 * (size of returned setup area) and MaxParameterCount (returned parms size)
 * must be set by the caller.
*/
static int
smb_init_nttransact(const __u16 sub_command, const int setup_count,
const int parm_len, struct cifs_tcon *tcon,
void **ret_buf)
{
int rc;
__u32 temp_offset;
struct smb_com_ntransact_req *pSMB;
rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
(void **)&pSMB);
if (rc)
return rc;
*ret_buf = (void *)pSMB;
pSMB->Reserved = 0;
pSMB->TotalParameterCount = cpu_to_le32(parm_len);
pSMB->TotalDataCount = 0;
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->DataCount = pSMB->TotalDataCount;
temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
(setup_count * 2) - 4 /* for rfc1001 length itself */;
pSMB->ParameterOffset = cpu_to_le32(temp_offset);
pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
pSMB->SubCommand = cpu_to_le16(sub_command);
return 0;
}
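/*
 * The offset arithmetic above, restated (illustrative): NT TRANSACT offsets
 * are measured from the start of the SMB header, which begins 4 bytes after
 * the RFC1001 length field, hence the "- 4".  Hypothetical equivalent:
 */
static inline __u32 ntransact_parm_offset(int setup_count)
{
	return offsetof(struct smb_com_ntransact_req, Parms) +
		(setup_count * 2) - 4;
}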
static int
validate_ntransact(char *buf, char **ppparm, char **ppdata,
__u32 *pparmlen, __u32 *pdatalen)
{
char *end_of_smb;
__u32 data_count, data_offset, parm_count, parm_offset;
struct smb_com_ntransact_rsp *pSMBr;
u16 bcc;
*pdatalen = 0;
*pparmlen = 0;
if (buf == NULL)
return -EINVAL;
pSMBr = (struct smb_com_ntransact_rsp *)buf;
bcc = get_bcc(&pSMBr->hdr);
end_of_smb = 2 /* sizeof byte count */ + bcc +
(char *)&pSMBr->ByteCount;
data_offset = le32_to_cpu(pSMBr->DataOffset);
data_count = le32_to_cpu(pSMBr->DataCount);
parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
parm_count = le32_to_cpu(pSMBr->ParameterCount);
*ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
*ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
/* should we also check that parm and data areas do not overlap? */
if (*ppparm > end_of_smb) {
cFYI(1, "parms start after end of smb");
return -EINVAL;
} else if (parm_count + *ppparm > end_of_smb) {
cFYI(1, "parm end after end of smb");
return -EINVAL;
} else if (*ppdata > end_of_smb) {
cFYI(1, "data starts after end of smb");
return -EINVAL;
} else if (data_count + *ppdata > end_of_smb) {
cFYI(1, "data %p + count %d (%p) past smb end %p start %p",
*ppdata, data_count, (data_count + *ppdata),
end_of_smb, pSMBr);
return -EINVAL;
} else if (parm_count + data_count > bcc) {
cFYI(1, "parm count and data count larger than SMB");
return -EINVAL;
}
*pdatalen = data_count;
*pparmlen = parm_count;
return 0;
}
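/*
 * Sketch of the span check validate_ntransact applies to both the parm and
 * data regions: a region is usable only if it starts at or before the end
 * of the SMB and does not run past it.  (Hypothetical helper, not called.)
 */
static inline int ntransact_span_ok(const char *start, __u32 len,
				    const char *end_of_smb)
{
	return start <= end_of_smb && start + len <= end_of_smb;
}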
/* Get Security Descriptor (by handle) from remote server for a file or dir */
int
CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd **acl_inf, __u32 *pbuflen)
{
int rc = 0;
int buf_type = 0;
QUERY_SEC_DESC_REQ *pSMB;
struct kvec iov[1];
cFYI(1, "GetCifsACL");
*pbuflen = 0;
*acl_inf = NULL;
rc = smb_init_nttransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0,
8 /* parm len */, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->MaxParameterCount = cpu_to_le32(4);
/* BB TEST with big acls that might need to be e.g. larger than 16K */
pSMB->MaxSetupCount = 0;
pSMB->Fid = fid; /* file handle always le */
pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
CIFS_ACL_DACL);
pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
inc_rfc1001_len(pSMB, 11);
iov[0].iov_base = (char *)pSMB;
iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
0);
cifs_stats_inc(&tcon->num_acl_get);
if (rc) {
cFYI(1, "Send error in QuerySecDesc = %d", rc);
} else { /* decode response */
__le32 *parm;
__u32 parm_len;
__u32 acl_len;
struct smb_com_ntransact_rsp *pSMBr;
char *pdata;
/* validate_nttransact */
rc = validate_ntransact(iov[0].iov_base, (char **)&parm,
&pdata, &parm_len, pbuflen);
if (rc)
goto qsec_out;
pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base;
cFYI(1, "smb %p parm %p data %p", pSMBr, parm, *acl_inf);
if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
rc = -EIO; /* bad smb */
*pbuflen = 0;
goto qsec_out;
}
/* BB check that data area is minimum length and as big as acl_len */
acl_len = le32_to_cpu(*parm);
if (acl_len != *pbuflen) {
cERROR(1, "acl length %d does not match %d",
acl_len, *pbuflen);
if (*pbuflen > acl_len)
*pbuflen = acl_len;
}
/* check if buffer is big enough for the acl
header followed by the smallest SID */
if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) ||
(*pbuflen >= 64 * 1024)) {
cERROR(1, "bad acl length %d", *pbuflen);
rc = -EINVAL;
*pbuflen = 0;
} else {
*acl_inf = kmalloc(*pbuflen, GFP_KERNEL);
if (*acl_inf == NULL) {
	*pbuflen = 0;
	rc = -ENOMEM;
} else {
	/* copy only after the allocation is known to have succeeded */
	memcpy(*acl_inf, pdata, *pbuflen);
}
}
}
qsec_out:
if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
/* request buffer (pSMB) was already freed by SendReceive2 */
return rc;
}
int
CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd *pntsd, __u32 acllen)
{
__u16 byte_count, param_count, data_count, param_offset, data_offset;
int rc = 0;
int bytes_returned = 0;
SET_SEC_DESC_REQ *pSMB = NULL;
NTRANSACT_RSP *pSMBr = NULL;
setCifsAclRetry:
rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return (rc);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
param_count = 8;
param_offset = offsetof(struct smb_com_transaction_ssec_req, Fid) - 4;
data_count = acllen;
data_offset = param_offset + param_count;
byte_count = 3 /* pad */ + param_count;
pSMB->DataCount = cpu_to_le32(data_count);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->MaxParameterCount = cpu_to_le32(4);
pSMB->MaxDataCount = cpu_to_le32(16384);
pSMB->ParameterCount = cpu_to_le32(param_count);
pSMB->ParameterOffset = cpu_to_le32(param_offset);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->DataOffset = cpu_to_le32(data_offset);
pSMB->SetupCount = 0;
pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_SET_SECURITY_DESC);
pSMB->ByteCount = cpu_to_le16(byte_count + data_count);
pSMB->Fid = fid; /* file handle always le */
pSMB->Reserved2 = 0;
pSMB->AclFlags = cpu_to_le32(CIFS_ACL_DACL);
if (pntsd && acllen) {
memcpy((char *) &pSMBr->hdr.Protocol + data_offset,
(char *) pntsd,
acllen);
inc_rfc1001_len(pSMB, byte_count + data_count);
} else
inc_rfc1001_len(pSMB, byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cFYI(1, "SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc);
if (rc)
cFYI(1, "Set CIFS ACL returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto setCifsAclRetry;
return (rc);
}
#endif /* CONFIG_CIFS_ACL */
/* Legacy Query Path Information call for lookup to old servers such
as Win9x/WinME */
int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *pFinfo,
const struct nls_table *nls_codepage, int remap)
{
QUERY_INFORMATION_REQ *pSMB;
QUERY_INFORMATION_RSP *pSMBr;
int rc = 0;
int bytes_returned;
int name_len;
cFYI(1, "In SMBQPath path %s", searchName);
QInfRetry:
rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else {
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
pSMB->BufferFormat = 0x04;
name_len++; /* account for buffer type byte */
inc_rfc1001_len(pSMB, (__u16)name_len);
pSMB->ByteCount = cpu_to_le16(name_len);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QueryInfo = %d", rc);
} else if (pFinfo) {
struct timespec ts;
__u32 time = le32_to_cpu(pSMBr->last_write_time);
/* decode response */
/* BB FIXME - add time zone adjustment BB */
memset(pFinfo, 0, sizeof(FILE_ALL_INFO));
ts.tv_nsec = 0;
ts.tv_sec = time;
/* decode time fields */
pFinfo->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts));
pFinfo->LastWriteTime = pFinfo->ChangeTime;
pFinfo->LastAccessTime = 0;
pFinfo->AllocationSize =
cpu_to_le64(le32_to_cpu(pSMBr->size));
pFinfo->EndOfFile = pFinfo->AllocationSize;
pFinfo->Attributes =
cpu_to_le32(le16_to_cpu(pSMBr->attr));
} else
rc = -EIO; /* bad buffer passed in */
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QInfRetry;
return rc;
}
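/* Query all file information by open handle
* (TRANS2_QUERY_FILE_INFORMATION, level SMB_QUERY_FILE_ALL_INFO).
* Usage sketch (hypothetical caller):
*
*	FILE_ALL_INFO info;
*
*	rc = CIFSSMBQFileInfo(xid, tcon, netfid, &info);
*	if (rc == 0)
*		eof = le64_to_cpu(info.EndOfFile);
*/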
int
CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_ALL_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
struct smb_t2_qfi_rsp *pSMBr = NULL;
int rc = 0;
int bytes_returned;
__u16 params, byte_count;
QFileInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2 /* level */ + 2 /* fid */;
pSMB->t2.TotalDataCount = 0;
pSMB->t2.MaxParameterCount = cpu_to_le16(4);
/* BB find exact max data count below from sess structure BB */
pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->t2.MaxSetupCount = 0;
pSMB->t2.Reserved = 0;
pSMB->t2.Flags = 0;
pSMB->t2.Timeout = 0;
pSMB->t2.Reserved2 = 0;
pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
Fid) - 4);
pSMB->t2.DataCount = 0;
pSMB->t2.DataOffset = 0;
pSMB->t2.SetupCount = 1;
pSMB->t2.Reserved3 = 0;
pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->t2.TotalParameterCount = cpu_to_le16(params);
pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
pSMB->Pad = 0;
pSMB->Fid = netfid;
inc_rfc1001_len(pSMB, byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc) /* BB add auto retry on EOPNOTSUPP? */
rc = -EIO;
else if (get_bcc(&pSMBr->hdr) < 40)
rc = -EIO; /* bad smb */
else if (pFindData) {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
memcpy((char *) pFindData,
(char *) &pSMBr->hdr.Protocol +
data_offset, sizeof(FILE_ALL_INFO));
} else
rc = -ENOMEM;
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QFileInfoRetry;
return rc;
}
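/* Path-based variant of the query above (TRANS2_QUERY_PATH_INFORMATION).
* When "legacy" is set, the pre-NT SMB_INFO_STANDARD level is requested
* instead of SMB_QUERY_FILE_ALL_INFO, and only a FILE_INFO_STANDARD
* sized chunk of the response is copied out. */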
int
CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_ALL_INFO *pFindData,
int legacy /* old style infolevel */,
const struct nls_table *nls_codepage, int remap)
{
/* level 263 SMB_QUERY_FILE_ALL_INFO */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int name_len;
__u16 params, byte_count;
/* cFYI(1, "In QPathInfo path %s", searchName); */
QPathInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
if (legacy)
pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
else
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc) /* BB add auto retry on EOPNOTSUPP? */
rc = -EIO;
else if (!legacy && get_bcc(&pSMBr->hdr) < 40)
rc = -EIO; /* bad smb */
else if (legacy && get_bcc(&pSMBr->hdr) < 24)
rc = -EIO; /* 24 or 26 expected but we do not read
last field */
else if (pFindData) {
int size;
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
/* On legacy responses we do not read the last field,
EAsize. Fortunately we can skip it: its size varies by
subdialect and differs between Set and Get (two vs. four
bytes), and we do not need it here */
if (legacy)
size = sizeof(FILE_INFO_STANDARD);
else
size = sizeof(FILE_ALL_INFO);
memcpy((char *) pFindData,
(char *) &pSMBr->hdr.Protocol +
data_offset, size);
} else
rc = -ENOMEM;
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QPathInfoRetry;
return rc;
}
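/* Query FILE_UNIX_BASIC_INFO by open handle. Only meaningful when the
* server negotiated the CIFS Unix Extensions; otherwise the response
* is rejected below as malformed. */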
int
CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
struct smb_t2_qfi_rsp *pSMBr = NULL;
int rc = 0;
int bytes_returned;
__u16 params, byte_count;
UnixQFileInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2 /* level */ + 2 /* fid */;
pSMB->t2.TotalDataCount = 0;
pSMB->t2.MaxParameterCount = cpu_to_le16(4);
/* BB find exact max data count below from sess structure BB */
pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->t2.MaxSetupCount = 0;
pSMB->t2.Reserved = 0;
pSMB->t2.Flags = 0;
pSMB->t2.Timeout = 0;
pSMB->t2.Reserved2 = 0;
pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
Fid) - 4);
pSMB->t2.DataCount = 0;
pSMB->t2.DataOffset = 0;
pSMB->t2.SetupCount = 1;
pSMB->t2.Reserved3 = 0;
pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->t2.TotalParameterCount = cpu_to_le16(params);
pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
pSMB->Pad = 0;
pSMB->Fid = netfid;
inc_rfc1001_len(pSMB, byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
"Unix Extensions can be disabled on mount "
"by specifying the nosfu mount option.");
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
memcpy((char *) pFindData,
(char *) &pSMBr->hdr.Protocol +
data_offset,
sizeof(FILE_UNIX_BASIC_INFO));
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto UnixQFileInfoRetry;
return rc;
}
int
CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_FILE_UNIX_BASIC */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned = 0;
int name_len;
__u16 params, byte_count;
cFYI(1, "In QPathInfo (Unix) the path %s", searchName);
UnixQPathInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
"Unix Extensions can be disabled on mount "
"by specifying the nosfu mount option.");
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
memcpy((char *) pFindData,
(char *) &pSMBr->hdr.Protocol +
data_offset,
sizeof(FILE_UNIX_BASIC_INFO));
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto UnixQPathInfoRetry;
return rc;
}
/* xid, tcon, searchName and codepage are input parms, rest are returned */
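/* Open a directory search. The request name is built as
* "<searchName><dirsep>*" so the server enumerates the directory, and
* on success ownership of the response buffer transfers to
* psrch_inf->ntwrk_buf_start (freed later by the search code), while
* *pnetfid receives the search handle for CIFSFindNext/CIFSFindClose. */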
int
CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName,
const struct nls_table *nls_codepage,
__u16 *pnetfid,
struct cifs_search_info *psrch_inf, int remap, const char dirsep)
{
/* level 257 SMB_ */
TRANSACTION2_FFIRST_REQ *pSMB = NULL;
TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
T2_FFIRST_RSP_PARMS *parms;
int rc = 0;
int bytes_returned = 0;
int name_len;
__u16 params, byte_count;
cFYI(1, "In FindFirst for %s", searchName);
findFirstRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
/* We cannot add the asterisk earlier in case
it got remapped to 0xF03A as if it were part of the
directory name instead of a wildcard */
name_len *= 2;
pSMB->FileName[name_len] = dirsep;
pSMB->FileName[name_len+1] = 0;
pSMB->FileName[name_len+2] = '*';
pSMB->FileName[name_len+3] = 0;
name_len += 4; /* now the trailing null */
pSMB->FileName[name_len] = 0; /* null terminate just in case */
pSMB->FileName[name_len+1] = 0;
name_len += 2;
} else { /* BB add check for overrun of SMB buf BB */
name_len = strnlen(searchName, PATH_MAX);
/* BB fix here and in unicode clause above ie
if (name_len > buffersize-header)
free buffer exit; BB */
strncpy(pSMB->FileName, searchName, name_len);
pSMB->FileName[name_len] = dirsep;
pSMB->FileName[name_len+1] = '*';
pSMB->FileName[name_len+2] = 0;
name_len += 3;
}
params = 12 + name_len /* includes null */ ;
pSMB->TotalDataCount = 0; /* no EAs */
pSMB->MaxParameterCount = cpu_to_le16(10);
pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf -
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(
offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
- 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1; /* one byte, no need to make endian neutral */
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST);
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
ATTR_DIRECTORY);
pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
CIFS_SEARCH_RETURN_RESUME);
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
/* BB what should we set StorageType to? Does it matter? BB */
pSMB->SearchStorageType = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_ffirst);
if (rc) {/* BB add logic to retry regular search if Unix search
rejected unexpectedly by server */
/* BB Add code to handle unsupported level rc */
cFYI(1, "Error in FindFirst = %d", rc);
cifs_buf_release(pSMB);
/* BB eventually could optimize out free and realloc of buf */
/* for this case */
if (rc == -EAGAIN)
goto findFirstRetry;
} else { /* decode response */
/* BB remember to free buffer if error BB */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc == 0) {
unsigned int lnoff;
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
psrch_inf->unicode = true;
else
psrch_inf->unicode = false;
psrch_inf->ntwrk_buf_start = (char *)pSMBr;
psrch_inf->smallBuf = 0;
psrch_inf->srch_entries_start =
(char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.ParameterOffset));
if (parms->EndofSearch)
psrch_inf->endOfSearch = true;
else
psrch_inf->endOfSearch = false;
psrch_inf->entries_in_buffer =
le16_to_cpu(parms->SearchCount);
psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
psrch_inf->entries_in_buffer;
lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
lnoff) {
cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL;
return rc;
}
psrch_inf->last_entry = psrch_inf->srch_entries_start +
lnoff;
*pnetfid = parms->SearchHandle;
} else {
cifs_buf_release(pSMB);
}
}
return rc;
}
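/* Continue a directory search using the handle and resume key/name
* returned by the previous FindFirst/FindNext. Once the new response
* validates, the previous network buffer held in psrch_inf is released
* and replaced with this one. */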
int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
__u16 searchHandle, struct cifs_search_info *psrch_inf)
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
T2_FNEXT_RSP_PARMS *parms;
char *response_data;
int rc = 0;
int bytes_returned, name_len;
__u16 params, byte_count;
cFYI(1, "In FindNext");
if (psrch_inf->endOfSearch)
return -ENOENT;
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 14; /* includes 2 bytes of null string, converted to LE below*/
byte_count = 0;
pSMB->TotalDataCount = 0; /* no EAs */
pSMB->MaxParameterCount = cpu_to_le16(8);
pSMB->MaxDataCount =
cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) &
0xFFFFFF00);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(
offsetof(struct smb_com_transaction2_fnext_req, SearchHandle) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_NEXT);
pSMB->SearchHandle = searchHandle; /* always kept as le */
pSMB->SearchCount =
cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
pSMB->ResumeKey = psrch_inf->resume_key;
pSMB->SearchFlags =
cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
name_len = psrch_inf->resume_name_len;
params += name_len;
if (name_len < PATH_MAX) {
memcpy(pSMB->ResumeFileName, psrch_inf->presume_name, name_len);
byte_count += name_len;
/* 14 byte parm len above enough for 2 byte null terminator */
pSMB->ResumeFileName[name_len] = 0;
pSMB->ResumeFileName[name_len+1] = 0;
} else {
rc = -EINVAL;
goto FNext2_err_exit;
}
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_fnext);
if (rc) {
if (rc == -EBADF) {
psrch_inf->endOfSearch = true;
cifs_buf_release(pSMB);
rc = 0; /* search probably was closed at end of search*/
} else
cFYI(1, "FindNext returned = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc == 0) {
unsigned int lnoff;
/* BB fixme add lock for file (srch_info) struct here */
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
psrch_inf->unicode = true;
else
psrch_inf->unicode = false;
response_data = (char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.ParameterOffset);
parms = (T2_FNEXT_RSP_PARMS *)response_data;
response_data = (char *)&pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
if (psrch_inf->smallBuf)
cifs_small_buf_release(
psrch_inf->ntwrk_buf_start);
else
cifs_buf_release(psrch_inf->ntwrk_buf_start);
psrch_inf->srch_entries_start = response_data;
psrch_inf->ntwrk_buf_start = (char *)pSMB;
psrch_inf->smallBuf = 0;
if (parms->EndofSearch)
psrch_inf->endOfSearch = true;
else
psrch_inf->endOfSearch = false;
psrch_inf->entries_in_buffer =
le16_to_cpu(parms->SearchCount);
psrch_inf->index_of_last_entry +=
psrch_inf->entries_in_buffer;
lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
lnoff) {
cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL;
return rc;
} else
psrch_inf->last_entry =
psrch_inf->srch_entries_start + lnoff;
/* cFYI(1, "fnxt2 entries in buf %d index_of_last %d",
psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */
/* BB fixme add unlock here */
}
}
/* BB On error, should we leave previous search buf (and count and
last entry fields) intact or free the previous one? */
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
FNext2_err_exit:
if (rc != 0)
cifs_buf_release(pSMB);
return rc;
}
int
CIFSFindClose(const int xid, struct cifs_tcon *tcon,
const __u16 searchHandle)
{
int rc = 0;
FINDCLOSE_REQ *pSMB = NULL;
cFYI(1, "In CIFSSMBFindClose");
rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);
/* no sense returning error if session restarted
as file handle has been closed */
if (rc == -EAGAIN)
return 0;
if (rc)
return rc;
pSMB->FileID = searchHandle;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
cERROR(1, "Send error in FindClose = %d", rc);
cifs_stats_inc(&tcon->num_fclose);
/* Since session is dead, search handle closed on server already */
if (rc == -EAGAIN)
rc = 0;
return rc;
}
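/* Ask the server for its 64-bit internal file identifier
* (SMB_QUERY_FILE_INTERNAL_INFO). Used, for example, when inode
* numbers are taken from the server rather than generated locally. */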
int
CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
__u64 *inode_number,
const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int name_len, bytes_returned;
__u16 params, byte_count;
cFYI(1, "In GetSrvInodeNum for %s", searchName);
if (tcon == NULL)
return -ENODEV;
GetInodeNumberRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max data count below from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_INTERNAL_INFO);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "error %d in QueryInternalInfo", rc);
} else {
/* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
/* BB also check enough total bytes returned */
if (rc || get_bcc(&pSMBr->hdr) < 2)
/* If rc, should we check for EOPNOTSUPP and
disable the srvino flag? Or in the caller? */
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
__u16 count = le16_to_cpu(pSMBr->t2.DataCount);
struct file_internal_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count < 8) {
cFYI(1, "Illegal size ret in QryIntrnlInf");
rc = -EIO;
goto GetInodeNumOut;
}
pfinfo = (struct file_internal_info *)
(data_offset + (char *) &pSMBr->hdr.Protocol);
*inode_number = le64_to_cpu(pfinfo->UniqueId);
}
}
GetInodeNumOut:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto GetInodeNumberRetry;
return rc;
}
/* parses DFS referral V3 structure
* caller is responsible for freeing target_nodes
* returns:
* on success - 0
* on failure - errno
*/
static int
parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
unsigned int *num_of_nodes,
struct dfs_info3_param **target_nodes,
const struct nls_table *nls_codepage, int remap,
const char *searchName)
{
int i, rc = 0;
char *data_end;
bool is_unicode;
struct dfs_referral_level_3 *ref;
if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
is_unicode = true;
else
is_unicode = false;
*num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals);
if (*num_of_nodes < 1) {
cERROR(1, "num_referrals: must be at least > 0,"
"but we get num_referrals = %d\n", *num_of_nodes);
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals);
if (ref->VersionNumber != cpu_to_le16(3)) {
cERROR(1, "Referrals of V%d version are not supported,"
"should be V3", le16_to_cpu(ref->VersionNumber));
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
/* get the upper boundary of the resp buffer */
data_end = (char *)(&(pSMBr->PathConsumed)) +
le16_to_cpu(pSMBr->t2.DataCount);
cFYI(1, "num_referrals: %d dfs flags: 0x%x ...\n",
*num_of_nodes,
le32_to_cpu(pSMBr->DFSFlags));
*target_nodes = kzalloc(sizeof(struct dfs_info3_param) *
*num_of_nodes, GFP_KERNEL);
if (*target_nodes == NULL) {
cERROR(1, "Failed to allocate buffer for target_nodes\n");
rc = -ENOMEM;
goto parse_DFS_referrals_exit;
}
/* collect necessary data from referrals */
for (i = 0; i < *num_of_nodes; i++) {
char *temp;
int max_len;
struct dfs_info3_param *node = (*target_nodes)+i;
node->flags = le32_to_cpu(pSMBr->DFSFlags);
if (is_unicode) {
__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
GFP_KERNEL);
if (tmp == NULL) {
rc = -ENOMEM;
goto parse_DFS_referrals_exit;
}
cifsConvertToUCS((__le16 *) tmp, searchName,
PATH_MAX, nls_codepage, remap);
node->path_consumed = cifs_ucs2_bytes(tmp,
le16_to_cpu(pSMBr->PathConsumed),
nls_codepage);
kfree(tmp);
} else
node->path_consumed = le16_to_cpu(pSMBr->PathConsumed);
node->server_type = le16_to_cpu(ref->ServerType);
node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
/* copy DfsPath */
temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
max_len = data_end - temp;
node->path_name = cifs_strndup_from_ucs(temp, max_len,
is_unicode, nls_codepage);
if (!node->path_name) {
rc = -ENOMEM;
goto parse_DFS_referrals_exit;
}
/* copy link target UNC */
temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
max_len = data_end - temp;
node->node_name = cifs_strndup_from_ucs(temp, max_len,
is_unicode, nls_codepage);
if (!node->node_name)
rc = -ENOMEM;
}
parse_DFS_referrals_exit:
if (rc) {
free_dfs_info_array(*target_nodes, *num_of_nodes);
*target_nodes = NULL;
*num_of_nodes = 0;
}
return rc;
}
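/* Request DFS referrals for searchName. The request is sent on the
* session's IPC$ tree (ses->ipc_tid) rather than a regular tcon, and
* the V3 referral response is decoded by parse_DFS_referrals() above
* into an array the caller must free with free_dfs_info_array(). */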
int
CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
const unsigned char *searchName,
struct dfs_info3_param **target_nodes,
unsigned int *num_of_nodes,
const struct nls_table *nls_codepage, int remap)
{
/* TRANS2_GET_DFS_REFERRAL */
TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL;
TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int name_len;
__u16 params, byte_count;
*num_of_nodes = 0;
*target_nodes = NULL;
cFYI(1, "In GetDFSRefer the path %s", searchName);
if (ses == NULL)
return -ENODEV;
getDFSRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, NULL, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
/* server pointer checked in called function,
but should never be null here anyway */
pSMB->hdr.Mid = GetNextMid(ses->server);
pSMB->hdr.Tid = ses->ipc_tid;
pSMB->hdr.Uid = ses->Suid;
if (ses->capabilities & CAP_STATUS32)
pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS;
if (ses->capabilities & CAP_DFS)
pSMB->hdr.Flags2 |= SMBFLG2_DFS;
if (ses->capabilities & CAP_UNICODE) {
pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
name_len =
cifsConvertToUCS((__le16 *) pSMB->RequestFileName,
searchName, PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->RequestFileName, searchName, name_len);
}
if (ses->server) {
if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
pSMB->hdr.Uid = ses->Suid;
params = 2 /* level */ + name_len /*includes null */ ;
pSMB->TotalDataCount = 0;
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->MaxParameterCount = 0;
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_get_dfs_refer_req, MaxReferralLevel) - 4);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_GET_DFS_REFERRAL);
byte_count = params + 3 /* pad */ ;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->MaxReferralLevel = cpu_to_le16(3);
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in GetDFSRefer = %d", rc);
goto GetDFSRefExit;
}
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
/* BB Also check if enough total bytes returned? */
if (rc || get_bcc(&pSMBr->hdr) < 17) {
rc = -EIO; /* bad smb */
goto GetDFSRefExit;
}
cFYI(1, "Decoding GetDFSRefer response BCC: %d Offset %d",
get_bcc(&pSMBr->hdr),
le16_to_cpu(pSMBr->t2.DataOffset));
/* parse returned result into more usable form */
rc = parse_DFS_referrals(pSMBr, num_of_nodes,
target_nodes, nls_codepage, remap,
searchName);
GetDFSRefExit:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto getDFSRetry;
return rc;
}
/* Query File System Info such as free space to old servers such as Win 9x */
int
SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
{
/* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
TRANSACTION2_QFSI_RSP *pSMBr = NULL;
FILE_SYSTEM_ALLOC_INFO *response_data;
int rc = 0;
int bytes_returned = 0;
__u16 params, byte_count;
cFYI(1, "OldQFSInfo");
oldQFSInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2; /* level */
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
pSMB->InformationLevel = cpu_to_le16(SMB_INFO_ALLOCATION);
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QFSInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < 18)
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
cFYI(1, "qfsinf resp BCC: %d Offset %d",
get_bcc(&pSMBr->hdr), data_offset);
response_data = (FILE_SYSTEM_ALLOC_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
FSData->f_bsize =
le16_to_cpu(response_data->BytesPerSector) *
le32_to_cpu(response_data->
SectorsPerAllocationUnit);
FSData->f_blocks =
le32_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
le32_to_cpu(response_data->FreeAllocationUnits);
cFYI(1, "Blocks: %lld Free: %lld Block size %ld",
(unsigned long long)FSData->f_blocks,
(unsigned long long)FSData->f_bfree,
FSData->f_bsize);
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto oldQFSInfoRetry;
return rc;
}
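/* Query file system size info (SMB_QUERY_FS_SIZE_INFO). The block size
* reported to statfs is computed as BytesPerSector times
* SectorsPerAllocationUnit, i.e. the allocation unit (cluster) size. */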
int
CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
{
/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
TRANSACTION2_QFSI_RSP *pSMBr = NULL;
FILE_SYSTEM_INFO *response_data;
int rc = 0;
int bytes_returned = 0;
__u16 params, byte_count;
cFYI(1, "In QFSInfo");
QFSInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2; /* level */
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_SIZE_INFO);
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QFSInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < 24)
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
response_data = (FILE_SYSTEM_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
FSData->f_bsize =
le32_to_cpu(response_data->BytesPerSector) *
le32_to_cpu(response_data->
SectorsPerAllocationUnit);
FSData->f_blocks =
le64_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
le64_to_cpu(response_data->FreeAllocationUnits);
cFYI(1, "Blocks: %lld Free: %lld Block size %ld",
(unsigned long long)FSData->f_blocks,
(unsigned long long)FSData->f_bfree,
FSData->f_bsize);
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QFSInfoRetry;
return rc;
}
int
CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
TRANSACTION2_QFSI_RSP *pSMBr = NULL;
FILE_SYSTEM_ATTRIBUTE_INFO *response_data;
int rc = 0;
int bytes_returned = 0;
__u16 params, byte_count;
cFYI(1, "In QFSAttributeInfo");
QFSAttributeRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2; /* level */
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_ATTRIBUTE_INFO);
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cERROR(1, "Send error in QFSAttributeInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < 13) {
/* BB also check if enough bytes returned */
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
response_data = (FILE_SYSTEM_ATTRIBUTE_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
memcpy(&tcon->fsAttrInfo, response_data,
sizeof(FILE_SYSTEM_ATTRIBUTE_INFO));
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QFSAttributeRetry;
return rc;
}
int
CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
TRANSACTION2_QFSI_RSP *pSMBr = NULL;
FILE_SYSTEM_DEVICE_INFO *response_data;
int rc = 0;
int bytes_returned = 0;
__u16 params, byte_count;
cFYI(1, "In QFSDeviceInfo");
QFSDeviceRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2; /* level */
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FS_DEVICE_INFO);
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QFSDeviceInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) <
sizeof(FILE_SYSTEM_DEVICE_INFO))
rc = -EIO; /* bad smb */
else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
response_data = (FILE_SYSTEM_DEVICE_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
memcpy(&tcon->fsDevInfo, response_data,
sizeof(FILE_SYSTEM_DEVICE_INFO));
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QFSDeviceRetry;
return rc;
}
int
CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon)
{
/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
TRANSACTION2_QFSI_RSP *pSMBr = NULL;
FILE_SYSTEM_UNIX_INFO *response_data;
int rc = 0;
int bytes_returned = 0;
__u16 params, byte_count;
cFYI(1, "In QFSUnixInfo");
QFSUnixRetry:
rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
(void **) &pSMB, (void **) &pSMBr);
if (rc)
return rc;
params = 2; /* level */
pSMB->TotalDataCount = 0;
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(100);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
byte_count = params + 1 /* pad */ ;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_CIFS_UNIX_INFO);
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cERROR(1, "Send error in QFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < 13) {
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
response_data = (FILE_SYSTEM_UNIX_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
memcpy(&tcon->fsUnixInfo, response_data,
sizeof(FILE_SYSTEM_UNIX_INFO));
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QFSUnixRetry;
return rc;
}
int
CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 cap)
{
/* level 0x200 SMB_SET_CIFS_UNIX_INFO */
TRANSACTION2_SETFSI_REQ *pSMB = NULL;
TRANSACTION2_SETFSI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count;
cFYI(1, "In SETFSUnixInfo");
SETFSUnixRetry:
/* BB switch to small buf init to save memory */
rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
(void **) &pSMB, (void **) &pSMBr);
if (rc)
return rc;
params = 4; /* 2 bytes zero followed by info level. */
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_setfsi_req, FileNum)
- 4;
offset = param_offset + params;
pSMB->MaxParameterCount = cpu_to_le16(4);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(100);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FS_INFORMATION);
byte_count = 1 /* pad */ + params + 12;
pSMB->DataCount = cpu_to_le16(12);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
/* Params. */
pSMB->FileNum = 0;
pSMB->InformationLevel = cpu_to_le16(SMB_SET_CIFS_UNIX_INFO);
/* Data. */
pSMB->ClientUnixMajor = cpu_to_le16(CIFS_UNIX_MAJOR_VERSION);
pSMB->ClientUnixMinor = cpu_to_le16(CIFS_UNIX_MINOR_VERSION);
pSMB->ClientUnixCap = cpu_to_le64(cap);
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cERROR(1, "Send error in SETFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc)
rc = -EIO; /* bad smb */
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto SETFSUnixRetry;
return rc;
}
int
CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData)
{
/* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
TRANSACTION2_QFSI_RSP *pSMBr = NULL;
FILE_SYSTEM_POSIX_INFO *response_data;
int rc = 0;
int bytes_returned = 0;
__u16 params, byte_count;
cFYI(1, "In QFSPosixInfo");
QFSPosixRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
params = 2; /* level */
pSMB->TotalDataCount = 0;
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(100);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
byte_count = params + 1 /* pad */ ;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(offsetof(struct
smb_com_transaction2_qfsi_req, InformationLevel) - 4);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FS_INFORMATION);
pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_POSIX_FS_INFO);
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QFSUnixInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < 13) {
rc = -EIO; /* bad smb */
} else {
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
response_data = (FILE_SYSTEM_POSIX_INFO *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
FSData->f_bsize =
le32_to_cpu(response_data->BlockSize);
FSData->f_blocks =
le64_to_cpu(response_data->TotalBlocks);
FSData->f_bfree =
le64_to_cpu(response_data->BlocksAvail);
if (response_data->UserBlocksAvail == cpu_to_le64(-1)) {
FSData->f_bavail = FSData->f_bfree;
} else {
FSData->f_bavail =
le64_to_cpu(response_data->UserBlocksAvail);
}
if (response_data->TotalFileNodes != cpu_to_le64(-1))
FSData->f_files =
le64_to_cpu(response_data->TotalFileNodes);
if (response_data->FreeFileNodes != cpu_to_le64(-1))
FSData->f_ffree =
le64_to_cpu(response_data->FreeFileNodes);
}
}
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QFSPosixRetry;
return rc;
}
/* We cannot use the write-of-zero-bytes trick to
set the file size, due to the need for large file support. Also note
that this SetPathInfo is preferred to the SetFileInfo based method in
the next routine, which is only needed to work around a sharing
violation bug in Samba that this routine can run into */
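/* Usage sketch (hypothetical caller): truncate a file to zero bytes by
* pathname, leaving the allocation size alone:
*
*	rc = CIFSSMBSetEOF(xid, tcon, full_path, 0, false,
*			   nls_codepage, remap);
*/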
int
CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName,
__u64 size, bool SetAllocation,
const struct nls_table *nls_codepage, int remap)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
struct file_end_of_file_info *parm_data;
int name_len;
int rc = 0;
int bytes_returned = 0;
__u16 params, byte_count, data_count, param_offset, offset;
cFYI(1, "In SetEOF");
SetEOFRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
}
params = 6 + name_len;
data_count = sizeof(struct file_end_of_file_info);
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(4100);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
if (SetAllocation) {
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
else
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
} else /* Set File Size */ {
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
else
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
}
parm_data =
(struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol) +
offset);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
byte_count = 3 /* pad */ + params + data_count;
pSMB->DataCount = cpu_to_le16(data_count);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
parm_data->FileSize = cpu_to_le64(size);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
cFYI(1, "SetPathInfo (file size) returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto SetEOFRetry;
return rc;
}
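/* Handle-based variant of SetEOF, used when the path-based call would
* hit a sharing violation (see comment above). pid_of_opener is split
* into the 16-bit Pid and PidHigh header fields so the server sees the
* same process id that opened the handle. */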
int
CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
__u16 fid, __u32 pid_of_opener, bool SetAllocation)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct file_end_of_file_info *parm_data;
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
cFYI(1, "SetFileSize (via SetFileInfo) %lld",
(long long)size);
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
count = sizeof(struct file_end_of_file_info);
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
parm_data =
(struct file_end_of_file_info *) (((char *) &pSMB->hdr.Protocol)
+ offset);
pSMB->DataOffset = cpu_to_le16(offset);
parm_data->FileSize = cpu_to_le64(size);
pSMB->Fid = fid;
if (SetAllocation) {
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO2);
else
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_ALLOCATION_INFO);
} else /* Set File Size */ {
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO2);
else
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_END_OF_FILE_INFO);
}
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc) {
cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc);
}
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
/* Some legacy servers such as NT4 require that the file times be set on
an open handle, rather than by pathname - this is awkward due to
potential access conflicts on the open, but it is unavoidable for these
old servers, since the only other choice is to convert from 100-nanosecond
DCE time and fall back to the original SetPathInfo level, which takes the
ancient DOS time format with 2-second granularity */
int
CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
char *data_offset;
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
cFYI(1, "Set Times (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
count = sizeof(FILE_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find max SMB PDU from sess */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->Fid = fid;
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
else
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
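/* Mark (or unmark) an open file for deletion on close via
* SMB_SET_FILE_DISPOSITION_INFO. The data payload is a single byte:
* 1 to delete the file when the last handle closes, 0 to cancel. */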
int
CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
bool delete_file, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
char *data_offset;
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
cFYI(1, "Set File Disposition (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
count = 1;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find max SMB PDU from sess */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->Fid = fid;
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_DISPOSITION_INFO);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
*data_offset = delete_file ? 1 : 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
cFYI(1, "Send error in SetFileDisposition = %d", rc);
return rc;
}
int
CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
int name_len;
int rc = 0;
int bytes_returned = 0;
char *data_offset;
__u16 params, param_offset, offset, byte_count, count;
cFYI(1, "In SetTimes");
SetTimesRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
}
params = 6 + name_len;
count = sizeof(FILE_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
else
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
cFYI(1, "SetPathInfo (times) returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto SetTimesRetry;
return rc;
}
/* Cannot be used to set time stamps yet (due to old DOS time format) */
/* Can be used to set attributes */
#if 0 /* Possibly not needed - since it turns out that, strangely, NT4 has a
bug handling it anyway, and NT4 was what we thought it would be needed for.
Do not delete it until we prove whether it is needed for Win9x though */
int
CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName,
__u16 dos_attrs, const struct nls_table *nls_codepage)
{
SETATTR_REQ *pSMB = NULL;
SETATTR_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int name_len;
cFYI(1, "In SetAttrLegacy");
SetAttrLgcyRetry:
rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
ConvertToUCS((__le16 *) pSMB->fileName, fileName,
PATH_MAX, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->fileName, fileName, name_len);
}
pSMB->attr = cpu_to_le16(dos_attrs);
pSMB->BufferFormat = 0x04;
inc_rfc1001_len(pSMB, name_len + 1);
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
cFYI(1, "Error in LegacySetAttr = %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto SetAttrLgcyRetry;
return rc;
}
#endif /* temporarily unneeded SetAttr legacy function */
static void
cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
const struct cifs_unix_set_info_args *args)
{
u64 mode = args->mode;
/*
* Samba servers ignore setting the file size to zero, due to bugs in
* some older clients, but we should be precise - we use SetFileSize to
* set the file size and do not want to truncate it to zero
* accidentally, as happened on one Samba server beta when zero was
* put here instead of -1.
*/
data_offset->EndOfFile = cpu_to_le64(NO_CHANGE_64);
data_offset->NumOfBytes = cpu_to_le64(NO_CHANGE_64);
data_offset->LastStatusChange = cpu_to_le64(args->ctime);
data_offset->LastAccessTime = cpu_to_le64(args->atime);
data_offset->LastModificationTime = cpu_to_le64(args->mtime);
data_offset->Uid = cpu_to_le64(args->uid);
data_offset->Gid = cpu_to_le64(args->gid);
/* better to leave device as zero when it is */
data_offset->DevMajor = cpu_to_le64(MAJOR(args->device));
data_offset->DevMinor = cpu_to_le64(MINOR(args->device));
data_offset->Permissions = cpu_to_le64(mode);
if (S_ISREG(mode))
data_offset->Type = cpu_to_le32(UNIX_FILE);
else if (S_ISDIR(mode))
data_offset->Type = cpu_to_le32(UNIX_DIR);
else if (S_ISLNK(mode))
data_offset->Type = cpu_to_le32(UNIX_SYMLINK);
else if (S_ISCHR(mode))
data_offset->Type = cpu_to_le32(UNIX_CHARDEV);
else if (S_ISBLK(mode))
data_offset->Type = cpu_to_le32(UNIX_BLOCKDEV);
else if (S_ISFIFO(mode))
data_offset->Type = cpu_to_le32(UNIX_FIFO);
else if (S_ISSOCK(mode))
data_offset->Type = cpu_to_le32(UNIX_SOCKET);
}
int
CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
const struct cifs_unix_set_info_args *args,
u16 fid, u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
FILE_UNIX_BASIC_INFO *data_offset;
int rc = 0;
u16 params, param_offset, offset, byte_count, count;
cFYI(1, "Set Unix Info (via SetFileInfo)");
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
data_offset = (FILE_UNIX_BASIC_INFO *)
((char *)(&pSMB->hdr.Protocol) + offset);
count = sizeof(FILE_UNIX_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find max SMB PDU from sess */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->Fid = fid;
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
cifs_fill_unix_set_info(data_offset, args);
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
if (rc)
cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
int
CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName,
const struct cifs_unix_set_info_args *args,
const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
int name_len;
int rc = 0;
int bytes_returned = 0;
FILE_UNIX_BASIC_INFO *data_offset;
__u16 params, param_offset, offset, count, byte_count;
cFYI(1, "In SetUID/GID/Mode");
setPermsRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
}
params = 6 + name_len;
count = sizeof(FILE_UNIX_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
data_offset =
(FILE_UNIX_BASIC_INFO *) ((char *) &pSMB->hdr.Protocol +
offset);
memset(data_offset, 0, count);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->DataCount = cpu_to_le16(count);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
cifs_fill_unix_set_info(data_offset, args);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
cFYI(1, "SetPathInfo (perms) returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto setPermsRetry;
return rc;
}
#ifdef CONFIG_CIFS_XATTR
/*
* Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
* function used by listxattr and getxattr type calls. When ea_name is set,
* it looks for that attribute name and stuffs that value into the EAData
* buffer. When ea_name is NULL, it stuffs a list of attribute names into the
* buffer. In both cases, the return value is either the length of the
* resulting data or a negative error code. If EAData is a NULL pointer then
* the data isn't copied to it, but the length is returned.
*/
ssize_t
CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, const unsigned char *ea_name,
char *EAData, size_t buf_size,
const struct nls_table *nls_codepage, int remap)
{
/* BB assumes one setup word */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int list_len;
struct fealist *ea_response_data;
struct fea *temp_fea;
char *temp_ptr;
char *end_of_smb;
__u16 params, byte_count, data_offset;
unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
cFYI(1, "In Query All EAs path %s", searchName);
QAllEAsRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
list_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
list_len++; /* trailing null */
list_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
list_len = strnlen(searchName, PATH_MAX);
list_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, list_len);
}
params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
pSMB->ParameterOffset = cpu_to_le16(offsetof(
struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QueryAllEAs = %d", rc);
goto QAllEAsOut;
}
/* BB also check enough total bytes returned */
/* BB we need to improve the validity checking
of these trans2 responses */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < 4) {
rc = -EIO; /* bad smb */
goto QAllEAsOut;
}
/* check that length of list is not more than bcc */
/* check that each entry does not go beyond length
of list */
/* check that each element of each entry does not
go beyond end of list */
/* validate_trans2_offsets() */
/* BB check if start of smb + data_offset > &bcc+ bcc */
data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
ea_response_data = (struct fealist *)
(((char *) &pSMBr->hdr.Protocol) + data_offset);
list_len = le32_to_cpu(ea_response_data->list_len);
cFYI(1, "ea length %d", list_len);
if (list_len <= 8) {
cFYI(1, "empty EA list returned from server");
goto QAllEAsOut;
}
/* make sure list_len doesn't go past end of SMB */
end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
if ((char *)ea_response_data + list_len > end_of_smb) {
cFYI(1, "EA list appears to go beyond SMB");
rc = -EIO;
goto QAllEAsOut;
}
/* account for ea list len */
list_len -= 4;
temp_fea = ea_response_data->list;
temp_ptr = (char *)temp_fea;
while (list_len > 0) {
unsigned int name_len;
__u16 value_len;
list_len -= 4;
temp_ptr += 4;
/* make sure we can read name_len and value_len */
if (list_len < 0) {
cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
name_len = temp_fea->name_len;
value_len = le16_to_cpu(temp_fea->value_len);
list_len -= name_len + 1 + value_len;
if (list_len < 0) {
cFYI(1, "EA entry goes beyond length of list");
rc = -EIO;
goto QAllEAsOut;
}
if (ea_name) {
if (ea_name_len == name_len &&
strncmp(ea_name, temp_ptr, name_len) == 0) {
temp_ptr += name_len + 1;
rc = value_len;
if (buf_size == 0)
goto QAllEAsOut;
if ((size_t)value_len > buf_size) {
rc = -ERANGE;
goto QAllEAsOut;
}
memcpy(EAData, temp_ptr, value_len);
goto QAllEAsOut;
}
} else {
/* account for prefix user. and trailing null */
rc += (5 + 1 + name_len);
if (rc < (int) buf_size) {
memcpy(EAData, "user.", 5);
EAData += 5;
memcpy(EAData, temp_ptr, name_len);
EAData += name_len;
/* null terminate name */
*EAData = 0;
++EAData;
} else if (buf_size == 0) {
/* skip copy - calc size only */
} else {
/* stop before overrun buffer */
rc = -ERANGE;
break;
}
}
temp_ptr += name_len + 1 + value_len;
temp_fea = (struct fea *)temp_ptr;
}
/* didn't find the named attribute */
if (ea_name)
rc = -ENODATA;
QAllEAsOut:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QAllEAsRetry;
return (ssize_t)rc;
}
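/*
 * Minimal usage sketch for CIFSSMBQAllEAs, following the contract in the
 * comment above the function: a NULL ea_name lists attribute names, a
 * non-NULL ea_name fetches a single value. The xid/tcon/codepage values
 * are hypothetical placeholders for whatever a real caller already holds.
 */
#if 0
static void example_query_eas(const int xid, struct cifs_tcon *tcon,
			      const struct nls_table *cp)
{
	char buf[512];
	ssize_t len;

	/* NULL ea_name: fill buf with a list of "user."-prefixed names */
	len = CIFSSMBQAllEAs(xid, tcon, (const unsigned char *)"/somefile",
			     NULL, buf, sizeof(buf), cp, 0);

	/* named lookup: fetch the value of one attribute */
	if (len >= 0)
		len = CIFSSMBQAllEAs(xid, tcon,
				     (const unsigned char *)"/somefile",
				     (const unsigned char *)"comment",
				     buf, sizeof(buf), cp, 0);

	/* a negative return is an error, e.g. -ENODATA or -ERANGE */
}
#endif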
int
CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName,
const char *ea_name, const void *ea_value,
const __u16 ea_value_len, const struct nls_table *nls_codepage,
int remap)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
struct fealist *parm_data;
int name_len;
int rc = 0;
int bytes_returned = 0;
__u16 params, param_offset, byte_count, offset, count;
cFYI(1, "In SetEA");
SetEARetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(fileName, PATH_MAX);
name_len++; /* trailing null */
strncpy(pSMB->FileName, fileName, name_len);
}
params = 6 + name_len;
/* done calculating params using the name_len of the file name;
now reuse name_len for the length of the ea name
we are going to create in the inode xattrs */
if (ea_name == NULL)
name_len = 0;
else
name_len = strnlen(ea_name, 255);
count = sizeof(*parm_data) + ea_value_len + name_len;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find max SMB PDU from sess */
pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_spi_req,
InformationLevel) - 4;
offset = param_offset + params;
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_EA);
parm_data =
(struct fealist *) (((char *) &pSMB->hdr.Protocol) +
offset);
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
parm_data->list_len = cpu_to_le32(count);
parm_data->list[0].EA_flags = 0;
/* we checked above that name len is less than 255 */
parm_data->list[0].name_len = (__u8)name_len;
/* EA names are always ASCII */
if (ea_name)
strncpy(parm_data->list[0].name, ea_name, name_len);
parm_data->list[0].name[name_len] = 0;
parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
/* caller ensures that ea_value_len is less than 64K but
we need to ensure that it fits within the smb */
/*BB add length check to see if it would fit in
negotiated SMB buffer size BB */
/* if (ea_value_len > buffer_size - 512 (enough for header)) */
if (ea_value_len)
memcpy(parm_data->list[0].name+name_len+1,
ea_value, ea_value_len);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->Reserved4 = 0;
inc_rfc1001_len(pSMB, byte_count);
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc)
cFYI(1, "SetPathInfo (EA) returned %d", rc);
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto SetEARetry;
return rc;
}
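/*
 * Data block layout built above for SMB_SET_FILE_EA, as implied by the
 * stores into parm_data (a sketch, not protocol documentation):
 *
 *   list_len   4 bytes, little endian, == count
 *   EA_flags   1 byte
 *   name_len   1 byte
 *   value_len  2 bytes, little endian
 *   name       name_len bytes plus a NUL terminator
 *   value      ea_value_len bytes, copied to name + name_len + 1
 */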
#endif
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* BB unused temporarily */
/*
* Years ago the kernel added a "dnotify" function for Samba server,
* to allow network clients (such as Windows) to display updated
* lists of files in directory listings automatically when
* files are added by one user when another user has the
* same directory open on their desktop. The Linux cifs kernel
* client hooked into the kernel side of this interface for
* the same reason, but ironically when the VFS moved from
* "dnotify" to "inotify" it became harder to plug in Linux
* network file system clients (the most obvious use case
* for notify interfaces is when multiple users can update
* the contents of the same directory - exactly what network
* file systems can do) although the server (Samba) could
* still use it. For the short term we leave the worker
* function ifdeffed out (below) until inotify is fixed
* in the VFS to make it easier to plug in network file
* system clients. If inotify turns out to be permanently
* incompatible for network fs clients, we could instead simply
* expose this config flag by adding a future cifs (and smb2) notify ioctl.
*/
int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
const int notify_subdirs, const __u16 netfid,
__u32 filter, struct file *pfile, int multishot,
const struct nls_table *nls_codepage)
{
int rc = 0;
struct smb_com_transaction_change_notify_req *pSMB = NULL;
struct smb_com_ntransaction_change_notify_rsp *pSMBr = NULL;
struct dir_notify_req *dnotify_req;
int bytes_returned;
cFYI(1, "In CIFSSMBNotify for file handle %d", (int)netfid);
rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
pSMB->TotalParameterCount = 0 ;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le32(2);
/* BB find exact data count max from sess structure BB */
pSMB->MaxDataCount = 0; /* same in little endian or be */
/* BB VERIFY which of the two assignments above is correct BB */
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
pSMB->MaxSetupCount = 4;
pSMB->Reserved = 0;
pSMB->ParameterOffset = 0;
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 4; /* single byte does not need le conversion */
pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_NOTIFY_CHANGE);
pSMB->ParameterCount = pSMB->TotalParameterCount;
if (notify_subdirs)
pSMB->WatchTree = 1; /* one byte - no le conversion needed */
pSMB->Reserved2 = 0;
pSMB->CompletionFilter = cpu_to_le32(filter);
pSMB->Fid = netfid; /* file handle always le */
pSMB->ByteCount = 0;
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *)pSMBr, &bytes_returned,
CIFS_ASYNC_OP);
if (rc) {
cFYI(1, "Error in Notify = %d", rc);
} else {
/* Add file to outstanding requests */
/* BB change to kmem cache alloc */
dnotify_req = kmalloc(
sizeof(struct dir_notify_req),
GFP_KERNEL);
if (dnotify_req) {
dnotify_req->Pid = pSMB->hdr.Pid;
dnotify_req->PidHigh = pSMB->hdr.PidHigh;
dnotify_req->Mid = pSMB->hdr.Mid;
dnotify_req->Tid = pSMB->hdr.Tid;
dnotify_req->Uid = pSMB->hdr.Uid;
dnotify_req->netfid = netfid;
dnotify_req->pfile = pfile;
dnotify_req->filter = filter;
dnotify_req->multishot = multishot;
spin_lock(&GlobalMid_Lock);
list_add_tail(&dnotify_req->lhead,
&GlobalDnotifyReqList);
spin_unlock(&GlobalMid_Lock);
} else
rc = -ENOMEM;
}
cifs_buf_release(pSMB);
return rc;
}
#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3497_0 |
crossvul-cpp_data_bad_3447_4 | /*
* sound/oss/sequencer.c
*
* The sequencer personality manager.
*/
/*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*/
/*
* Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
* Alan Cox : reformatted and fixed a pair of null pointer bugs
*/
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "midi_ctrl.h"
static int sequencer_ok;
static struct sound_timer_operations *tmr;
static int tmr_no = -1; /* Currently selected timer */
static int pending_timer = -1; /* For timer change operation */
extern unsigned long seq_time;
static int obsolete_api_used;
static DEFINE_SPINLOCK(lock);
/*
* Local counts for number of synth and MIDI devices. These are initialized
* by the sequencer_open.
*/
static int max_mididev;
static int max_synthdev;
/*
* The seq_mode gives the operating mode of the sequencer:
* 1 = level1 (the default)
* 2 = level2 (extended capabilities)
*/
#define SEQ_1 1
#define SEQ_2 2
static int seq_mode = SEQ_1;
static DECLARE_WAIT_QUEUE_HEAD(seq_sleeper);
static DECLARE_WAIT_QUEUE_HEAD(midi_sleeper);
static int midi_opened[MAX_MIDI_DEV];
static int midi_written[MAX_MIDI_DEV];
static unsigned long prev_input_time;
static int prev_event_time;
#include "tuning.h"
#define EV_SZ 8
#define IEV_SZ 8
static unsigned char *queue;
static unsigned char *iqueue;
static volatile int qhead, qtail, qlen;
static volatile int iqhead, iqtail, iqlen;
static volatile int seq_playing;
static volatile int sequencer_busy;
static int output_threshold;
static long pre_event_timeout;
static unsigned synth_open_mask;
static int seq_queue(unsigned char *note, char nonblock);
static void seq_startplay(void);
static int seq_sync(void);
static void seq_reset(void);
#if MAX_SYNTH_DEV > 15
#error Too many synthesizer devices enabled.
#endif
int sequencer_read(int dev, struct file *file, char __user *buf, int count)
{
int c = count, p = 0;
int ev_len;
unsigned long flags;
dev = dev >> 4;
ev_len = seq_mode == SEQ_1 ? 4 : 8;
spin_lock_irqsave(&lock,flags);
if (!iqlen)
{
spin_unlock_irqrestore(&lock,flags);
if (file->f_flags & O_NONBLOCK) {
return -EAGAIN;
}
interruptible_sleep_on_timeout(&midi_sleeper,
pre_event_timeout);
spin_lock_irqsave(&lock,flags);
if (!iqlen)
{
spin_unlock_irqrestore(&lock,flags);
return 0;
}
}
while (iqlen && c >= ev_len)
{
char *fixit = (char *) &iqueue[iqhead * IEV_SZ];
spin_unlock_irqrestore(&lock,flags);
if (copy_to_user(&(buf)[p], fixit, ev_len))
return count - c;
p += ev_len;
c -= ev_len;
spin_lock_irqsave(&lock,flags);
iqhead = (iqhead + 1) % SEQ_MAX_QUEUE;
iqlen--;
}
spin_unlock_irqrestore(&lock,flags);
return count - c;
}
static void sequencer_midi_output(int dev)
{
/*
* Currently NOP
*/
}
void seq_copy_to_input(unsigned char *event_rec, int len)
{
unsigned long flags;
/*
* Verify that the len is valid for the current mode.
*/
if (len != 4 && len != 8)
return;
if ((seq_mode == SEQ_1) != (len == 4))
return;
if (iqlen >= (SEQ_MAX_QUEUE - 1))
return; /* Overflow */
spin_lock_irqsave(&lock,flags);
memcpy(&iqueue[iqtail * IEV_SZ], event_rec, len);
iqlen++;
iqtail = (iqtail + 1) % SEQ_MAX_QUEUE;
wake_up(&midi_sleeper);
spin_unlock_irqrestore(&lock,flags);
}
EXPORT_SYMBOL(seq_copy_to_input);
static void sequencer_midi_input(int dev, unsigned char data)
{
unsigned int tstamp;
unsigned char event_rec[4];
if (data == 0xfe) /* Ignore active sensing */
return;
tstamp = jiffies - seq_time;
if (tstamp != prev_input_time)
{
tstamp = (tstamp << 8) | SEQ_WAIT;
seq_copy_to_input((unsigned char *) &tstamp, 4);
prev_input_time = tstamp;
}
event_rec[0] = SEQ_MIDIPUTC;
event_rec[1] = data;
event_rec[2] = dev;
event_rec[3] = 0;
seq_copy_to_input(event_rec, 4);
}
void seq_input_event(unsigned char *event_rec, int len)
{
unsigned long this_time;
if (seq_mode == SEQ_2)
this_time = tmr->get_time(tmr_no);
else
this_time = jiffies - seq_time;
if (this_time != prev_input_time)
{
unsigned char tmp_event[8];
tmp_event[0] = EV_TIMING;
tmp_event[1] = TMR_WAIT_ABS;
tmp_event[2] = 0;
tmp_event[3] = 0;
*(unsigned int *) &tmp_event[4] = this_time;
seq_copy_to_input(tmp_event, 8);
prev_input_time = this_time;
}
seq_copy_to_input(event_rec, len);
}
EXPORT_SYMBOL(seq_input_event);
int sequencer_write(int dev, struct file *file, const char __user *buf, int count)
{
unsigned char event_rec[EV_SZ], ev_code;
int p = 0, c, ev_size;
int mode = translate_mode(file);
dev = dev >> 4;
DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count));
if (mode == OPEN_READ)
return -EIO;
c = count;
while (c >= 4)
{
if (copy_from_user((char *) event_rec, &(buf)[p], 4))
goto out;
ev_code = event_rec[0];
if (ev_code == SEQ_FULLSIZE)
{
int err, fmt;
dev = *(unsigned short *) &event_rec[2];
if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
fmt = (*(short *) &event_rec[0]) & 0xffff;
err = synth_devs[dev]->load_patch(dev, fmt, buf, p + 4, c, 0);
if (err < 0)
return err;
return err;
}
if (ev_code >= 128)
{
if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED)
{
printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code);
return -EINVAL;
}
ev_size = 8;
if (c < ev_size)
{
if (!seq_playing)
seq_startplay();
return count - c;
}
if (copy_from_user((char *)&event_rec[4],
&(buf)[p + 4], 4))
goto out;
}
else
{
if (seq_mode == SEQ_2)
{
printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n");
return -EINVAL;
}
ev_size = 4;
if (event_rec[0] != SEQ_MIDIPUTC)
obsolete_api_used = 1;
}
if (event_rec[0] == SEQ_MIDIPUTC)
{
if (!midi_opened[event_rec[2]])
{
int err, mode;
int dev = event_rec[2];
if (dev >= max_mididev || midi_devs[dev]==NULL)
{
/*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/
return -ENXIO;
}
mode = translate_mode(file);
if ((err = midi_devs[dev]->open(dev, mode,
sequencer_midi_input, sequencer_midi_output)) < 0)
{
seq_reset();
printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev);
return err;
}
midi_opened[dev] = 1;
}
}
if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 1 : 0)))
{
int processed = count - c;
if (!seq_playing)
seq_startplay();
if (!processed && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
else
return processed;
}
p += ev_size;
c -= ev_size;
}
if (!seq_playing)
seq_startplay();
out:
return count;
}
static int seq_queue(unsigned char *note, char nonblock)
{
/*
* Test if there is space in the queue
*/
if (qlen >= SEQ_MAX_QUEUE)
if (!seq_playing)
seq_startplay(); /*
* Give a chance to drain the queue
*/
if (!nonblock && qlen >= SEQ_MAX_QUEUE && !waitqueue_active(&seq_sleeper)) {
/*
* Sleep until there is enough space on the queue
*/
interruptible_sleep_on(&seq_sleeper);
}
if (qlen >= SEQ_MAX_QUEUE)
{
return 0; /*
* To be sure
*/
}
memcpy(&queue[qtail * EV_SZ], note, EV_SZ);
qtail = (qtail + 1) % SEQ_MAX_QUEUE;
qlen++;
return 1;
}
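/*
 * The output queue is a fixed ring of SEQ_MAX_QUEUE slots of EV_SZ bytes
 * each; seq_queue() advances qtail on enqueue and seq_startplay() advances
 * qhead on dequeue, both modulo SEQ_MAX_QUEUE. A minimal model of the same
 * index arithmetic, for illustration only (not compiled into the driver):
 */
#if 0
struct ring { int head, tail, len; };

static int ring_put(struct ring *r, int max)
{
	if (r->len >= max)
		return 0;	/* full: caller may sleep and retry */
	/* the real code copies EV_SZ bytes into slot r->tail here */
	r->tail = (r->tail + 1) % max;
	r->len++;
	return 1;
}

static int ring_get(struct ring *r, int max)
{
	if (r->len <= 0)
		return -1;	/* empty */
	/* the real code consumes the event in slot r->head here */
	r->head = (r->head + 1) % max;
	r->len--;
	return 0;
}
#endif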
static int extended_event(unsigned char *q)
{
int dev = q[2];
if (dev < 0 || dev >= max_synthdev)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
switch (q[1])
{
case SEQ_NOTEOFF:
synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]);
break;
case SEQ_NOTEON:
if (q[4] > 127 && q[4] != 255)
return 0;
if (q[5] == 0)
{
synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]);
break;
}
synth_devs[dev]->start_note(dev, q[3], q[4], q[5]);
break;
case SEQ_PGMCHANGE:
synth_devs[dev]->set_instr(dev, q[3], q[4]);
break;
case SEQ_AFTERTOUCH:
synth_devs[dev]->aftertouch(dev, q[3], q[4]);
break;
case SEQ_BALANCE:
synth_devs[dev]->panning(dev, q[3], (char) q[4]);
break;
case SEQ_CONTROLLER:
synth_devs[dev]->controller(dev, q[3], q[4], (short) (q[5] | (q[6] << 8)));
break;
case SEQ_VOLMODE:
if (synth_devs[dev]->volume_method != NULL)
synth_devs[dev]->volume_method(dev, q[3]);
break;
default:
return -EINVAL;
}
return 0;
}
static int find_voice(int dev, int chn, int note)
{
unsigned short key;
int i;
key = (chn << 8) | (note + 1);
for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
if (synth_devs[dev]->alloc.map[i] == key)
return i;
return -1;
}
static int alloc_voice(int dev, int chn, int note)
{
unsigned short key;
int voice;
key = (chn << 8) | (note + 1);
voice = synth_devs[dev]->alloc_voice(dev, chn, note,
&synth_devs[dev]->alloc);
synth_devs[dev]->alloc.map[voice] = key;
synth_devs[dev]->alloc.alloc_times[voice] =
synth_devs[dev]->alloc.timestamp++;
return voice;
}
static void seq_chn_voice_event(unsigned char *event_rec)
{
#define dev event_rec[1]
#define cmd event_rec[2]
#define chn event_rec[3]
#define note event_rec[4]
#define parm event_rec[5]
int voice = -1;
if ((int) dev > max_synthdev || synth_devs[dev] == NULL)
return;
if (!(synth_open_mask & (1 << dev)))
return;
if (!synth_devs[dev])
return;
if (seq_mode == SEQ_2)
{
if (synth_devs[dev]->alloc_voice)
voice = find_voice(dev, chn, note);
if (cmd == MIDI_NOTEON && parm == 0)
{
cmd = MIDI_NOTEOFF;
parm = 64;
}
}
switch (cmd)
{
case MIDI_NOTEON:
if (note > 127 && note != 255) /* Not a seq2 feature */
return;
if (voice == -1 && seq_mode == SEQ_2 && synth_devs[dev]->alloc_voice)
{
/* Internal synthesizer (FM, GUS, etc) */
voice = alloc_voice(dev, chn, note);
}
if (voice == -1)
voice = chn;
if (seq_mode == SEQ_2 && (int) dev < num_synths)
{
/*
* The MIDI channel 10 is a percussive channel. Use the note
* number to select the proper patch (128 to 255) to play.
*/
if (chn == 9)
{
synth_devs[dev]->set_instr(dev, voice, 128 + note);
synth_devs[dev]->chn_info[chn].pgm_num = 128 + note;
}
synth_devs[dev]->setup_voice(dev, voice, chn);
}
synth_devs[dev]->start_note(dev, voice, note, parm);
break;
case MIDI_NOTEOFF:
if (voice == -1)
voice = chn;
synth_devs[dev]->kill_note(dev, voice, note, parm);
break;
case MIDI_KEY_PRESSURE:
if (voice == -1)
voice = chn;
synth_devs[dev]->aftertouch(dev, voice, parm);
break;
default:;
}
#undef dev
#undef cmd
#undef chn
#undef note
#undef parm
}
static void seq_chn_common_event(unsigned char *event_rec)
{
unsigned char dev = event_rec[1];
unsigned char cmd = event_rec[2];
unsigned char chn = event_rec[3];
unsigned char p1 = event_rec[4];
/* unsigned char p2 = event_rec[5]; */
unsigned short w14 = *(short *) &event_rec[6];
if ((int) dev > max_synthdev || synth_devs[dev] == NULL)
return;
if (!(synth_open_mask & (1 << dev)))
return;
if (!synth_devs[dev])
return;
switch (cmd)
{
case MIDI_PGM_CHANGE:
if (seq_mode == SEQ_2)
{
synth_devs[dev]->chn_info[chn].pgm_num = p1;
if ((int) dev >= num_synths)
synth_devs[dev]->set_instr(dev, chn, p1);
}
else
synth_devs[dev]->set_instr(dev, chn, p1);
break;
case MIDI_CTL_CHANGE:
if (seq_mode == SEQ_2)
{
if (chn > 15 || p1 > 127)
break;
synth_devs[dev]->chn_info[chn].controllers[p1] = w14 & 0x7f;
if (p1 < 32) /* Setting MSB should clear LSB to 0 */
synth_devs[dev]->chn_info[chn].controllers[p1 + 32] = 0;
if ((int) dev < num_synths)
{
int val = w14 & 0x7f;
int i, key;
if (p1 < 64) /* Combine MSB and LSB */
{
val = ((synth_devs[dev]->
chn_info[chn].controllers[p1 & ~32] & 0x7f) << 7)
| (synth_devs[dev]->
chn_info[chn].controllers[p1 | 32] & 0x7f);
p1 &= ~32;
}
/* Handle all playing notes on this channel */
key = ((int) chn << 8);
for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key)
synth_devs[dev]->controller(dev, i, p1, val);
}
else
synth_devs[dev]->controller(dev, chn, p1, w14);
}
else /* Mode 1 */
synth_devs[dev]->controller(dev, chn, p1, w14);
break;
case MIDI_PITCH_BEND:
if (seq_mode == SEQ_2)
{
synth_devs[dev]->chn_info[chn].bender_value = w14;
if ((int) dev < num_synths)
{
/* Handle all playing notes on this channel */
int i, key;
key = (chn << 8);
for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key)
synth_devs[dev]->bender(dev, i, w14);
}
else
synth_devs[dev]->bender(dev, chn, w14);
}
else /* MODE 1 */
synth_devs[dev]->bender(dev, chn, w14);
break;
default:;
}
}
static int seq_timing_event(unsigned char *event_rec)
{
unsigned char cmd = event_rec[1];
unsigned int parm = *(int *) &event_rec[4];
if (seq_mode == SEQ_2)
{
int ret;
if ((ret = tmr->event(tmr_no, event_rec)) == TIMER_ARMED)
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
wake_up(&seq_sleeper);
return ret;
}
switch (cmd)
{
case TMR_WAIT_REL:
parm += prev_event_time;
/*
* NOTE! No break here. Execution of TMR_WAIT_REL continues in the
* next case (TMR_WAIT_ABS)
*/
case TMR_WAIT_ABS:
if (parm > 0)
{
long time;
time = parm;
prev_event_time = time;
seq_playing = 1;
request_sound_timer(time);
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
wake_up(&seq_sleeper);
return TIMER_ARMED;
}
break;
case TMR_START:
seq_time = jiffies;
prev_input_time = 0;
prev_event_time = 0;
break;
case TMR_STOP:
break;
case TMR_CONTINUE:
break;
case TMR_TEMPO:
break;
case TMR_ECHO:
if (seq_mode == SEQ_2)
seq_copy_to_input(event_rec, 8);
else
{
parm = (parm << 8 | SEQ_ECHO);
seq_copy_to_input((unsigned char *) &parm, 4);
}
break;
default:;
}
return TIMER_NOT_ARMED;
}
static void seq_local_event(unsigned char *event_rec)
{
unsigned char cmd = event_rec[1];
unsigned int parm = *((unsigned int *) &event_rec[4]);
switch (cmd)
{
case LOCL_STARTAUDIO:
DMAbuf_start_devices(parm);
break;
default:;
}
}
static void seq_sysex_message(unsigned char *event_rec)
{
unsigned int dev = event_rec[1];
int i, l = 0;
unsigned char *buf = &event_rec[2];
if (dev > max_synthdev)
return;
if (!(synth_open_mask & (1 << dev)))
return;
if (!synth_devs[dev])
return;
l = 0;
for (i = 0; i < 6 && buf[i] != 0xff; i++)
l = i + 1;
if (!synth_devs[dev]->send_sysex)
return;
if (l > 0)
synth_devs[dev]->send_sysex(dev, buf, l);
}
static int play_event(unsigned char *q)
{
/*
* NOTE! This routine returns
* 0 = normal event played.
* 1 = Timer armed. Suspend playback until timer callback.
* 2 = MIDI output buffer full. Restore queue and suspend until timer
*/
unsigned int *delay;
switch (q[0])
{
case SEQ_NOTEOFF:
if (synth_open_mask & (1 << 0))
if (synth_devs[0])
synth_devs[0]->kill_note(0, q[1], 255, q[3]);
break;
case SEQ_NOTEON:
if (q[4] < 128 || q[4] == 255)
if (synth_open_mask & (1 << 0))
if (synth_devs[0])
synth_devs[0]->start_note(0, q[1], q[2], q[3]);
break;
case SEQ_WAIT:
delay = (unsigned int *) q; /*
* Bytes 1 to 3 contain the
* delay in 'ticks'
*/
*delay = (*delay >> 8) & 0xffffff;
if (*delay > 0)
{
long time;
seq_playing = 1;
time = *delay;
prev_event_time = time;
request_sound_timer(time);
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
wake_up(&seq_sleeper);
/*
* The timer is now active and will reinvoke this function
* after the timer expires. Return to the caller now.
*/
return 1;
}
break;
case SEQ_PGMCHANGE:
if (synth_open_mask & (1 << 0))
if (synth_devs[0])
synth_devs[0]->set_instr(0, q[1], q[2]);
break;
case SEQ_SYNCTIMER: /*
* Reset timer
*/
seq_time = jiffies;
prev_input_time = 0;
prev_event_time = 0;
break;
case SEQ_MIDIPUTC: /*
* Put a midi character
*/
if (midi_opened[q[2]])
{
int dev;
dev = q[2];
if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
break;
if (!midi_devs[dev]->outputc(dev, q[1]))
{
/*
* Output FIFO is full. Wait one timer cycle and try again.
*/
seq_playing = 1;
request_sound_timer(-1);
return 2;
}
else
midi_written[dev] = 1;
}
break;
case SEQ_ECHO:
seq_copy_to_input(q, 4); /*
* Echo back to the process
*/
break;
case SEQ_PRIVATE:
if ((int) q[1] < max_synthdev)
synth_devs[q[1]]->hw_control(q[1], q);
break;
case SEQ_EXTENDED:
extended_event(q);
break;
case EV_CHN_VOICE:
seq_chn_voice_event(q);
break;
case EV_CHN_COMMON:
seq_chn_common_event(q);
break;
case EV_TIMING:
if (seq_timing_event(q) == TIMER_ARMED)
{
return 1;
}
break;
case EV_SEQ_LOCAL:
seq_local_event(q);
break;
case EV_SYSEX:
seq_sysex_message(q);
break;
default:;
}
return 0;
}
/* called also as timer in irq context */
static void seq_startplay(void)
{
int this_one, action;
unsigned long flags;
while (qlen > 0)
{
spin_lock_irqsave(&lock,flags);
qhead = ((this_one = qhead) + 1) % SEQ_MAX_QUEUE;
qlen--;
spin_unlock_irqrestore(&lock,flags);
seq_playing = 1;
if ((action = play_event(&queue[this_one * EV_SZ])))
{ /* Suspend playback. Next timer routine invokes this routine again */
if (action == 2)
{
qlen++;
qhead = this_one;
}
return;
}
}
seq_playing = 0;
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
wake_up(&seq_sleeper);
}
static void reset_controllers(int dev, unsigned char *controller, int update_dev)
{
int i;
for (i = 0; i < 128; i++)
controller[i] = ctrl_def_values[i];
}
static void setup_mode2(void)
{
int dev;
max_synthdev = num_synths;
for (dev = 0; dev < num_midis; dev++)
{
if (midi_devs[dev] && midi_devs[dev]->converter != NULL)
{
synth_devs[max_synthdev++] = midi_devs[dev]->converter;
}
}
for (dev = 0; dev < max_synthdev; dev++)
{
int chn;
synth_devs[dev]->sysex_ptr = 0;
synth_devs[dev]->emulation = 0;
for (chn = 0; chn < 16; chn++)
{
synth_devs[dev]->chn_info[chn].pgm_num = 0;
reset_controllers(dev,
synth_devs[dev]->chn_info[chn].controllers,0);
synth_devs[dev]->chn_info[chn].bender_value = (1 << 7); /* Neutral */
synth_devs[dev]->chn_info[chn].bender_range = 200;
}
}
max_mididev = 0;
seq_mode = SEQ_2;
}
int sequencer_open(int dev, struct file *file)
{
int retval, mode, i;
int level, tmp;
if (!sequencer_ok)
sequencer_init();
level = ((dev & 0x0f) == SND_DEV_SEQ2) ? 2 : 1;
dev = dev >> 4;
mode = translate_mode(file);
DEB(printk("sequencer_open(dev=%d)\n", dev));
if (!sequencer_ok)
{
/* printk("Sound card: sequencer not initialized\n");*/
return -ENXIO;
}
if (dev) /* Patch manager device (obsolete) */
return -ENXIO;
if(synth_devs[dev] == NULL)
request_module("synth0");
if (mode == OPEN_READ)
{
if (!num_midis)
{
/*printk("Sequencer: No MIDI devices. Input not possible\n");*/
sequencer_busy = 0;
return -ENXIO;
}
}
if (sequencer_busy)
{
return -EBUSY;
}
sequencer_busy = 1;
obsolete_api_used = 0;
max_mididev = num_midis;
max_synthdev = num_synths;
pre_event_timeout = MAX_SCHEDULE_TIMEOUT;
seq_mode = SEQ_1;
if (pending_timer != -1)
{
tmr_no = pending_timer;
pending_timer = -1;
}
if (tmr_no == -1) /* Not selected yet */
{
int i, best;
best = -1;
for (i = 0; i < num_sound_timers; i++)
if (sound_timer_devs[i] && sound_timer_devs[i]->priority > best)
{
tmr_no = i;
best = sound_timer_devs[i]->priority;
}
if (tmr_no == -1) /* Should not be */
tmr_no = 0;
}
tmr = sound_timer_devs[tmr_no];
if (level == 2)
{
if (tmr == NULL)
{
/*printk("sequencer: No timer for level 2\n");*/
sequencer_busy = 0;
return -ENXIO;
}
setup_mode2();
}
if (!max_synthdev && !max_mididev)
{
sequencer_busy=0;
return -ENXIO;
}
synth_open_mask = 0;
for (i = 0; i < max_mididev; i++)
{
midi_opened[i] = 0;
midi_written[i] = 0;
}
for (i = 0; i < max_synthdev; i++)
{
if (synth_devs[i]==NULL)
continue;
if (!try_module_get(synth_devs[i]->owner))
continue;
if ((tmp = synth_devs[i]->open(i, mode)) < 0)
{
printk(KERN_WARNING "Sequencer: Warning! Cannot open synth device #%d (%d)\n", i, tmp);
if (synth_devs[i]->midi_dev)
printk(KERN_WARNING "(Maps to MIDI dev #%d)\n", synth_devs[i]->midi_dev);
}
else
{
synth_open_mask |= (1 << i);
if (synth_devs[i]->midi_dev)
midi_opened[synth_devs[i]->midi_dev] = 1;
}
}
seq_time = jiffies;
prev_input_time = 0;
prev_event_time = 0;
if (seq_mode == SEQ_1 && (mode == OPEN_READ || mode == OPEN_READWRITE))
{
/*
* Initialize midi input devices
*/
for (i = 0; i < max_mididev; i++)
if (!midi_opened[i] && midi_devs[i])
{
if (!try_module_get(midi_devs[i]->owner))
continue;
if ((retval = midi_devs[i]->open(i, mode,
sequencer_midi_input, sequencer_midi_output)) >= 0)
{
midi_opened[i] = 1;
}
}
}
if (seq_mode == SEQ_2) {
if (try_module_get(tmr->owner))
tmr->open(tmr_no, seq_mode);
}
init_waitqueue_head(&seq_sleeper);
init_waitqueue_head(&midi_sleeper);
output_threshold = SEQ_MAX_QUEUE / 2;
return 0;
}
static void seq_drain_midi_queues(void)
{
int i, n;
/*
* Give the Midi drivers time to drain their output queues
*/
n = 1;
while (!signal_pending(current) && n)
{
n = 0;
for (i = 0; i < max_mididev; i++)
if (midi_opened[i] && midi_written[i])
if (midi_devs[i]->buffer_status != NULL)
if (midi_devs[i]->buffer_status(i))
n++;
/*
* Let's have a delay
*/
if (n)
interruptible_sleep_on_timeout(&seq_sleeper,
HZ/10);
}
}
void sequencer_release(int dev, struct file *file)
{
int i;
int mode = translate_mode(file);
dev = dev >> 4;
DEB(printk("sequencer_release(dev=%d)\n", dev));
/*
* Wait until the queue is empty (if we don't have nonblock)
*/
if (mode != OPEN_READ && !(file->f_flags & O_NONBLOCK))
{
while (!signal_pending(current) && qlen > 0)
{
seq_sync();
interruptible_sleep_on_timeout(&seq_sleeper,
3*HZ);
/* Extra delay */
}
}
if (mode != OPEN_READ)
seq_drain_midi_queues(); /*
* Ensure the output queues are empty
*/
seq_reset();
if (mode != OPEN_READ)
seq_drain_midi_queues(); /*
* Flush the all notes off messages
*/
for (i = 0; i < max_synthdev; i++)
{
if (synth_open_mask & (1 << i)) /*
* Actually opened
*/
if (synth_devs[i])
{
synth_devs[i]->close(i);
module_put(synth_devs[i]->owner);
if (synth_devs[i]->midi_dev)
midi_opened[synth_devs[i]->midi_dev] = 0;
}
}
for (i = 0; i < max_mididev; i++)
{
if (midi_opened[i]) {
midi_devs[i]->close(i);
module_put(midi_devs[i]->owner);
}
}
if (seq_mode == SEQ_2) {
tmr->close(tmr_no);
module_put(tmr->owner);
}
if (obsolete_api_used)
printk(KERN_WARNING "/dev/music: Obsolete (4 byte) API was used by %s\n", current->comm);
sequencer_busy = 0;
}
static int seq_sync(void)
{
if (qlen && !seq_playing && !signal_pending(current))
seq_startplay();
if (qlen > 0)
interruptible_sleep_on_timeout(&seq_sleeper, HZ);
return qlen;
}
static void midi_outc(int dev, unsigned char data)
{
/*
* NOTE! Calls sleep(). Don't call this from interrupt.
*/
int n;
unsigned long flags;
/*
* This routine sends one byte to the Midi channel.
* If the output FIFO is full, it waits until there
* is space in the queue.
*/
n = 3 * HZ; /* Timeout */
spin_lock_irqsave(&lock,flags);
while (n && !midi_devs[dev]->outputc(dev, data)) {
interruptible_sleep_on_timeout(&seq_sleeper, HZ/25);
n--;
}
spin_unlock_irqrestore(&lock,flags);
}
static void seq_reset(void)
{
/*
* NOTE! Calls sleep(). Don't call this from interrupt.
*/
int i;
int chn;
unsigned long flags;
sound_stop_timer();
seq_time = jiffies;
prev_input_time = 0;
prev_event_time = 0;
qlen = qhead = qtail = 0;
iqlen = iqhead = iqtail = 0;
for (i = 0; i < max_synthdev; i++)
if (synth_open_mask & (1 << i))
if (synth_devs[i])
synth_devs[i]->reset(i);
if (seq_mode == SEQ_2)
{
for (chn = 0; chn < 16; chn++)
for (i = 0; i < max_synthdev; i++)
if (synth_open_mask & (1 << i))
if (synth_devs[i])
{
synth_devs[i]->controller(i, chn, 123, 0); /* All notes off */
synth_devs[i]->controller(i, chn, 121, 0); /* Reset all ctl */
synth_devs[i]->bender(i, chn, 1 << 13); /* Bender off */
}
}
else /* seq_mode == SEQ_1 */
{
for (i = 0; i < max_mididev; i++)
if (midi_written[i]) /*
* Midi used. Some notes may still be playing
*/
{
/*
* Sending just an ACTIVE SENSING message should be enough to stop all
* playing notes. Since some devices do not recognize
* active sensing, we also have to send all-notes-off messages.
*/
midi_outc(i, 0xfe);
for (chn = 0; chn < 16; chn++)
{
midi_outc(i, (unsigned char) (0xb0 + (chn & 0x0f))); /* control change */
midi_outc(i, 0x7b); /* All notes off */
midi_outc(i, 0); /* Dummy parameter */
}
midi_devs[i]->close(i);
midi_written[i] = 0;
midi_opened[i] = 0;
}
}
seq_playing = 0;
spin_lock_irqsave(&lock,flags);
if (waitqueue_active(&seq_sleeper)) {
/* printk( "Sequencer Warning: Unexpected sleeping process - Waking up\n"); */
wake_up(&seq_sleeper);
}
spin_unlock_irqrestore(&lock,flags);
}
static void seq_panic(void)
{
/*
* This routine is called by the application in case the user
* wants to reset the system to the default state.
*/
seq_reset();
/*
* Since some of the devices don't recognize the active sensing and
* all-notes-off messages, we have to shut off all notes manually.
*
* TO BE IMPLEMENTED LATER
*/
/*
* Also return the controllers to their default states
*/
}
int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *arg)
{
int midi_dev, orig_dev, val, err;
int mode = translate_mode(file);
struct synth_info inf;
struct seq_event_rec event_rec;
unsigned long flags;
int __user *p = arg;
orig_dev = dev = dev >> 4;
switch (cmd)
{
case SNDCTL_TMR_TIMEBASE:
case SNDCTL_TMR_TEMPO:
case SNDCTL_TMR_START:
case SNDCTL_TMR_STOP:
case SNDCTL_TMR_CONTINUE:
case SNDCTL_TMR_METRONOME:
case SNDCTL_TMR_SOURCE:
if (seq_mode != SEQ_2)
return -EINVAL;
return tmr->ioctl(tmr_no, cmd, arg);
case SNDCTL_TMR_SELECT:
if (seq_mode != SEQ_2)
return -EINVAL;
if (get_user(pending_timer, p))
return -EFAULT;
if (pending_timer < 0 || pending_timer >= num_sound_timers || sound_timer_devs[pending_timer] == NULL)
{
pending_timer = -1;
return -EINVAL;
}
val = pending_timer;
break;
case SNDCTL_SEQ_PANIC:
seq_panic();
return -EINVAL;
case SNDCTL_SEQ_SYNC:
if (mode == OPEN_READ)
return 0;
while (qlen > 0 && !signal_pending(current))
seq_sync();
return qlen ? -EINTR : 0;
case SNDCTL_SEQ_RESET:
seq_reset();
return 0;
case SNDCTL_SEQ_TESTMIDI:
if (__get_user(midi_dev, p))
return -EFAULT;
if (midi_dev < 0 || midi_dev >= max_mididev || !midi_devs[midi_dev])
return -ENXIO;
if (!midi_opened[midi_dev] &&
(err = midi_devs[midi_dev]->open(midi_dev, mode, sequencer_midi_input,
sequencer_midi_output)) < 0)
return err;
midi_opened[midi_dev] = 1;
return 0;
case SNDCTL_SEQ_GETINCOUNT:
if (mode == OPEN_WRITE)
return 0;
val = iqlen;
break;
case SNDCTL_SEQ_GETOUTCOUNT:
if (mode == OPEN_READ)
return 0;
val = SEQ_MAX_QUEUE - qlen;
break;
case SNDCTL_SEQ_GETTIME:
if (seq_mode == SEQ_2)
return tmr->ioctl(tmr_no, cmd, arg);
val = jiffies - seq_time;
break;
case SNDCTL_SEQ_CTRLRATE:
/*
* If *arg == 0, just return the current rate
*/
if (seq_mode == SEQ_2)
return tmr->ioctl(tmr_no, cmd, arg);
if (get_user(val, p))
return -EFAULT;
if (val != 0)
return -EINVAL;
val = HZ;
break;
case SNDCTL_SEQ_RESETSAMPLES:
case SNDCTL_SYNTH_REMOVESAMPLE:
case SNDCTL_SYNTH_CONTROL:
if (get_user(dev, p))
return -EFAULT;
if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)) && !orig_dev)
return -EBUSY;
return synth_devs[dev]->ioctl(dev, cmd, arg);
case SNDCTL_SEQ_NRSYNTHS:
val = max_synthdev;
break;
case SNDCTL_SEQ_NRMIDIS:
val = max_mididev;
break;
case SNDCTL_SYNTH_MEMAVL:
if (get_user(dev, p))
return -EFAULT;
if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)) && !orig_dev)
return -EBUSY;
val = synth_devs[dev]->ioctl(dev, cmd, arg);
break;
case SNDCTL_FM_4OP_ENABLE:
if (get_user(dev, p))
return -EFAULT;
if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
synth_devs[dev]->ioctl(dev, cmd, arg);
return 0;
case SNDCTL_SYNTH_INFO:
if (get_user(dev, &((struct synth_info __user *)arg)->device))
return -EFAULT;
if (dev < 0 || dev >= max_synthdev)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)) && !orig_dev)
return -EBUSY;
return synth_devs[dev]->ioctl(dev, cmd, arg);
/* Like SYNTH_INFO but returns ID in the name field */
case SNDCTL_SYNTH_ID:
if (get_user(dev, &((struct synth_info __user *)arg)->device))
return -EFAULT;
if (dev < 0 || dev >= max_synthdev)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)) && !orig_dev)
return -EBUSY;
memcpy(&inf, synth_devs[dev]->info, sizeof(inf));
strlcpy(inf.name, synth_devs[dev]->id, sizeof(inf.name));
inf.device = dev;
return copy_to_user(arg, &inf, sizeof(inf))?-EFAULT:0;
case SNDCTL_SEQ_OUTOFBAND:
if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
return -EFAULT;
spin_lock_irqsave(&lock,flags);
play_event(event_rec.arr);
spin_unlock_irqrestore(&lock,flags);
return 0;
case SNDCTL_MIDI_INFO:
if (get_user(dev, &((struct midi_info __user *)arg)->device))
return -EFAULT;
if (dev < 0 || dev >= max_mididev || !midi_devs[dev])
return -ENXIO;
midi_devs[dev]->info.device = dev;
return copy_to_user(arg, &midi_devs[dev]->info, sizeof(struct midi_info))?-EFAULT:0;
case SNDCTL_SEQ_THRESHOLD:
if (get_user(val, p))
return -EFAULT;
if (val < 1)
val = 1;
if (val >= SEQ_MAX_QUEUE)
val = SEQ_MAX_QUEUE - 1;
output_threshold = val;
return 0;
case SNDCTL_MIDI_PRETIME:
if (get_user(val, p))
return -EFAULT;
if (val < 0)
val = 0;
val = (HZ * val) / 10;
pre_event_timeout = val;
break;
default:
if (mode == OPEN_READ)
return -EIO;
if (!synth_devs[0])
return -ENXIO;
if (!(synth_open_mask & (1 << 0)))
return -ENXIO;
if (!synth_devs[0]->ioctl)
return -EINVAL;
return synth_devs[0]->ioctl(0, cmd, arg);
}
return put_user(val, p);
}
/* No kernel lock - we're using the global irq lock here */
unsigned int sequencer_poll(int dev, struct file *file, poll_table * wait)
{
unsigned long flags;
unsigned int mask = 0;
dev = dev >> 4;
spin_lock_irqsave(&lock,flags);
/* input */
poll_wait(file, &midi_sleeper, wait);
if (iqlen)
mask |= POLLIN | POLLRDNORM;
/* output */
poll_wait(file, &seq_sleeper, wait);
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
mask |= POLLOUT | POLLWRNORM;
spin_unlock_irqrestore(&lock,flags);
return mask;
}
void sequencer_timer(unsigned long dummy)
{
seq_startplay();
}
EXPORT_SYMBOL(sequencer_timer);
int note_to_freq(int note_num)
{
/*
* This routine converts a midi note to a frequency (multiplied by 1000)
*/
int note, octave, note_freq;
static int notes[] =
{
261632, 277189, 293671, 311132, 329632, 349232,
369998, 391998, 415306, 440000, 466162, 493880
};
#define BASE_OCTAVE 5
octave = note_num / 12;
note = note_num % 12;
note_freq = notes[note];
if (octave < BASE_OCTAVE)
note_freq >>= (BASE_OCTAVE - octave);
else if (octave > BASE_OCTAVE)
note_freq <<= (octave - BASE_OCTAVE);
/*
* note_freq >>= 1;
*/
return note_freq;
}
EXPORT_SYMBOL(note_to_freq);
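/*
 * Worked examples (return values are in millihertz, since the table
 * entries are Hz * 1000):
 *
 *   note 69 (A4): octave 5 == BASE_OCTAVE, notes[9] -> 440000 (440 Hz)
 *   note 57 (A3): octave 4, so 440000 >> 1 -> 220000 (220 Hz)
 *   note 81 (A5): octave 6, so 440000 << 1 -> 880000 (880 Hz)
 */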
unsigned long compute_finetune(unsigned long base_freq, int bend, int range,
int vibrato_cents)
{
unsigned long amount;
int negative, semitones, cents, multiplier = 1;
if (!bend)
return base_freq;
if (!range)
return base_freq;
if (!base_freq)
return base_freq;
if (range >= 8192)
range = 8192;
bend = bend * range / 8192; /* Convert to cents */
bend += vibrato_cents;
if (!bend)
return base_freq;
negative = bend < 0 ? 1 : 0;
if (bend < 0)
bend *= -1;
if (bend > range)
bend = range;
/*
if (bend > 2399)
bend = 2399;
*/
while (bend > 2399)
{
multiplier *= 4;
bend -= 2400;
}
semitones = bend / 100;
cents = bend % 100;
amount = (int) (semitone_tuning[semitones] * multiplier * cent_tuning[cents]) / 10000;
if (negative)
return (base_freq * 10000) / amount; /* Bend down */
else
return (base_freq * amount) / 10000; /* Bend up */
}
EXPORT_SYMBOL(compute_finetune);
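/*
 * Worked example, assuming the tables in "tuning.h" hold 2^(s/12) and
 * 2^(c/1200) scaled by 10000 (which the final division by 10000 implies):
 *
 *   base_freq = 440, bend = +8192, range = 200
 *   bend -> 8192 * 200 / 8192 = 200 cents = 2 semitones, 0 cents
 *   amount ~= 11225 (2^(2/12) * 10000), so the result is about
 *   440 * 11225 / 10000 ~= 493, i.e. two semitones up.
 */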
void sequencer_init(void)
{
if (sequencer_ok)
return;
queue = vmalloc(SEQ_MAX_QUEUE * EV_SZ);
if (queue == NULL)
{
printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n");
return;
}
iqueue = vmalloc(SEQ_MAX_QUEUE * IEV_SZ);
if (iqueue == NULL)
{
printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n");
vfree(queue);
return;
}
sequencer_ok = 1;
}
EXPORT_SYMBOL(sequencer_init);
void sequencer_unload(void)
{
vfree(queue);
vfree(iqueue);
queue = iqueue = NULL;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3447_4 |
crossvul-cpp_data_bad_2417_0 | /*
** Copyright (C) 2002-2013 Erik de Castro Lopo <erikd@mega-nerd.com>
** Copyright (C) 2003 Ross Bencina <rbencina@iprimus.com.au>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU Lesser General Public License as published by
** the Free Software Foundation; either version 2.1 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU Lesser General Public License for more details.
**
** You should have received a copy of the GNU Lesser General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
** The file is split into three sections as follows:
** - The top section (USE_WINDOWS_API == 0) for Linux, Unix and MacOSX
** systems (including Cygwin).
** - The middle section (USE_WINDOWS_API == 1) for microsoft windows
** (including MinGW) using the native windows API.
** - A legacy windows section which attempted to work around grievous
** bugs in microsoft's POSIX implementation.
*/
/*
** The header file sfconfig.h MUST be included before the others to ensure
** that large file support is enabled correctly on Unix systems.
*/
#include "sfconfig.h"
#include <stdio.h>
#include <stdlib.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#if (HAVE_DECL_S_IRGRP == 0)
#include <sf_unistd.h>
#endif
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include "sndfile.h"
#include "common.h"
#define SENSIBLE_SIZE (0x40000000)
/*
** Neat solution to the Win32/OS2 binary file flag requirement.
** If O_BINARY isn't already defined by the inclusion of the system
** headers, set it to zero.
*/
#ifndef O_BINARY
#define O_BINARY 0
#endif
static void psf_log_syserr (SF_PRIVATE *psf, int error) ;
#if (USE_WINDOWS_API == 0)
/*------------------------------------------------------------------------------
** Win32 stuff at the bottom of the file. Unix and other sensible OSes here.
*/
static int psf_close_fd (int fd) ;
static int psf_open_fd (PSF_FILE * pfile) ;
static sf_count_t psf_get_filelen_fd (int fd) ;
int
psf_fopen (SF_PRIVATE *psf)
{
psf->error = 0 ;
psf->file.filedes = psf_open_fd (&psf->file) ;
if (psf->file.filedes == - SFE_BAD_OPEN_MODE)
{ psf->error = SFE_BAD_OPEN_MODE ;
psf->file.filedes = -1 ;
return psf->error ;
} ;
if (psf->file.filedes == -1)
psf_log_syserr (psf, errno) ;
return psf->error ;
} /* psf_fopen */
int
psf_fclose (SF_PRIVATE *psf)
{ int retval ;
if (psf->virtual_io)
return 0 ;
if (psf->file.do_not_close_descriptor)
{ psf->file.filedes = -1 ;
return 0 ;
} ;
if ((retval = psf_close_fd (psf->file.filedes)) == -1)
psf_log_syserr (psf, errno) ;
psf->file.filedes = -1 ;
return retval ;
} /* psf_fclose */
int
psf_open_rsrc (SF_PRIVATE *psf)
{
if (psf->rsrc.filedes > 0)
return 0 ;
/* Test for MacOSX style resource fork on HPFS or HPFS+ filesystems. */
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s/..namedfork/rsrc", psf->file.path.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.filedes = psf_open_fd (&psf->rsrc)) >= 0)
{ psf->rsrclength = psf_get_filelen_fd (psf->rsrc.filedes) ;
if (psf->rsrclength > 0 || (psf->rsrc.mode & SFM_WRITE))
return SFE_NO_ERROR ;
psf_close_fd (psf->rsrc.filedes) ;
psf->rsrc.filedes = -1 ;
} ;
if (psf->rsrc.filedes == - SFE_BAD_OPEN_MODE)
{ psf->error = SFE_BAD_OPEN_MODE ;
return psf->error ;
} ;
/*
** Now try for a resource fork stored as a separate file in the same
** directory, but preceded with a dot underscore.
*/
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s._%s", psf->file.dir.c, psf->file.name.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.filedes = psf_open_fd (&psf->rsrc)) >= 0)
{ psf->rsrclength = psf_get_filelen_fd (psf->rsrc.filedes) ;
return SFE_NO_ERROR ;
} ;
/*
** Now try for a resource fork stored in a separate file in the
** .AppleDouble/ directory.
*/
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s.AppleDouble/%s", psf->file.dir.c, psf->file.name.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.filedes = psf_open_fd (&psf->rsrc)) >= 0)
{ psf->rsrclength = psf_get_filelen_fd (psf->rsrc.filedes) ;
return SFE_NO_ERROR ;
} ;
/* No resource file found. */
if (psf->rsrc.filedes == -1)
psf_log_syserr (psf, errno) ;
psf->rsrc.filedes = -1 ;
return psf->error ;
} /* psf_open_rsrc */
sf_count_t
psf_get_filelen (SF_PRIVATE *psf)
{ sf_count_t filelen ;
if (psf->virtual_io)
return psf->vio.get_filelen (psf->vio_user_data) ;
filelen = psf_get_filelen_fd (psf->file.filedes) ;
if (filelen == -1)
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
if (filelen == -SFE_BAD_STAT_SIZE)
{ psf->error = SFE_BAD_STAT_SIZE ;
return (sf_count_t) -1 ;
} ;
switch (psf->file.mode)
{ case SFM_WRITE :
filelen = filelen - psf->fileoffset ;
break ;
case SFM_READ :
if (psf->fileoffset > 0 && psf->filelength > 0)
filelen = psf->filelength ;
break ;
case SFM_RDWR :
/*
** Cannot open embedded files SFM_RDWR so we don't need to
** subtract psf->fileoffset. We already have the answer we
** need.
*/
break ;
default :
/* Shouldn't be here, so return error. */
filelen = -1 ;
} ;
return filelen ;
} /* psf_get_filelen */
int
psf_close_rsrc (SF_PRIVATE *psf)
{ psf_close_fd (psf->rsrc.filedes) ;
psf->rsrc.filedes = -1 ;
return 0 ;
} /* psf_close_rsrc */
int
psf_set_stdio (SF_PRIVATE *psf)
{ int error = 0 ;
switch (psf->file.mode)
{ case SFM_RDWR :
error = SFE_OPEN_PIPE_RDWR ;
break ;
case SFM_READ :
psf->file.filedes = 0 ;
break ;
case SFM_WRITE :
psf->file.filedes = 1 ;
break ;
default :
error = SFE_BAD_OPEN_MODE ;
break ;
} ;
psf->filelength = 0 ;
return error ;
} /* psf_set_stdio */
void
psf_set_file (SF_PRIVATE *psf, int fd)
{ psf->file.filedes = fd ;
} /* psf_set_file */
int
psf_file_valid (SF_PRIVATE *psf)
{ return (psf->file.filedes >= 0) ? SF_TRUE : SF_FALSE ;
} /* psf_file_valid */
sf_count_t
psf_fseek (SF_PRIVATE *psf, sf_count_t offset, int whence)
{ sf_count_t current_pos, new_position ;
if (psf->virtual_io)
return psf->vio.seek (offset, whence, psf->vio_user_data) ;
current_pos = psf_ftell (psf) ;
switch (whence)
{ case SEEK_SET :
offset += psf->fileoffset ;
break ;
case SEEK_END :
if (psf->file.mode == SFM_WRITE)
{ new_position = lseek (psf->file.filedes, offset, whence) ;
if (new_position < 0)
psf_log_syserr (psf, errno) ;
return new_position - psf->fileoffset ;
} ;
/* Transform SEEK_END into a SEEK_SET, i.e. find the file
** length and add the requested offset (should be <= 0) to
** get the offset wrt the start of the file.
*/
whence = SEEK_SET ;
offset = lseek (psf->file.filedes, 0, SEEK_END) + offset ;
break ;
case SEEK_CUR :
/* Translate a SEEK_CUR into a SEEK_SET. */
offset += current_pos ;
whence = SEEK_SET ;
break ;
default :
/* We really should not be here. */
psf_log_printf (psf, "psf_fseek : whence is %d *****.\n", whence) ;
return 0 ;
} ;
if (current_pos != offset)
new_position = lseek (psf->file.filedes, offset, whence) ;
else
new_position = offset ;
if (new_position < 0)
psf_log_syserr (psf, errno) ;
new_position -= psf->fileoffset ;
return new_position ;
} /* psf_fseek */
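/* Worked example of the whence translation above: in read mode, for a
** 1000 byte file with fileoffset 0, psf_fseek (psf, -4, SEEK_END) becomes
** SEEK_SET with offset = lseek (fd, 0, SEEK_END) + (-4) = 996, and the
** function returns 996 (new_position minus fileoffset).
*/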
sf_count_t
psf_fread (void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (psf->virtual_io)
return psf->vio.read (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the read down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ;
count = read (psf->file.filedes, ((char*) ptr) + total, (size_t) count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fread */
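/*
** Sketch (illustrative, not part of the original source) of the retry and
** chunking pattern psf_fread () uses : reads are capped at SENSIBLE_SIZE
** and restarted when interrupted by a signal.
*/
#if 0
static sf_count_t
read_loop_example (int fd, char *buf, sf_count_t remaining)
{	sf_count_t total = 0 ;
	ssize_t count ;
	while (remaining > 0)
	{	count = (remaining > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) remaining ;
		count = read (fd, buf + total, (size_t) count) ;
		if (count == -1 && errno == EINTR)
			continue ;	/* Interrupted by a signal, retry the same read. */
		if (count <= 0)
			break ;		/* Error or end of file. */
		total += count ;
		remaining -= count ;
		} ;
	return total ;
} /* read_loop_example */
#endif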
sf_count_t
psf_fwrite (const void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (psf->virtual_io)
return psf->vio.write (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the writes down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : items ;
count = write (psf->file.filedes, ((const char*) ptr) + total, count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fwrite */
sf_count_t
psf_ftell (SF_PRIVATE *psf)
{ sf_count_t pos ;
if (psf->virtual_io)
return psf->vio.tell (psf->vio_user_data) ;
if (psf->is_pipe)
return psf->pipeoffset ;
pos = lseek (psf->file.filedes, 0, SEEK_CUR) ;
if (pos == ((sf_count_t) -1))
{ psf_log_syserr (psf, errno) ;
return -1 ;
} ;
return pos - psf->fileoffset ;
} /* psf_ftell */
static int
psf_close_fd (int fd)
{ int retval ;
if (fd < 0)
return 0 ;
while ((retval = close (fd)) == -1 && errno == EINTR)
/* Do nothing. */ ;
return retval ;
} /* psf_close_fd */
sf_count_t
psf_fgets (char *buffer, sf_count_t bufsize, SF_PRIVATE *psf)
{ sf_count_t k = 0 ;
sf_count_t count ;
while (k < bufsize - 1)
{ count = read (psf->file.filedes, &(buffer [k]), 1) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0 || buffer [k++] == '\n')
break ;
} ;
buffer [k] = 0 ;
return k ;
} /* psf_fgets */
int
psf_is_pipe (SF_PRIVATE *psf)
{ struct stat statbuf ;
if (psf->virtual_io)
return SF_FALSE ;
if (fstat (psf->file.filedes, &statbuf) == -1)
{ psf_log_syserr (psf, errno) ;
/* Default to maximum safety. */
return SF_TRUE ;
} ;
if (S_ISFIFO (statbuf.st_mode) || S_ISSOCK (statbuf.st_mode))
return SF_TRUE ;
return SF_FALSE ;
} /* psf_is_pipe */
static sf_count_t
psf_get_filelen_fd (int fd)
{
#if (SIZEOF_OFF_T == 4 && SIZEOF_SF_COUNT_T == 8 && HAVE_FSTAT64)
struct stat64 statbuf ;
if (fstat64 (fd, &statbuf) == -1)
return (sf_count_t) -1 ;
return statbuf.st_size ;
#else
struct stat statbuf ;
if (fstat (fd, &statbuf) == -1)
return (sf_count_t) -1 ;
return statbuf.st_size ;
#endif
} /* psf_get_filelen_fd */
int
psf_ftruncate (SF_PRIVATE *psf, sf_count_t len)
{ int retval ;
/* Returns 0 on success, non-zero on failure. */
if (len < 0)
return -1 ;
if ((sizeof (off_t) < sizeof (sf_count_t)) && len > 0x7FFFFFFF)
return -1 ;
retval = ftruncate (psf->file.filedes, len) ;
if (retval == -1)
psf_log_syserr (psf, errno) ;
return retval ;
} /* psf_ftruncate */
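/*
** Worked example (illustrative) of the range check above : when off_t is
** 32 bits but sf_count_t is 64 bits, any length greater than 0x7FFFFFFF
** (2 GB - 1) cannot be represented as an off_t and must be rejected
** before calling ftruncate ().
*/
#if 0
static int
length_fits_off_t (sf_count_t len)
{	if (sizeof (off_t) >= sizeof (sf_count_t))
		return 1 ;
	return len <= 0x7FFFFFFF ;
} /* length_fits_off_t */
#endif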
void
psf_init_files (SF_PRIVATE *psf)
{ psf->file.filedes = -1 ;
psf->rsrc.filedes = -1 ;
psf->file.savedes = -1 ;
} /* psf_init_files */
void
psf_use_rsrc (SF_PRIVATE *psf, int on_off)
{
if (on_off)
{ if (psf->file.filedes != psf->rsrc.filedes)
{ psf->file.savedes = psf->file.filedes ;
psf->file.filedes = psf->rsrc.filedes ;
} ;
}
else if (psf->file.filedes == psf->rsrc.filedes)
psf->file.filedes = psf->file.savedes ;
return ;
} /* psf_use_rsrc */
static int
psf_open_fd (PSF_FILE * pfile)
{ int fd, oflag, mode ;
/*
** Sanity check. If everything is OK, this test and the puts calls will
** be optimised out. This is meant to catch the problems caused by
** "sfconfig.h" being included after <stdio.h>.
*/
if (sizeof (sf_count_t) != 8)
{ puts ("\n\n*** Fatal error : sizeof (sf_count_t) != 8") ;
puts ("*** This means that libsndfile was not configured correctly.\n") ;
exit (1) ;
} ;
switch (pfile->mode)
{ case SFM_READ :
oflag = O_RDONLY | O_BINARY ;
mode = 0 ;
break ;
case SFM_WRITE :
oflag = O_WRONLY | O_CREAT | O_TRUNC | O_BINARY ;
mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH ;
break ;
case SFM_RDWR :
oflag = O_RDWR | O_CREAT | O_BINARY ;
mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH ;
break ;
default :
return - SFE_BAD_OPEN_MODE ;
break ;
} ;
if (mode == 0)
fd = open (pfile->path.c, oflag) ;
else
fd = open (pfile->path.c, oflag, mode) ;
return fd ;
} /* psf_open_fd */
static void
psf_log_syserr (SF_PRIVATE *psf, int error)
{
/* Only log an error if no error has been set yet. */
if (psf->error == 0)
{ psf->error = SFE_SYSTEM ;
snprintf (psf->syserr, sizeof (psf->syserr), "System error : %s.", strerror (error)) ;
} ;
return ;
} /* psf_log_syserr */
void
psf_fsync (SF_PRIVATE *psf)
{
#if HAVE_FSYNC
if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR)
fsync (psf->file.filedes) ;
#else
psf = NULL ;
#endif
} /* psf_fsync */
#elif USE_WINDOWS_API
/* Win32 file i/o functions implemented using native Win32 API */
#include <windows.h>
#include <io.h>
static int psf_close_handle (HANDLE handle) ;
static HANDLE psf_open_handle (PSF_FILE * pfile) ;
static sf_count_t psf_get_filelen_handle (HANDLE handle) ;
/* USE_WINDOWS_API */ int
psf_fopen (SF_PRIVATE *psf)
{
psf->error = 0 ;
psf->file.handle = psf_open_handle (&psf->file) ;
if (psf->file.handle == NULL)
psf_log_syserr (psf, GetLastError ()) ;
return psf->error ;
} /* psf_fopen */
/* USE_WINDOWS_API */ int
psf_fclose (SF_PRIVATE *psf)
{ int retval ;
if (psf->virtual_io)
return 0 ;
if (psf->file.do_not_close_descriptor)
{ psf->file.handle = NULL ;
return 0 ;
} ;
if ((retval = psf_close_handle (psf->file.handle)) == -1)
psf_log_syserr (psf, GetLastError ()) ;
psf->file.handle = NULL ;
return retval ;
} /* psf_fclose */
/* USE_WINDOWS_API */ int
psf_open_rsrc (SF_PRIVATE *psf)
{
if (psf->rsrc.handle != NULL)
return 0 ;
/* Test for MacOSX style resource fork on HFS or HFS+ filesystems. */
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s/rsrc", psf->file.path.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.handle = psf_open_handle (&psf->rsrc)) != NULL)
{ psf->rsrclength = psf_get_filelen_handle (psf->rsrc.handle) ;
return SFE_NO_ERROR ;
} ;
/*
** Now try for a resource fork stored as a separate file in the same
** directory, but preceded with a dot underscore.
*/
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s._%s", psf->file.dir.c, psf->file.name.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.handle = psf_open_handle (&psf->rsrc)) != NULL)
{ psf->rsrclength = psf_get_filelen_handle (psf->rsrc.handle) ;
return SFE_NO_ERROR ;
} ;
/*
** Now try for a resource fork stored in a separate file in the
** .AppleDouble/ directory.
*/
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s.AppleDouble/%s", psf->file.dir.c, psf->file.name.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.handle = psf_open_handle (&psf->rsrc)) != NULL)
{ psf->rsrclength = psf_get_filelen_handle (psf->rsrc.handle) ;
return SFE_NO_ERROR ;
} ;
/* No resource file found. */
if (psf->rsrc.handle == NULL)
psf_log_syserr (psf, GetLastError ()) ;
psf->rsrc.handle = NULL ;
return psf->error ;
} /* psf_open_rsrc */
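/*
** Example (illustrative) of the three probe paths above for a file
** "dir/file.wav", assuming psf->file.dir.c carries a trailing slash :
** "dir/file.wav/rsrc", "dir/._file.wav" and "dir/.AppleDouble/file.wav".
*/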
/* USE_WINDOWS_API */ sf_count_t
psf_get_filelen (SF_PRIVATE *psf)
{ sf_count_t filelen ;
if (psf->virtual_io)
return psf->vio.get_filelen (psf->vio_user_data) ;
filelen = psf_get_filelen_handle (psf->file.handle) ;
if (filelen == -1)
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
if (filelen == -SFE_BAD_STAT_SIZE)
{ psf->error = SFE_BAD_STAT_SIZE ;
return (sf_count_t) -1 ;
} ;
switch (psf->file.mode)
{ case SFM_WRITE :
filelen = filelen - psf->fileoffset ;
break ;
case SFM_READ :
if (psf->fileoffset > 0 && psf->filelength > 0)
filelen = psf->filelength ;
break ;
case SFM_RDWR :
/*
** Cannot open embedded files SFM_RDWR so we don't need to
** subtract psf->fileoffset. We already have the answer we
** need.
*/
break ;
default :
/* Shouldn't be here, so return error. */
filelen = -1 ;
} ;
return filelen ;
} /* psf_get_filelen */
/* USE_WINDOWS_API */ void
psf_init_files (SF_PRIVATE *psf)
{ psf->file.handle = NULL ;
psf->rsrc.handle = NULL ;
psf->file.hsaved = NULL ;
} /* psf_init_files */
/* USE_WINDOWS_API */ void
psf_use_rsrc (SF_PRIVATE *psf, int on_off)
{
if (on_off)
{ if (psf->file.handle != psf->rsrc.handle)
{ psf->file.hsaved = psf->file.handle ;
psf->file.handle = psf->rsrc.handle ;
} ;
}
else if (psf->file.handle == psf->rsrc.handle)
psf->file.handle = psf->file.hsaved ;
return ;
} /* psf_use_rsrc */
/* USE_WINDOWS_API */ static HANDLE
psf_open_handle (PSF_FILE * pfile)
{ DWORD dwDesiredAccess ;
DWORD dwShareMode ;
DWORD dwCreationDistribution ;
HANDLE handle ;
switch (pfile->mode)
{ case SFM_READ :
dwDesiredAccess = GENERIC_READ ;
dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE ;
dwCreationDistribution = OPEN_EXISTING ;
break ;
case SFM_WRITE :
dwDesiredAccess = GENERIC_WRITE ;
dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE ;
dwCreationDistribution = CREATE_ALWAYS ;
break ;
case SFM_RDWR :
dwDesiredAccess = GENERIC_READ | GENERIC_WRITE ;
dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE ;
dwCreationDistribution = OPEN_ALWAYS ;
break ;
default :
return NULL ;
} ;
if (pfile->use_wchar)
handle = CreateFileW (
pfile->path.wc, /* pointer to name of the file */
dwDesiredAccess, /* access (read-write) mode */
dwShareMode, /* share mode */
0, /* pointer to security attributes */
dwCreationDistribution, /* how to create */
FILE_ATTRIBUTE_NORMAL, /* file attributes (could use FILE_FLAG_SEQUENTIAL_SCAN) */
NULL /* handle to file with attributes to copy */
) ;
else
handle = CreateFile (
pfile->path.c, /* pointer to name of the file */
dwDesiredAccess, /* access (read-write) mode */
dwShareMode, /* share mode */
0, /* pointer to security attributes */
dwCreationDistribution, /* how to create */
FILE_ATTRIBUTE_NORMAL, /* file attributes (could use FILE_FLAG_SEQUENTIAL_SCAN) */
NULL /* handle to file with attributes to copy */
) ;
if (handle == INVALID_HANDLE_VALUE)
return NULL ;
return handle ;
} /* psf_open_handle */
/* USE_WINDOWS_API */ static void
psf_log_syserr (SF_PRIVATE *psf, int error)
{ LPVOID lpMsgBuf ;
/* Only log an error if no error has been set yet. */
if (psf->error == 0)
{ psf->error = SFE_SYSTEM ;
FormatMessage (
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
NULL,
error,
MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR) &lpMsgBuf,
0,
NULL
) ;
snprintf (psf->syserr, sizeof (psf->syserr), "System error : %s", (char*) lpMsgBuf) ;
LocalFree (lpMsgBuf) ;
} ;
return ;
} /* psf_log_syserr */
/* USE_WINDOWS_API */ int
psf_close_rsrc (SF_PRIVATE *psf)
{ psf_close_handle (psf->rsrc.handle) ;
psf->rsrc.handle = NULL ;
return 0 ;
} /* psf_close_rsrc */
/* USE_WINDOWS_API */ int
psf_set_stdio (SF_PRIVATE *psf)
{ HANDLE handle = NULL ;
int error = 0 ;
switch (psf->file.mode)
{ case SFM_RDWR :
error = SFE_OPEN_PIPE_RDWR ;
break ;
case SFM_READ :
handle = GetStdHandle (STD_INPUT_HANDLE) ;
psf->file.do_not_close_descriptor = 1 ;
break ;
case SFM_WRITE :
handle = GetStdHandle (STD_OUTPUT_HANDLE) ;
psf->file.do_not_close_descriptor = 1 ;
break ;
default :
error = SFE_BAD_OPEN_MODE ;
break ;
} ;
psf->file.handle = handle ;
psf->filelength = 0 ;
return error ;
} /* psf_set_stdio */
/* USE_WINDOWS_API */ void
psf_set_file (SF_PRIVATE *psf, int fd)
{ HANDLE handle ;
intptr_t osfhandle ;
osfhandle = _get_osfhandle (fd) ;
handle = (HANDLE) osfhandle ;
psf->file.handle = handle ;
} /* psf_set_file */
/* USE_WINDOWS_API */ int
psf_file_valid (SF_PRIVATE *psf)
{ if (psf->file.handle == NULL)
return SF_FALSE ;
if (psf->file.handle == INVALID_HANDLE_VALUE)
return SF_FALSE ;
return SF_TRUE ;
} /* psf_file_valid */
/* USE_WINDOWS_API */ sf_count_t
psf_fseek (SF_PRIVATE *psf, sf_count_t offset, int whence)
{ sf_count_t new_position ;
LONG lDistanceToMove, lDistanceToMoveHigh ;
DWORD dwMoveMethod ;
DWORD dwResult, dwError ;
if (psf->virtual_io)
return psf->vio.seek (offset, whence, psf->vio_user_data) ;
switch (whence)
{ case SEEK_SET :
offset += psf->fileoffset ;
dwMoveMethod = FILE_BEGIN ;
break ;
case SEEK_END :
dwMoveMethod = FILE_END ;
break ;
default :
dwMoveMethod = FILE_CURRENT ;
break ;
} ;
lDistanceToMove = (DWORD) (offset & 0xFFFFFFFF) ;
lDistanceToMoveHigh = (DWORD) ((offset >> 32) & 0xFFFFFFFF) ;
dwResult = SetFilePointer (psf->file.handle, lDistanceToMove, &lDistanceToMoveHigh, dwMoveMethod) ;
if (dwResult == 0xFFFFFFFF)
dwError = GetLastError () ;
else
dwError = NO_ERROR ;
if (dwError != NO_ERROR)
{ psf_log_syserr (psf, dwError) ;
return -1 ;
} ;
new_position = (dwResult + ((__int64) lDistanceToMoveHigh << 32)) - psf->fileoffset ;
return new_position ;
} /* psf_fseek */
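/*
** Sketch (illustrative) of the 64 bit offset handling used above :
** SetFilePointer () takes the low 32 bits as its argument and the high
** 32 bits through a pointer, and returns the new position the same way,
** so the two halves must be recombined by the caller.
*/
#if 0
static sf_count_t
recombine_offset (DWORD low, LONG high)
{	return ((sf_count_t) high << 32) + low ;
} /* recombine_offset */
#endif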
/* USE_WINDOWS_API */ sf_count_t
psf_fread (void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
DWORD dwNumberOfBytesRead ;
if (psf->virtual_io)
return psf->vio.read (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the reads down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ;
if (ReadFile (psf->file.handle, ((char*) ptr) + total, count, &dwNumberOfBytesRead, 0) == 0)
{ psf_log_syserr (psf, GetLastError ()) ;
break ;
}
else
count = dwNumberOfBytesRead ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fread */
/* USE_WINDOWS_API */ sf_count_t
psf_fwrite (const void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
DWORD dwNumberOfBytesWritten ;
if (psf->virtual_io)
return psf->vio.write (ptr, bytes * items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the writes down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ;
if (WriteFile (psf->file.handle, ((const char*) ptr) + total, count, &dwNumberOfBytesWritten, 0) == 0)
{ psf_log_syserr (psf, GetLastError ()) ;
break ;
}
else
count = dwNumberOfBytesWritten ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fwrite */
/* USE_WINDOWS_API */ sf_count_t
psf_ftell (SF_PRIVATE *psf)
{ sf_count_t pos ;
LONG lDistanceToMoveLow, lDistanceToMoveHigh ;
DWORD dwResult, dwError ;
if (psf->virtual_io)
return psf->vio.tell (psf->vio_user_data) ;
if (psf->is_pipe)
return psf->pipeoffset ;
lDistanceToMoveLow = 0 ;
lDistanceToMoveHigh = 0 ;
dwResult = SetFilePointer (psf->file.handle, lDistanceToMoveLow, &lDistanceToMoveHigh, FILE_CURRENT) ;
if (dwResult == 0xFFFFFFFF)
dwError = GetLastError () ;
else
dwError = NO_ERROR ;
if (dwError != NO_ERROR)
{ psf_log_syserr (psf, dwError) ;
return -1 ;
} ;
pos = (dwResult + ((__int64) lDistanceToMoveHigh << 32)) ;
return pos - psf->fileoffset ;
} /* psf_ftell */
/* USE_WINDOWS_API */ static int
psf_close_handle (HANDLE handle)
{ if (handle == NULL)
return 0 ;
if (CloseHandle (handle) == 0)
return -1 ;
return 0 ;
} /* psf_close_handle */
/* USE_WINDOWS_API */ sf_count_t
psf_fgets (char *buffer, sf_count_t bufsize, SF_PRIVATE *psf)
{ sf_count_t k = 0 ;
sf_count_t count ;
DWORD dwNumberOfBytesRead ;
while (k < bufsize - 1)
{ if (ReadFile (psf->file.handle, &(buffer [k]), 1, &dwNumberOfBytesRead, 0) == 0)
{ psf_log_syserr (psf, GetLastError ()) ;
break ;
}
else
{ count = dwNumberOfBytesRead ;
/* note that we only check for '\n' not other line endings such as CRLF */
if (count == 0 || buffer [k++] == '\n')
break ;
} ;
} ;
buffer [k] = 0 ;
return k ;
} /* psf_fgets */
/* USE_WINDOWS_API */ int
psf_is_pipe (SF_PRIVATE *psf)
{
if (psf->virtual_io)
return SF_FALSE ;
if (GetFileType (psf->file.handle) == FILE_TYPE_DISK)
return SF_FALSE ;
/* Default to maximum safety. */
return SF_TRUE ;
} /* psf_is_pipe */
/* USE_WINDOWS_API */ sf_count_t
psf_get_filelen_handle (HANDLE handle)
{ sf_count_t filelen ;
DWORD dwFileSizeLow, dwFileSizeHigh, dwError = NO_ERROR ;
dwFileSizeLow = GetFileSize (handle, &dwFileSizeHigh) ;
if (dwFileSizeLow == 0xFFFFFFFF)
dwError = GetLastError () ;
if (dwError != NO_ERROR)
return (sf_count_t) -1 ;
filelen = dwFileSizeLow + ((__int64) dwFileSizeHigh << 32) ;
return filelen ;
} /* psf_get_filelen_handle */
/* USE_WINDOWS_API */ void
psf_fsync (SF_PRIVATE *psf)
{ FlushFileBuffers (psf->file.handle) ;
} /* psf_fsync */
/* USE_WINDOWS_API */ int
psf_ftruncate (SF_PRIVATE *psf, sf_count_t len)
{ int retval = 0 ;
LONG lDistanceToMoveLow, lDistanceToMoveHigh ;
DWORD dwResult, dwError = NO_ERROR ;
/* This implementation trashes the current file position.
** should it save and restore it? what if the current position is past
** the new end of file?
*/
/* Returns 0 on success, non-zero on failure. */
if (len < 0)
return 1 ;
lDistanceToMoveLow = (DWORD) (len & 0xFFFFFFFF) ;
lDistanceToMoveHigh = (DWORD) ((len >> 32) & 0xFFFFFFFF) ;
dwResult = SetFilePointer (psf->file.handle, lDistanceToMoveLow, &lDistanceToMoveHigh, FILE_BEGIN) ;
if (dwResult == 0xFFFFFFFF)
dwError = GetLastError () ;
if (dwError != NO_ERROR)
{ retval = -1 ;
psf_log_syserr (psf, dwError) ;
}
else
{ /* Note: when SetEndOfFile is used to extend a file, the contents of the
** new portion of the file is undefined. This is unlike chsize(),
** which guarantees that the new portion of the file will be zeroed.
** Not sure if this is important or not.
*/
if (SetEndOfFile (psf->file.handle) == 0)
{ retval = -1 ;
psf_log_syserr (psf, GetLastError ()) ;
} ;
} ;
return retval ;
} /* psf_ftruncate */
#else
/* Win32 file i/o functions implemented using Unix-style file i/o API */
/* Win32 has a 64 bit file offset seek function:
**
** __int64 _lseeki64 (int handle, __int64 offset, int origin) ;
**
** It also has a 64 bit fstat function:
**
** int _fstati64 (int, struct _stati64 *) ;
**
** but the fscking thing doesn't work!!!!! The file size parameter returned
** by this function is only valid up until more data is written at the end of
** the file. That makes this function completely 100% useless.
*/
#include <io.h>
#include <direct.h>
/* Win32 */ int
psf_fopen (SF_PRIVATE *psf, const char *pathname, int open_mode)
{ int oflag, mode ;
switch (open_mode)
{ case SFM_READ :
oflag = O_RDONLY | O_BINARY ;
mode = 0 ;
break ;
case SFM_WRITE :
oflag = O_WRONLY | O_CREAT | O_TRUNC | O_BINARY ;
mode = S_IRUSR | S_IWUSR | S_IRGRP ;
break ;
case SFM_RDWR :
oflag = O_RDWR | O_CREAT | O_BINARY ;
mode = S_IRUSR | S_IWUSR | S_IRGRP ;
break ;
default :
psf->error = SFE_BAD_OPEN_MODE ;
return -1 ;
break ;
} ;
if (mode == 0)
psf->file.filedes = open (pathname, oflag) ;
else
psf->file.filedes = open (pathname, oflag, mode) ;
if (psf->file.filedes == -1)
psf_log_syserr (psf, errno) ;
return psf->file.filedes ;
} /* psf_fopen */
/* Win32 */ sf_count_t
psf_fseek (SF_PRIVATE *psf, sf_count_t offset, int whence)
{ sf_count_t new_position ;
if (psf->virtual_io)
return psf->vio.seek (offset, whence, psf->vio_user_data) ;
switch (whence)
{ case SEEK_SET :
offset += psf->fileoffset ;
break ;
case SEEK_END :
if (psf->file.mode == SFM_WRITE)
{ new_position = _lseeki64 (psf->file.filedes, offset, whence) ;
if (new_position < 0)
psf_log_syserr (psf, errno) ;
return new_position - psf->fileoffset ;
} ;
/* Transform SEEK_END into a SEEK_SET, ie find the file
** length and add the requested offset (should be <= 0) to
** get the offset wrt the start of the file.
*/
whence = SEEK_SET ;
offset = _lseeki64 (psf->file.filedes, 0, SEEK_END) + offset ;
break ;
default :
/* No need to do anything about SEEK_CUR. */
break ;
} ;
/*
** Bypass weird Win32-ism if necessary.
** _lseeki64() returns an "invalid parameter" error if called with the
** offset == 0 and whence == SEEK_CUR.
** Use the _telli64() function instead.
*/
if (offset == 0 && whence == SEEK_CUR)
new_position = _telli64 (psf->file.filedes) ;
else
new_position = _lseeki64 (psf->file.filedes, offset, whence) ;
if (new_position < 0)
psf_log_syserr (psf, errno) ;
new_position -= psf->fileoffset ;
return new_position ;
} /* psf_fseek */
/* Win32 */ sf_count_t
psf_fread (void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (psf->virtual_io)
return psf->vio.read (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the reads down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ;
count = read (psf->file.filedes, ((char*) ptr) + total, (size_t) count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
return total / bytes ;
} /* psf_fread */
/* Win32 */ sf_count_t
psf_fwrite (const void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (psf->virtual_io)
return psf->vio.write (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the writes down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : items ;
count = write (psf->file.filedes, ((const char*) ptr) + total, count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
return total / bytes ;
} /* psf_fwrite */
/* Win32 */ sf_count_t
psf_ftell (SF_PRIVATE *psf)
{ sf_count_t pos ;
if (psf->virtual_io)
return psf->vio.tell (psf->vio_user_data) ;
pos = _telli64 (psf->file.filedes) ;
if (pos == ((sf_count_t) -1))
{ psf_log_syserr (psf, errno) ;
return -1 ;
} ;
return pos - psf->fileoffset ;
} /* psf_ftell */
/* Win32 */ int
psf_fclose (SF_PRIVATE *psf)
{ int retval ;
while ((retval = close (psf->file.filedes)) == -1 && errno == EINTR)
/* Do nothing. */ ;
if (retval == -1)
psf_log_syserr (psf, errno) ;
psf->file.filedes = -1 ;
return retval ;
} /* psf_fclose */
/* Win32 */ sf_count_t
psf_fgets (char *buffer, sf_count_t bufsize, SF_PRIVATE *psf)
{ sf_count_t k = 0 ;
sf_count_t count ;
while (k < bufsize - 1)
{ count = read (psf->file.filedes, &(buffer [k]), 1) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0 || buffer [k++] == '\n')
break ;
} ;
buffer [k] = 0 ;
return k ;
} /* psf_fgets */
/* Win32 */ int
psf_is_pipe (SF_PRIVATE *psf)
{ struct stat statbuf ;
if (psf->virtual_io)
return SF_FALSE ;
/* Not sure if this works. */
if (fstat (psf->file.filedes, &statbuf) == -1)
{ psf_log_syserr (psf, errno) ;
/* Default to maximum safety. */
return SF_TRUE ;
} ;
/* These macros are defined in Win32/unistd.h. */
if (S_ISFIFO (statbuf.st_mode) || S_ISSOCK (statbuf.st_mode))
return SF_TRUE ;
return SF_FALSE ;
} /* psf_is_pipe */
/* Win32 */ sf_count_t
psf_get_filelen (SF_PRIVATE *psf)
{
#if 0
/*
** Windoze is SOOOOO FUCKED!!!!!!!
** This code should work but doesn't. Why?
** Code below does work.
*/
struct _stati64 statbuf ;
if (_fstati64 (psf->file.filedes, &statbuf))
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
return statbuf.st_size ;
#else
sf_count_t current, filelen ;
if (psf->virtual_io)
return psf->vio.get_filelen (psf->vio_user_data) ;
if ((current = _telli64 (psf->file.filedes)) < 0)
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
/*
** Let's face it, windoze is FUBAR!!!
**
** For some reason, I have to call _lseeki64() TWICE to get to the
** end of the file.
**
** This might have been avoided if windows had implemented the POSIX
** standard function fsync() but NO, that would have been too easy.
**
** I am VERY close to saying that windoze will no longer be supported
** by libsndfile and changing the license to GPL at the same time.
*/
_lseeki64 (psf->file.filedes, 0, SEEK_END) ;
if ((filelen = _lseeki64 (psf->file.filedes, 0, SEEK_END)) < 0)
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
if (filelen > current)
_lseeki64 (psf->file.filedes, current, SEEK_SET) ;
switch (psf->file.mode)
{ case SFM_WRITE :
filelen = filelen - psf->fileoffset ;
break ;
case SFM_READ :
if (psf->fileoffset > 0 && psf->filelength > 0)
filelen = psf->filelength ;
break ;
case SFM_RDWR :
/*
** Cannot open embedded files SFM_RDWR so we don't need to
** subtract psf->fileoffset. We already have the answer we
** need.
*/
break ;
default :
filelen = 0 ;
} ;
return filelen ;
#endif
} /* psf_get_filelen */
/* Win32 */ int
psf_ftruncate (SF_PRIVATE *psf, sf_count_t len)
{ int retval ;
/* Returns 0 on success, non-zero on failure. */
if (len < 0)
return 1 ;
/* The global village idiots at microsoft decided to implement
** nearly all the required 64 bit file offset functions except
** for one, truncate. The fscking morons!
**
** This is not 64 bit file offset clean. Someone needs to clean
** this up.
*/
if (len > 0x7FFFFFFF)
return -1 ;
retval = chsize (psf->file.filedes, len) ;
if (retval == -1)
psf_log_syserr (psf, errno) ;
return retval ;
} /* psf_ftruncate */
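/*
** A 64 bit clean alternative (assumption : a CRT that provides
** _chsize_s (), e.g. recent MSVC versions) would avoid the 2 GB limit
** above. Sketch only :
*/
#if 0
static int
psf_ftruncate64_sketch (SF_PRIVATE *psf, sf_count_t len)
{	/* _chsize_s () takes a 64 bit length and returns 0 on success. */
	return _chsize_s (psf->file.filedes, len) ? -1 : 0 ;
} /* psf_ftruncate64_sketch */
#endif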
static void
psf_log_syserr (SF_PRIVATE *psf, int error)
{
/* Only log an error if no error has been set yet. */
if (psf->error == 0)
{ psf->error = SFE_SYSTEM ;
snprintf (psf->syserr, sizeof (psf->syserr), "System error : %s", strerror (error)) ;
} ;
return ;
} /* psf_log_syserr */
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2417_0 |
crossvul-cpp_data_good_5755_2 | /*
* BRIEF MODULE DESCRIPTION
* Au1200 LCD Driver.
*
* Copyright 2004-2005 AMD
* Author: AMD
*
* Based on:
* linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
* Created 28 Dec 1997 by Geert Uytterhoeven
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1200fb.h> /* platform_data */
#include "au1200fb.h"
#define DRIVER_NAME "au1200fb"
#define DRIVER_DESC "LCD controller driver for AU1200 processors"
#define DEBUG 0
#define print_err(f, arg...) printk(KERN_ERR DRIVER_NAME ": " f "\n", ## arg)
#define print_warn(f, arg...) printk(KERN_WARNING DRIVER_NAME ": " f "\n", ## arg)
#define print_info(f, arg...) printk(KERN_INFO DRIVER_NAME ": " f "\n", ## arg)
#if DEBUG
#define print_dbg(f, arg...) printk(KERN_DEBUG __FILE__ ": " f "\n", ## arg)
#else
#define print_dbg(f, arg...) do {} while (0)
#endif
#define AU1200_LCD_FB_IOCTL 0x46FF
#define AU1200_LCD_SET_SCREEN 1
#define AU1200_LCD_GET_SCREEN 2
#define AU1200_LCD_SET_WINDOW 3
#define AU1200_LCD_GET_WINDOW 4
#define AU1200_LCD_SET_PANEL 5
#define AU1200_LCD_GET_PANEL 6
#define SCREEN_SIZE (1<< 1)
#define SCREEN_BACKCOLOR (1<< 2)
#define SCREEN_BRIGHTNESS (1<< 3)
#define SCREEN_COLORKEY (1<< 4)
#define SCREEN_MASK (1<< 5)
struct au1200_lcd_global_regs_t {
unsigned int flags;
unsigned int xsize;
unsigned int ysize;
unsigned int backcolor;
unsigned int brightness;
unsigned int colorkey;
unsigned int mask;
unsigned int panel_choice;
char panel_desc[80];
};
#define WIN_POSITION (1<< 0)
#define WIN_ALPHA_COLOR (1<< 1)
#define WIN_ALPHA_MODE (1<< 2)
#define WIN_PRIORITY (1<< 3)
#define WIN_CHANNEL (1<< 4)
#define WIN_BUFFER_FORMAT (1<< 5)
#define WIN_COLOR_ORDER (1<< 6)
#define WIN_PIXEL_ORDER (1<< 7)
#define WIN_SIZE (1<< 8)
#define WIN_COLORKEY_MODE (1<< 9)
#define WIN_DOUBLE_BUFFER_MODE (1<< 10)
#define WIN_RAM_ARRAY_MODE (1<< 11)
#define WIN_BUFFER_SCALE (1<< 12)
#define WIN_ENABLE (1<< 13)
struct au1200_lcd_window_regs_t {
unsigned int flags;
unsigned int xpos;
unsigned int ypos;
unsigned int alpha_color;
unsigned int alpha_mode;
unsigned int priority;
unsigned int channel;
unsigned int buffer_format;
unsigned int color_order;
unsigned int pixel_order;
unsigned int xsize;
unsigned int ysize;
unsigned int colorkey_mode;
unsigned int double_buffer_mode;
unsigned int ram_array_mode;
unsigned int xscale;
unsigned int yscale;
unsigned int enable;
};
struct au1200_lcd_iodata_t {
unsigned int subcmd;
struct au1200_lcd_global_regs_t global;
struct au1200_lcd_window_regs_t window;
};
#if defined(__BIG_ENDIAN)
#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_11
#else
#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_00
#endif
#define LCD_CONTROL_DEFAULT_SBPPF LCD_CONTROL_SBPPF_565
/* Private, per-framebuffer management information (independent of the panel itself) */
struct au1200fb_device {
struct fb_info *fb_info; /* FB driver info record */
struct au1200fb_platdata *pd;
int plane;
unsigned char* fb_mem; /* FrameBuffer memory map */
unsigned int fb_len;
dma_addr_t fb_phys;
};
/********************************************************************/
/* LCD controller restrictions */
#define AU1200_LCD_MAX_XRES 1280
#define AU1200_LCD_MAX_YRES 1024
#define AU1200_LCD_MAX_BPP 32
#define AU1200_LCD_MAX_CLK 96000000 /* fixme: this needs to go away ? */
#define AU1200_LCD_NBR_PALETTE_ENTRIES 256
/* Default number of visible screen buffer to allocate */
#define AU1200FB_NBR_VIDEO_BUFFERS 1
/* Default maximum number of fb devices to create */
#define MAX_DEVICE_COUNT 4
/* Default window configuration entry to use (see windows[]) */
#define DEFAULT_WINDOW_INDEX 2
/********************************************************************/
static struct fb_info *_au1200fb_infos[MAX_DEVICE_COUNT];
static struct au1200_lcd *lcd = (struct au1200_lcd *) AU1200_LCD_ADDR;
static int device_count = MAX_DEVICE_COUNT;
static int window_index = DEFAULT_WINDOW_INDEX; /* default window config (see windows[]) */
static int panel_index = 2; /* default panel (see known_lcd_panels[]) */
static struct window_settings *win;
static struct panel_settings *panel;
static int noblanking = 1;
static int nohwcursor = 0;
struct window_settings {
unsigned char name[64];
uint32 mode_backcolor;
uint32 mode_colorkey;
uint32 mode_colorkeymsk;
struct {
int xres;
int yres;
int xpos;
int ypos;
uint32 mode_winctrl1; /* winctrl1[FRM,CCO,PO,PIPE] */
uint32 mode_winenable;
} w[4];
};
#if defined(__BIG_ENDIAN)
#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_00
#else
#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01
#endif
/*
* Default window configurations
*/
static struct window_settings windows[] = {
{ /* Index 0 */
"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
/* mode_backcolor */ 0x006600ff,
/* mode_colorkey,msk*/ 0, 0,
{
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ LCD_WINENABLE_WEN0,
},
{
/* xres, yres, xpos, ypos */ 100, 100, 100, 100,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ LCD_WINENABLE_WEN1,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ 0,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0,
},
},
},
{ /* Index 1 */
"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
/* mode_backcolor */ 0x006600ff,
/* mode_colorkey,msk*/ 0, 0,
{
{
/* xres, yres, xpos, ypos */ 320, 240, 5, 5,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_24BPP |
LCD_WINCTRL1_PO_00,
/* mode_winenable*/ LCD_WINENABLE_WEN0,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565
| LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ 0,
},
{
/* xres, yres, xpos, ypos */ 100, 100, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
},
{
/* xres, yres, xpos, ypos */ 200, 25, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0,
},
},
},
{ /* Index 2 */
"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
/* mode_backcolor */ 0x006600ff,
/* mode_colorkey,msk*/ 0, 0,
{
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ LCD_WINENABLE_WEN0,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ 0,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_32BPP |
LCD_WINCTRL1_PO_00|LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0,
},
},
},
/* Need VGA 640 @ 24bpp, @ 32bpp */
/* Need VGA 800 @ 24bpp, @ 32bpp */
/* Need VGA 1024 @ 24bpp, @ 32bpp */
};
/*
* Controller configurations for various panels.
*/
struct panel_settings
{
const char name[25]; /* Full name <vendor>_<model> */
struct fb_monspecs monspecs; /* FB monitor specs */
/* panel timings */
uint32 mode_screen;
uint32 mode_horztiming;
uint32 mode_verttiming;
uint32 mode_clkcontrol;
uint32 mode_pwmdiv;
uint32 mode_pwmhi;
uint32 mode_outmask;
uint32 mode_fifoctrl;
uint32 mode_toyclksrc;
uint32 mode_backlight;
uint32 mode_auxpll;
#define Xres min_xres
#define Yres min_yres
u32 min_xres; /* Minimum horizontal resolution */
u32 max_xres; /* Maximum horizontal resolution */
u32 min_yres; /* Minimum vertical resolution */
u32 max_yres; /* Maximum vertical resolution */
};
/********************************************************************/
/* fixme: Maybe a modedb for the CRT ? otherwise panels should be as-is */
/* List of panels known to work with the AU1200 LCD controller.
* To add a new panel, enter the same specifications as the
* Generic_TFT one, and MAKE SURE that it doesn't conflict
* with the controller restrictions. Restrictions are:
*
* STN color panels: max_bpp <= 12
* STN mono panels: max_bpp <= 4
* TFT panels: max_bpp <= 16
* max_xres <= 800
* max_yres <= 600
*/
static struct panel_settings known_lcd_panels[] =
{
[0] = { /* QVGA 320x240 H:33.3kHz V:110Hz */
.name = "QVGA_320x240",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(320) |
LCD_SCREEN_SY_N(240),
.mode_horztiming = 0x00c4623b,
.mode_verttiming = 0x00502814,
.mode_clkcontrol = 0x00020002, /* /4=24Mhz */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
320, 320,
240, 240,
},
[1] = { /* VGA 640x480 H:30.3kHz V:58Hz */
.name = "VGA_640x480",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x13f9df80,
.mode_horztiming = 0x003c5859,
.mode_verttiming = 0x00741201,
.mode_clkcontrol = 0x00020001, /* /4=24Mhz */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
640, 480,
640, 480,
},
[2] = { /* SVGA 800x600 H:46.1kHz V:69Hz */
.name = "SVGA_800x600",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x18fa5780,
.mode_horztiming = 0x00dc7e77,
.mode_verttiming = 0x00584805,
.mode_clkcontrol = 0x00020000, /* /2=48Mhz */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
800, 800,
600, 600,
},
[3] = { /* XVGA 1024x768 H:56.2kHz V:70Hz */
.name = "XVGA_1024x768",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x1ffaff80,
.mode_horztiming = 0x007d0e57,
.mode_verttiming = 0x00740a01,
.mode_clkcontrol = 0x000A0000, /* /1 */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 6, /* 72MHz AUXPLL */
1024, 1024,
768, 768,
},
[4] = { /* XVGA 1280x1024 H:68.5kHz V:65Hz */
.name = "XVGA_1280x1024",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x27fbff80,
.mode_horztiming = 0x00cdb2c7,
.mode_verttiming = 0x00600002,
.mode_clkcontrol = 0x000A0000, /* /1 */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 10, /* 120MHz AUXPLL */
1280, 1280,
1024, 1024,
},
[5] = { /* Samsung 1024x768 TFT */
.name = "Samsung_1024x768_TFT",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x1ffaff80,
.mode_horztiming = 0x018cc677,
.mode_verttiming = 0x00241217,
.mode_clkcontrol = 0x00000000, /* SCB 0x1 /4=24Mhz */
.mode_pwmdiv = 0x8000063f, /* SCB 0x0 */
.mode_pwmhi = 0x03400000, /* SCB 0x0 */
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
1024, 1024,
768, 768,
},
[6] = { /* Toshiba 640x480 TFT */
.name = "Toshiba_640x480_TFT",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(640) |
LCD_SCREEN_SY_N(480),
.mode_horztiming = LCD_HORZTIMING_HPW_N(96) |
LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(51),
.mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
LCD_VERTTIMING_VND1_N(11) | LCD_VERTTIMING_VND2_N(32),
.mode_clkcontrol = 0x00000000, /* /4=24Mhz */
.mode_pwmdiv = 0x8000063f,
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
640, 480,
640, 480,
},
[7] = { /* Sharp 320x240 TFT */
.name = "Sharp_320x240_TFT",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 12500,
.hfmax = 20000,
.vfmin = 38,
.vfmax = 81,
.dclkmin = 4500000,
.dclkmax = 6800000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(320) |
LCD_SCREEN_SY_N(240),
.mode_horztiming = LCD_HORZTIMING_HPW_N(60) |
LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(2),
.mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
LCD_VERTTIMING_VND1_N(2) | LCD_VERTTIMING_VND2_N(5),
.mode_clkcontrol = LCD_CLKCONTROL_PCD_N(7), /*16=6Mhz*/
.mode_pwmdiv = 0x8000063f,
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
320, 320,
240, 240,
},
[8] = { /* Toppoly TD070WGCB2 7" 856x480 TFT */
.name = "Toppoly_TD070WGCB2",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(856) |
LCD_SCREEN_SY_N(480),
.mode_horztiming = LCD_HORZTIMING_HND2_N(43) |
LCD_HORZTIMING_HND1_N(43) | LCD_HORZTIMING_HPW_N(114),
.mode_verttiming = LCD_VERTTIMING_VND2_N(20) |
LCD_VERTTIMING_VND1_N(21) | LCD_VERTTIMING_VPW_N(4),
.mode_clkcontrol = 0x00020001, /* /4=24Mhz */
.mode_pwmdiv = 0x8000063f,
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
856, 856,
480, 480,
},
[9] = {
.name = "DB1300_800x480",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(800) |
LCD_SCREEN_SY_N(480),
.mode_horztiming = LCD_HORZTIMING_HPW_N(5) |
LCD_HORZTIMING_HND1_N(16) |
LCD_HORZTIMING_HND2_N(8),
.mode_verttiming = LCD_VERTTIMING_VPW_N(4) |
LCD_VERTTIMING_VND1_N(8) |
LCD_VERTTIMING_VND2_N(5),
.mode_clkcontrol = LCD_CLKCONTROL_PCD_N(1) |
LCD_CLKCONTROL_IV |
LCD_CLKCONTROL_IH,
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = (48/12) * 2,
800, 800,
480, 480,
},
};
#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels))
/********************************************************************/
static int winbpp (unsigned int winctrl1)
{
int bits = 0;
/* how many bits are needed for each pixel format */
switch (winctrl1 & LCD_WINCTRL1_FRM) {
case LCD_WINCTRL1_FRM_1BPP:
bits = 1;
break;
case LCD_WINCTRL1_FRM_2BPP:
bits = 2;
break;
case LCD_WINCTRL1_FRM_4BPP:
bits = 4;
break;
case LCD_WINCTRL1_FRM_8BPP:
bits = 8;
break;
case LCD_WINCTRL1_FRM_12BPP:
case LCD_WINCTRL1_FRM_16BPP655:
case LCD_WINCTRL1_FRM_16BPP565:
case LCD_WINCTRL1_FRM_16BPP556:
case LCD_WINCTRL1_FRM_16BPPI1555:
case LCD_WINCTRL1_FRM_16BPPI5551:
case LCD_WINCTRL1_FRM_16BPPA1555:
case LCD_WINCTRL1_FRM_16BPPA5551:
bits = 16;
break;
case LCD_WINCTRL1_FRM_24BPP:
case LCD_WINCTRL1_FRM_32BPP:
bits = 32;
break;
}
return bits;
}
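/*
 * Usage sketch (illustrative, not part of the original driver): winbpp()
 * converts a window format into a pixel depth, so the byte width of a
 * scan line can be derived from it. For a 320 pixel wide window in
 * 16BPP565 format this yields 320 * 16 / 8 = 640 bytes.
 */
#if 0
static int line_bytes_example(void)
{
	return (320 * winbpp(LCD_WINCTRL1_FRM_16BPP565)) / 8;	/* = 640 */
}
#endif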
static int fbinfo2index (struct fb_info *fb_info)
{
int i;
for (i = 0; i < device_count; ++i) {
if (fb_info == _au1200fb_infos[i])
return i;
}
printk("au1200fb: ERROR: fbinfo2index failed!\n");
return -1;
}
static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
int xpos, int ypos)
{
uint32 winctrl0, winctrl1, winenable, fb_offset = 0;
int xsz, ysz;
/* FIX!!! NOT CHECKING FOR COMPLETE OFFSCREEN YET */
winctrl0 = lcd->window[plane].winctrl0;
winctrl1 = lcd->window[plane].winctrl1;
winctrl0 &= (LCD_WINCTRL0_A | LCD_WINCTRL0_AEN);
winctrl1 &= ~(LCD_WINCTRL1_SZX | LCD_WINCTRL1_SZY);
/* Check for off-screen adjustments */
xsz = win->w[plane].xres;
ysz = win->w[plane].yres;
if ((xpos + win->w[plane].xres) > panel->Xres) {
/* Off-screen to the right */
xsz = panel->Xres - xpos; /* off by 1 ??? */
/*printk("off screen right\n");*/
}
if ((ypos + win->w[plane].yres) > panel->Yres) {
/* Off-screen to the bottom */
ysz = panel->Yres - ypos; /* off by 1 ??? */
/*printk("off screen bottom\n");*/
}
if (xpos < 0) {
/* Off-screen to the left */
xsz = win->w[plane].xres + xpos;
fb_offset += (((0 - xpos) * winbpp(lcd->window[plane].winctrl1))/8);
xpos = 0;
/*printk("off screen left\n");*/
}
if (ypos < 0) {
/* Off-screen to the top */
ysz = win->w[plane].yres + ypos;
/* fixme: fb_offset += ((0-ypos)*fb_pars[plane].line_length); */
ypos = 0;
/*printk("off screen top\n");*/
}
/* record settings */
win->w[plane].xpos = xpos;
win->w[plane].ypos = ypos;
xsz -= 1;
ysz -= 1;
winctrl0 |= (xpos << 21);
winctrl0 |= (ypos << 10);
winctrl1 |= (xsz << 11);
winctrl1 |= (ysz << 0);
/* Disable the window while making changes, then restore WINEN */
winenable = lcd->winenable & (1 << plane);
au_sync();
lcd->winenable &= ~(1 << plane);
lcd->window[plane].winctrl0 = winctrl0;
lcd->window[plane].winctrl1 = winctrl1;
lcd->window[plane].winbuf0 =
lcd->window[plane].winbuf1 = fbdev->fb_phys;
lcd->window[plane].winbufctrl = 0; /* select winbuf0 */
lcd->winenable |= winenable;
au_sync();
return 0;
}
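/*
 * Worked example of the left-edge clipping above (illustrative): a 320
 * pixel wide 16bpp window placed at xpos = -10 is clipped to xsz = 310,
 * xpos is reset to 0, and the frame buffer start is advanced by
 * (10 * 16) / 8 = 20 bytes so the leftmost 10 pixels are skipped.
 */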
static void au1200_setpanel(struct panel_settings *newpanel,
struct au1200fb_platdata *pd)
{
/*
* Perform global setup/init of LCD controller
*/
uint32 winenable;
/* Make sure all windows disabled */
winenable = lcd->winenable;
lcd->winenable = 0;
au_sync();
/*
* Ensure everything is disabled before reconfiguring
*/
if (lcd->screen & LCD_SCREEN_SEN) {
/* Wait for vertical sync period */
lcd->intstatus = LCD_INT_SS;
while ((lcd->intstatus & LCD_INT_SS) == 0) {
au_sync();
}
lcd->screen &= ~LCD_SCREEN_SEN; /*disable the controller*/
do {
lcd->intstatus = lcd->intstatus; /*clear interrupts*/
au_sync();
/*wait for controller to shut down*/
} while ((lcd->intstatus & LCD_INT_SD) == 0);
/* Call shutdown of current panel (if up) */
/* this must occur last, because if an external clock is driving
the controller, the clock cannot be turned off before first
shutting down the controller.
*/
if (pd->panel_shutdown)
pd->panel_shutdown();
}
/* Newpanel == NULL indicates a shutdown operation only */
if (newpanel == NULL)
return;
panel = newpanel;
printk("Panel(%s), %dx%d\n", panel->name, panel->Xres, panel->Yres);
/*
* Setup clocking if internal LCD clock source (assumes sys_auxpll valid)
*/
if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT))
{
uint32 sys_clksrc;
au_writel(panel->mode_auxpll, SYS_AUXPLL);
sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f;
sys_clksrc |= panel->mode_toyclksrc;
au_writel(sys_clksrc, SYS_CLKSRC);
}
/*
* Configure panel timings
*/
lcd->screen = panel->mode_screen;
lcd->horztiming = panel->mode_horztiming;
lcd->verttiming = panel->mode_verttiming;
lcd->clkcontrol = panel->mode_clkcontrol;
lcd->pwmdiv = panel->mode_pwmdiv;
lcd->pwmhi = panel->mode_pwmhi;
lcd->outmask = panel->mode_outmask;
lcd->fifoctrl = panel->mode_fifoctrl;
au_sync();
/* fixme: Check window settings to make sure still valid
* for new geometry */
#if 0
au1200_setlocation(fbdev, 0, win->w[0].xpos, win->w[0].ypos);
au1200_setlocation(fbdev, 1, win->w[1].xpos, win->w[1].ypos);
au1200_setlocation(fbdev, 2, win->w[2].xpos, win->w[2].ypos);
au1200_setlocation(fbdev, 3, win->w[3].xpos, win->w[3].ypos);
#endif
lcd->winenable = winenable;
/*
* Re-enable screen now that it is configured
*/
lcd->screen |= LCD_SCREEN_SEN;
au_sync();
/* Call init of panel */
if (pd->panel_init)
pd->panel_init();
/* FIX!!!! not appropriate on panel change!!! Global setup/init */
lcd->intenable = 0;
lcd->intstatus = ~0;
lcd->backcolor = win->mode_backcolor;
/* Setup Color Key - FIX!!! */
lcd->colorkey = win->mode_colorkey;
lcd->colorkeymsk = win->mode_colorkeymsk;
/* Setup HWCursor - FIX!!! Need to support this eventually */
lcd->hwc.cursorctrl = 0;
lcd->hwc.cursorpos = 0;
lcd->hwc.cursorcolor0 = 0;
lcd->hwc.cursorcolor1 = 0;
lcd->hwc.cursorcolor2 = 0;
lcd->hwc.cursorcolor3 = 0;
#if 0
#define D(X) printk("%25s: %08X\n", #X, X)
D(lcd->screen);
D(lcd->horztiming);
D(lcd->verttiming);
D(lcd->clkcontrol);
D(lcd->pwmdiv);
D(lcd->pwmhi);
D(lcd->outmask);
D(lcd->fifoctrl);
D(lcd->window[0].winctrl0);
D(lcd->window[0].winctrl1);
D(lcd->window[0].winctrl2);
D(lcd->window[0].winbuf0);
D(lcd->window[0].winbuf1);
D(lcd->window[0].winbufctrl);
D(lcd->window[1].winctrl0);
D(lcd->window[1].winctrl1);
D(lcd->window[1].winctrl2);
D(lcd->window[1].winbuf0);
D(lcd->window[1].winbuf1);
D(lcd->window[1].winbufctrl);
D(lcd->window[2].winctrl0);
D(lcd->window[2].winctrl1);
D(lcd->window[2].winctrl2);
D(lcd->window[2].winbuf0);
D(lcd->window[2].winbuf1);
D(lcd->window[2].winbufctrl);
D(lcd->window[3].winctrl0);
D(lcd->window[3].winctrl1);
D(lcd->window[3].winctrl2);
D(lcd->window[3].winbuf0);
D(lcd->window[3].winbuf1);
D(lcd->window[3].winbufctrl);
D(lcd->winenable);
D(lcd->intenable);
D(lcd->intstatus);
D(lcd->backcolor);
D(lcd->winenable);
D(lcd->colorkey);
D(lcd->colorkeymsk);
D(lcd->hwc.cursorctrl);
D(lcd->hwc.cursorpos);
D(lcd->hwc.cursorcolor0);
D(lcd->hwc.cursorcolor1);
D(lcd->hwc.cursorcolor2);
D(lcd->hwc.cursorcolor3);
#endif
}
static void au1200_setmode(struct au1200fb_device *fbdev)
{
int plane = fbdev->plane;
/* Window/plane setup */
lcd->window[plane].winctrl1 = ( 0
| LCD_WINCTRL1_PRI_N(plane)
| win->w[plane].mode_winctrl1 /* FRM,CCO,PO,PIPE */
) ;
au1200_setlocation(fbdev, plane, win->w[plane].xpos, win->w[plane].ypos);
lcd->window[plane].winctrl2 = ( 0
| LCD_WINCTRL2_CKMODE_00
| LCD_WINCTRL2_DBM
| LCD_WINCTRL2_BX_N(fbdev->fb_info->fix.line_length)
| LCD_WINCTRL2_SCX_1
| LCD_WINCTRL2_SCY_1
) ;
lcd->winenable |= win->w[plane].mode_winenable;
au_sync();
}
/* Inline helpers */
/*#define panel_is_dual(panel) ((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/
/*#define panel_is_active(panel)((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/
#define panel_is_color(panel) ((panel->mode_screen & LCD_SCREEN_PT) <= LCD_SCREEN_PT_CDSTN)
/* Bitfields format supported by the controller. */
static struct fb_bitfield rgb_bitfields[][4] = {
/* Red, Green, Blue, Transp */
[LCD_WINCTRL1_FRM_16BPP655 >> 25] =
{ { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPP565 >> 25] =
{ { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPP556 >> 25] =
{ { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPPI1555 >> 25] =
{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPPI5551 >> 25] =
{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPPA1555 >> 25] =
{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } },
[LCD_WINCTRL1_FRM_16BPPA5551 >> 25] =
{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } },
[LCD_WINCTRL1_FRM_24BPP >> 25] =
{ { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_32BPP >> 25] =
{ { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 0, 0 } },
};
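/*
 * Sketch (illustrative): packing a pixel with the 16BPP565 bitfields
 * above - red at offset 11 (5 bits), green at offset 5 (6 bits), blue at
 * offset 0 (5 bits).
 */
#if 0
static u32 pack_rgb565(u32 r5, u32 g6, u32 b5)
{
	return (r5 << 11) | (g6 << 5) | b5;
}
#endif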
/*-------------------------------------------------------------------------*/
/* Helpers */
static void au1200fb_update_fbinfo(struct fb_info *fbi)
{
/* FIX!!!! This also needs to take the window pixel format into account!!! */
/* Update var-dependent FB info */
if (panel_is_color(panel)) {
if (fbi->var.bits_per_pixel <= 8) {
/* palettized */
fbi->fix.visual = FB_VISUAL_PSEUDOCOLOR;
fbi->fix.line_length = fbi->var.xres_virtual /
(8/fbi->var.bits_per_pixel);
} else {
/* non-palettized */
fbi->fix.visual = FB_VISUAL_TRUECOLOR;
fbi->fix.line_length = fbi->var.xres_virtual * (fbi->var.bits_per_pixel / 8);
}
} else {
/* mono FIX!!! mono 8 and 4 bits */
fbi->fix.visual = FB_VISUAL_MONO10;
fbi->fix.line_length = fbi->var.xres_virtual / 8;
}
fbi->screen_size = fbi->fix.line_length * fbi->var.yres_virtual;
print_dbg("line length: %d\n", fbi->fix.line_length);
print_dbg("bits_per_pixel: %d\n", fbi->var.bits_per_pixel);
}
/*-------------------------------------------------------------------------*/
/* AU1200 framebuffer driver */
/* fb_check_var
* Validate var settings with hardware restrictions and modify it if necessary
*/
static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
struct au1200fb_device *fbdev = fbi->par;
u32 pixclock;
int screen_size, plane;
plane = fbdev->plane;
/* Make sure that the mode respect all LCD controller and
* panel restrictions. */
var->xres = win->w[plane].xres;
var->yres = win->w[plane].yres;
/* No need for virtual resolution support */
var->xres_virtual = var->xres;
var->yres_virtual = var->yres;
var->bits_per_pixel = winbpp(win->w[plane].mode_winctrl1);
screen_size = var->xres_virtual * var->yres_virtual;
if (var->bits_per_pixel > 8) screen_size *= (var->bits_per_pixel / 8);
else screen_size /= (8/var->bits_per_pixel);
if (fbdev->fb_len < screen_size)
return -EINVAL; /* Virtual screen is too big, abort */
/* FIX!!!! what are the implications of ignoring this for windows ??? */
/* The max LCD clock is fixed to 48MHz (value of AUX_CLK). The pixel
 * clock can only be obtained by dividing this value by an even integer.
 * Fall back to a slower pixel clock if necessary. */
pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin);
pixclock = min3(pixclock, fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2);
if (AU1200_LCD_MAX_CLK % pixclock) {
int diff = AU1200_LCD_MAX_CLK % pixclock;
pixclock -= diff;
}
var->pixclock = KHZ2PICOS(pixclock/1000);
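/* Worked example (illustrative): a request of 28 MHz gives
 * 96 MHz % 28 MHz = 12 MHz, so pixclock drops to 16 MHz, which divides
 * 96 MHz evenly (96 / 16 = 6). Note that this remainder trick does not
 * always pick the closest usable value: 24 MHz also divides 96 MHz
 * evenly and is nearer to the request. */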
#if 0
if (!panel_is_active(panel)) {
int pcd = AU1200_LCD_MAX_CLK / (pixclock * 2) - 1;
if (!panel_is_color(panel)
&& (panel->control_base & LCD_CONTROL_MPI) && (pcd < 3)) {
/* STN 8bit mono panel support is up to 6MHz pixclock */
var->pixclock = KHZ2PICOS(6000);
} else if (!pcd) {
/* Other STN panel support is up to 12MHz */
var->pixclock = KHZ2PICOS(12000);
}
}
#endif
/* Set bitfield accordingly */
switch (var->bits_per_pixel) {
case 16:
{
/* 16bpp True color.
* These must be set to MATCH WINCTRL[FORM] */
int idx;
idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
var->red = rgb_bitfields[idx][0];
var->green = rgb_bitfields[idx][1];
var->blue = rgb_bitfields[idx][2];
var->transp = rgb_bitfields[idx][3];
break;
}
case 32:
{
/* 32bpp True color.
* These must be set to MATCH WINCTRL[FORM] */
int idx;
idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
var->red = rgb_bitfields[idx][0];
var->green = rgb_bitfields[idx][1];
var->blue = rgb_bitfields[idx][2];
var->transp = rgb_bitfields[idx][3];
break;
}
default:
print_dbg("Unsupported depth %dbpp", var->bits_per_pixel);
return -EINVAL;
}
return 0;
}
/* fb_set_par
* Set hardware with var settings. This will enable the controller with a
* specific mode, normally validated with the fb_check_var method
*/
static int au1200fb_fb_set_par(struct fb_info *fbi)
{
struct au1200fb_device *fbdev = fbi->par;
au1200fb_update_fbinfo(fbi);
au1200_setmode(fbdev);
return 0;
}
/* fb_setcolreg
* Set color in LCD palette.
*/
static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *fbi)
{
volatile u32 *palette = lcd->palette;
u32 value;
if (regno > (AU1200_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;
if (fbi->var.grayscale) {
/* Convert color to grayscale */
red = green = blue =
(19595 * red + 38470 * green + 7471 * blue) >> 16;
}
if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
/* Place color in the pseudopalette */
if (regno > 16)
return -EINVAL;
palette = (u32*) fbi->pseudo_palette;
red >>= (16 - fbi->var.red.length);
green >>= (16 - fbi->var.green.length);
blue >>= (16 - fbi->var.blue.length);
value = (red << fbi->var.red.offset) |
(green << fbi->var.green.offset)|
(blue << fbi->var.blue.offset);
value &= 0xFFFF;
} else if (1 /*FIX!!! panel_is_active(fbdev->panel)*/) {
/* COLOR TFT PALETTIZED (use RGB 565) */
value = (red & 0xF800)|((green >> 5) &
0x07E0)|((blue >> 11) & 0x001F);
value &= 0xFFFF;
} else if (0 /*panel_is_color(fbdev->panel)*/) {
/* COLOR STN MODE */
value = 0x1234;
value &= 0xFFF;
} else {
/* MONOCHROME MODE */
value = (green >> 12) & 0x000F;
value &= 0xF;
}
palette[regno] = value;
return 0;
}
/* fb_blank
* Blank the screen. Depending on the mode, the screen will be
* activated with the backlight color, or deactivated
*/
static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
{
struct au1200fb_device *fbdev = fbi->par;
/* Short-circuit screen blanking */
if (noblanking)
return 0;
switch (blank_mode) {
case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
/* printk("turn on panel\n"); */
au1200_setpanel(panel, fbdev->pd);
break;
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
/* printk("turn off panel\n"); */
au1200_setpanel(NULL, fbdev->pd);
break;
default:
break;
}
/* FB_BLANK_NORMAL is a soft blank */
return (blank_mode == FB_BLANK_NORMAL) ? -EINVAL : 0;
}
/* fb_mmap
* Map video memory in user space. We don't use the generic fb_mmap
* method mainly to allow the use of the TLB streaming flag (CCA=6)
*/
static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct au1200fb_device *fbdev = info->par;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}
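/* Commentary on the helper choice above: vm_iomap_memory() validates the
 * size and offset of the requested vma against fbdev->fb_len before doing
 * the remap, so an oversized or misplaced user mapping fails with -EINVAL
 * instead of exposing memory past the end of the framebuffer. That bounds
 * check is the main reason to prefer it over an open-coded
 * io_remap_pfn_range() with caller-derived offsets. */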
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
{
unsigned int hi1, divider;
/* SCREEN_SIZE: user cannot reset size, must switch panel choice */
if (pdata->flags & SCREEN_BACKCOLOR)
lcd->backcolor = pdata->backcolor;
if (pdata->flags & SCREEN_BRIGHTNESS) {
// limit brightness pwm duty to >= 30/1600
if (pdata->brightness < 30) {
pdata->brightness = 30;
}
divider = (lcd->pwmdiv & 0x3FFFF) + 1;
hi1 = (((pdata->brightness & 0xFF) + 1) * divider) >> 8;
lcd->pwmhi &= 0xFFFF;
lcd->pwmhi |= (hi1 << 16);
}
if (pdata->flags & SCREEN_COLORKEY)
lcd->colorkey = pdata->colorkey;
if (pdata->flags & SCREEN_MASK)
lcd->colorkeymsk = pdata->mask;
au_sync();
}
static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
{
unsigned int hi1, divider;
pdata->xsize = ((lcd->screen & LCD_SCREEN_SX) >> 19) + 1;
pdata->ysize = ((lcd->screen & LCD_SCREEN_SY) >> 8) + 1;
pdata->backcolor = lcd->backcolor;
pdata->colorkey = lcd->colorkey;
pdata->mask = lcd->colorkeymsk;
// brightness
hi1 = (lcd->pwmhi >> 16) + 1;
divider = (lcd->pwmdiv & 0x3FFFF) + 1;
pdata->brightness = ((hi1 << 8) / divider) - 1;
au_sync();
}
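/* Worked example of the brightness round trip (illustrative numbers only):
 * with a divider of 400, set_global() stores
 * hi1 = ((127 & 0xFF) + 1) * 400 >> 8 = 200
 * in the top half of pwmhi, and get_global() recovers
 * ((200 << 8) / 400) - 1 = 127.
 * The >> 8 truncates, so the recovered value can be off by one unless
 * (brightness + 1) * divider is a multiple of 256. */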
static void set_window(unsigned int plane,
struct au1200_lcd_window_regs_t *pdata)
{
unsigned int val, bpp;
/* Window control register 0 */
if (pdata->flags & WIN_POSITION) {
val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_OX |
LCD_WINCTRL0_OY);
val |= ((pdata->xpos << 21) & LCD_WINCTRL0_OX);
val |= ((pdata->ypos << 10) & LCD_WINCTRL0_OY);
lcd->window[plane].winctrl0 = val;
}
if (pdata->flags & WIN_ALPHA_COLOR) {
val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_A);
val |= ((pdata->alpha_color << 2) & LCD_WINCTRL0_A);
lcd->window[plane].winctrl0 = val;
}
if (pdata->flags & WIN_ALPHA_MODE) {
val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_AEN);
val |= ((pdata->alpha_mode << 1) & LCD_WINCTRL0_AEN);
lcd->window[plane].winctrl0 = val;
}
/* Window control register 1 */
if (pdata->flags & WIN_PRIORITY) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PRI);
val |= ((pdata->priority << 30) & LCD_WINCTRL1_PRI);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_CHANNEL) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PIPE);
val |= ((pdata->channel << 29) & LCD_WINCTRL1_PIPE);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_BUFFER_FORMAT) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_FRM);
val |= ((pdata->buffer_format << 25) & LCD_WINCTRL1_FRM);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_COLOR_ORDER) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_CCO);
val |= ((pdata->color_order << 24) & LCD_WINCTRL1_CCO);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_PIXEL_ORDER) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PO);
val |= ((pdata->pixel_order << 22) & LCD_WINCTRL1_PO);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_SIZE) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_SZX |
LCD_WINCTRL1_SZY);
val |= (((pdata->xsize << 11) - 1) & LCD_WINCTRL1_SZX);
val |= (((pdata->ysize) - 1) & LCD_WINCTRL1_SZY);
lcd->window[plane].winctrl1 = val;
/* program buffer line width */
bpp = winbpp(val) / 8;
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_BX);
val |= (((pdata->xsize * bpp) << 8) & LCD_WINCTRL2_BX);
lcd->window[plane].winctrl2 = val;
}
/* Window control register 2 */
if (pdata->flags & WIN_COLORKEY_MODE) {
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_CKMODE);
val |= ((pdata->colorkey_mode << 24) & LCD_WINCTRL2_CKMODE);
lcd->window[plane].winctrl2 = val;
}
if (pdata->flags & WIN_DOUBLE_BUFFER_MODE) {
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_DBM);
val |= ((pdata->double_buffer_mode << 23) & LCD_WINCTRL2_DBM);
lcd->window[plane].winctrl2 = val;
}
if (pdata->flags & WIN_RAM_ARRAY_MODE) {
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_RAM);
val |= ((pdata->ram_array_mode << 21) & LCD_WINCTRL2_RAM);
lcd->window[plane].winctrl2 = val;
}
/* Buffer line width programmed with WIN_SIZE */
if (pdata->flags & WIN_BUFFER_SCALE) {
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_SCX |
LCD_WINCTRL2_SCY);
val |= ((pdata->xsize << 11) & LCD_WINCTRL2_SCX);
val |= ((pdata->ysize) & LCD_WINCTRL2_SCY);
lcd->window[plane].winctrl2 = val;
}
if (pdata->flags & WIN_ENABLE) {
val = lcd->winenable;
val &= ~(1<<plane);
val |= (pdata->enable & 1) << plane;
lcd->winenable = val;
}
au_sync();
}
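/* A note on the WIN_SIZE encoding above: (xsize << 11) - 1 equals
 * ((xsize - 1) << 11) + 0x7FF, and the LCD_WINCTRL1_SZX mask discards the
 * low bits, so the field ends up holding xsize - 1 starting at bit 11
 * (assuming SZX begins at bit 11, as the >> 11 in get_window() suggests).
 * get_window() inverts this with
 * ((winctrl1 & LCD_WINCTRL1_SZX) >> 11) + 1. */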
static void get_window(unsigned int plane,
struct au1200_lcd_window_regs_t *pdata)
{
/* Window control register 0 */
pdata->xpos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OX) >> 21;
pdata->ypos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OY) >> 10;
pdata->alpha_color = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_A) >> 2;
pdata->alpha_mode = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_AEN) >> 1;
/* Window control register 1 */
pdata->priority = (lcd->window[plane].winctrl1& LCD_WINCTRL1_PRI) >> 30;
pdata->channel = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PIPE) >> 29;
pdata->buffer_format = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_FRM) >> 25;
pdata->color_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_CCO) >> 24;
pdata->pixel_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PO) >> 22;
pdata->xsize = ((lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZX) >> 11) + 1;
pdata->ysize = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZY) + 1;
/* Window control register 2 */
pdata->colorkey_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_CKMODE) >> 24;
pdata->double_buffer_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_DBM) >> 23;
pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21;
pdata->enable = (lcd->winenable >> plane) & 1;
au_sync();
}
static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct au1200fb_device *fbdev = info->par;
int plane;
int val;
plane = fbinfo2index(info);
print_dbg("au1200fb: ioctl %d on plane %d\n", cmd, plane);
if (cmd == AU1200_LCD_FB_IOCTL) {
struct au1200_lcd_iodata_t iodata;
if (copy_from_user(&iodata, (void __user *) arg, sizeof(iodata)))
return -EFAULT;
print_dbg("FB IOCTL called\n");
switch (iodata.subcmd) {
case AU1200_LCD_SET_SCREEN:
print_dbg("AU1200_LCD_SET_SCREEN\n");
set_global(cmd, &iodata.global);
break;
case AU1200_LCD_GET_SCREEN:
print_dbg("AU1200_LCD_GET_SCREEN\n");
get_global(cmd, &iodata.global);
break;
case AU1200_LCD_SET_WINDOW:
print_dbg("AU1200_LCD_SET_WINDOW\n");
set_window(plane, &iodata.window);
break;
case AU1200_LCD_GET_WINDOW:
print_dbg("AU1200_LCD_GET_WINDOW\n");
get_window(plane, &iodata.window);
break;
case AU1200_LCD_SET_PANEL:
print_dbg("AU1200_LCD_SET_PANEL\n");
if ((iodata.global.panel_choice >= 0) &&
(iodata.global.panel_choice <
NUM_PANELS))
{
struct panel_settings *newpanel;
panel_index = iodata.global.panel_choice;
newpanel = &known_lcd_panels[panel_index];
au1200_setpanel(newpanel, fbdev->pd);
}
break;
case AU1200_LCD_GET_PANEL:
print_dbg("AU1200_LCD_GET_PANEL\n");
iodata.global.panel_choice = panel_index;
break;
default:
return -EINVAL;
}
val = copy_to_user((void __user *) arg, &iodata, sizeof(iodata));
if (val) {
print_dbg("error: could not copy %d bytes\n", val);
return -EFAULT;
}
}
return 0;
}
static struct fb_ops au1200fb_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = au1200fb_fb_check_var,
.fb_set_par = au1200fb_fb_set_par,
.fb_setcolreg = au1200fb_fb_setcolreg,
.fb_blank = au1200fb_fb_blank,
.fb_fillrect = sys_fillrect,
.fb_copyarea = sys_copyarea,
.fb_imageblit = sys_imageblit,
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
.fb_sync = NULL,
.fb_ioctl = au1200fb_ioctl,
.fb_mmap = au1200fb_fb_mmap,
};
/*-------------------------------------------------------------------------*/
static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id)
{
/* Nothing to do for now, just clear any pending interrupt */
lcd->intstatus = lcd->intstatus;
au_sync();
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/* AU1200 LCD device probe helpers */
static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
{
struct fb_info *fbi = fbdev->fb_info;
int bpp;
fbi->fbops = &au1200fb_fb_ops;
bpp = winbpp(win->w[fbdev->plane].mode_winctrl1);
/* Copy monitor specs from panel data */
/* fixme: we're setting up LCD controller windows, so these don't care
what the monitor specs are (the panel itself does, but that isn't done
here)... so maybe we need a generic catch-all monitor setting? */
memcpy(&fbi->monspecs, &panel->monspecs, sizeof(struct fb_monspecs));
/* We first try the user mode passed as an argument. If that fails,
* or if none was specified, we default to the first mode of the
* panel list. Note that after this call, var data will be set */
if (!fb_find_mode(&fbi->var,
fbi,
NULL, /* drv_info.opt_mode, */
fbi->monspecs.modedb,
fbi->monspecs.modedb_len,
fbi->monspecs.modedb,
bpp)) {
print_err("Cannot find valid mode for panel %s", panel->name);
return -EFAULT;
}
fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL);
if (!fbi->pseudo_palette) {
return -ENOMEM;
}
if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
print_err("Fail to allocate colormap (%d entries)",
AU1200_LCD_NBR_PALETTE_ENTRIES);
kfree(fbi->pseudo_palette);
return -EFAULT;
}
strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
fbi->fix.smem_start = fbdev->fb_phys;
fbi->fix.smem_len = fbdev->fb_len;
fbi->fix.type = FB_TYPE_PACKED_PIXELS;
fbi->fix.xpanstep = 0;
fbi->fix.ypanstep = 0;
fbi->fix.mmio_start = 0;
fbi->fix.mmio_len = 0;
fbi->fix.accel = FB_ACCEL_NONE;
fbi->screen_base = (char __iomem *) fbdev->fb_mem;
au1200fb_update_fbinfo(fbi);
return 0;
}
/*-------------------------------------------------------------------------*/
static int au1200fb_setup(struct au1200fb_platdata *pd)
{
char *options = NULL;
char *this_opt, *endptr;
int num_panels = ARRAY_SIZE(known_lcd_panels);
int panel_idx = -1;
fb_get_options(DRIVER_NAME, &options);
if (!options)
goto out;
while ((this_opt = strsep(&options, ",")) != NULL) {
/* Panel option - can be panel name,
* "bs" for board-switch, or number/index */
if (!strncmp(this_opt, "panel:", 6)) {
int i;
long int li;
char *endptr;
this_opt += 6;
/* First check for an index, which lets us
* short-circuit this mess */
li = simple_strtol(this_opt, &endptr, 0);
if (*endptr == '\0')
panel_idx = (int)li;
else if (strcmp(this_opt, "bs") == 0)
panel_idx = pd->panel_index();
else {
for (i = 0; i < num_panels; i++) {
if (!strcmp(this_opt,
known_lcd_panels[i].name)) {
panel_idx = i;
break;
}
}
}
if ((panel_idx < 0) || (panel_idx >= num_panels))
print_warn("Panel %s not supported!", this_opt);
else
panel_index = panel_idx;
} else if (strncmp(this_opt, "nohwcursor", 10) == 0)
nohwcursor = 1;
else if (strncmp(this_opt, "devices:", 8) == 0) {
this_opt += 8;
device_count = simple_strtol(this_opt, &endptr, 0);
if ((device_count < 0) ||
(device_count > MAX_DEVICE_COUNT))
device_count = MAX_DEVICE_COUNT;
} else if (strncmp(this_opt, "wincfg:", 7) == 0) {
this_opt += 7;
window_index = simple_strtol(this_opt, &endptr, 0);
if ((window_index < 0) ||
(window_index >= ARRAY_SIZE(windows)))
window_index = DEFAULT_WINDOW_INDEX;
} else if (strncmp(this_opt, "off", 3) == 0)
return 1;
else
print_warn("Unsupported option \"%s\"", this_opt);
}
out:
return 0;
}
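/* Example command line (hypothetical values, assuming DRIVER_NAME is
 * "au1200fb"):
 *
 *   video=au1200fb:panel:bs,nohwcursor,devices:2
 *
 * selects the panel reported by the board switch, disables the hardware
 * cursor and limits the driver to two planes. */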
/* AU1200 LCD controller device driver */
static int au1200fb_drv_probe(struct platform_device *dev)
{
struct au1200fb_device *fbdev;
struct au1200fb_platdata *pd;
struct fb_info *fbi = NULL;
unsigned long page;
int bpp, plane, ret, irq;
print_info("" DRIVER_DESC "");
pd = dev->dev.platform_data;
if (!pd)
return -ENODEV;
/* Setup driver with options */
if (au1200fb_setup(pd))
return -ENODEV;
/* Point to the panel selected */
panel = &known_lcd_panels[panel_index];
win = &windows[window_index];
printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
/* shut gcc up */
ret = 0;
fbdev = NULL;
for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
win->w[plane].xres = panel->Xres;
if (win->w[plane].yres == 0)
win->w[plane].yres = panel->Yres;
fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
&dev->dev);
if (!fbi)
goto failed;
_au1200fb_infos[plane] = fbi;
fbdev = fbi->par;
fbdev->fb_info = fbi;
fbdev->pd = pd;
fbdev->plane = plane;
/* Allocate the framebuffer to the maximum screen size */
fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
return -ENOMEM;
}
/*
* Set page reserved so that mmap will work. This is necessary
* since we'll be remapping normal memory.
*/
for (page = (unsigned long)fbdev->fb_phys;
page < PAGE_ALIGN((unsigned long)fbdev->fb_phys +
fbdev->fb_len);
page += PAGE_SIZE) {
SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */
}
print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Init FB data */
if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
goto failed;
/* Register new framebuffer */
ret = register_framebuffer(fbi);
if (ret < 0) {
print_err("cannot register new framebuffer");
goto failed;
}
au1200fb_fb_set_par(fbi);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
if (plane == 0)
if (fb_prepare_logo(fbi, FB_ROTATE_UR)) {
/* Start display and show logo on boot */
fb_set_cmap(&fbi->cmap, fbi);
fb_show_logo(fbi, FB_ROTATE_UR);
}
#endif
}
/* Now hook interrupt too */
irq = platform_get_irq(dev, 0);
ret = request_irq(irq, au1200fb_handle_irq,
IRQF_SHARED, "lcd", (void *)dev);
if (ret) {
print_err("fail to request interrupt line %d (err: %d)",
irq, ret);
goto failed;
}
platform_set_drvdata(dev, pd);
/* Kickstart the panel */
au1200_setpanel(panel, pd);
return 0;
failed:
/* NOTE: This only does the current plane/window that failed; others are still active */
if (fbi) {
if (fbi->cmap.len != 0)
fb_dealloc_cmap(&fbi->cmap);
kfree(fbi->pseudo_palette);
}
if (plane == 0)
free_irq(AU1200_LCD_INT, (void*)dev);
return ret;
}
static int au1200fb_drv_remove(struct platform_device *dev)
{
struct au1200fb_platdata *pd = platform_get_drvdata(dev);
struct au1200fb_device *fbdev;
struct fb_info *fbi;
int plane;
/* Turn off the panel */
au1200_setpanel(NULL, pd);
for (plane = 0; plane < device_count; ++plane) {
fbi = _au1200fb_infos[plane];
fbdev = fbi->par;
/* Clean up all probe data */
unregister_framebuffer(fbi);
if (fbi->cmap.len != 0)
fb_dealloc_cmap(&fbi->cmap);
kfree(fbi->pseudo_palette);
framebuffer_release(fbi);
_au1200fb_infos[plane] = NULL;
}
free_irq(platform_get_irq(dev, 0), (void *)dev);
return 0;
}
#ifdef CONFIG_PM
static int au1200fb_drv_suspend(struct device *dev)
{
struct au1200fb_platdata *pd = dev_get_drvdata(dev);
au1200_setpanel(NULL, pd);
lcd->outmask = 0;
au_sync();
return 0;
}
static int au1200fb_drv_resume(struct device *dev)
{
struct au1200fb_platdata *pd = dev_get_drvdata(dev);
struct fb_info *fbi;
int i;
/* Kickstart the panel */
au1200_setpanel(panel, pd);
for (i = 0; i < device_count; i++) {
fbi = _au1200fb_infos[i];
au1200fb_fb_set_par(fbi);
}
return 0;
}
static const struct dev_pm_ops au1200fb_pmops = {
.suspend = au1200fb_drv_suspend,
.resume = au1200fb_drv_resume,
.freeze = au1200fb_drv_suspend,
.thaw = au1200fb_drv_resume,
};
#define AU1200FB_PMOPS (&au1200fb_pmops)
#else
#define AU1200FB_PMOPS NULL
#endif /* CONFIG_PM */
static struct platform_driver au1200fb_driver = {
.driver = {
.name = "au1200-lcd",
.owner = THIS_MODULE,
.pm = AU1200FB_PMOPS,
},
.probe = au1200fb_drv_probe,
.remove = au1200fb_drv_remove,
};
/*-------------------------------------------------------------------------*/
static int __init au1200fb_init(void)
{
return platform_driver_register(&au1200fb_driver);
}
static void __exit au1200fb_cleanup(void)
{
platform_driver_unregister(&au1200fb_driver);
}
module_init(au1200fb_init);
module_exit(au1200fb_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5755_2 |
crossvul-cpp_data_good_5506_0 | /* inffast.c -- fast decoding
* Copyright (C) 1995-2008, 2010, 2013 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "zutil.h"
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
#ifndef ASMINF
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
available, an end-of-block is encountered, or a data error is encountered.
When large enough input and output buffers are supplied to inflate(), for
example, a 16K input buffer and a 64K output buffer, more than 95% of the
inflate execution time is spent in this routine.
Entry assumptions:
state->mode == LEN
strm->avail_in >= 6
strm->avail_out >= 258
start >= strm->avail_out
state->bits < 8
On return, state->mode is one of:
LEN -- ran out of enough output space or enough available input
TYPE -- reached end of block code, inflate() to interpret next block
BAD -- error in block data
Notes:
- The maximum input bits used by a length/distance pair is 15 bits for the
length code, 5 bits for the length extra, 15 bits for the distance code,
and 13 bits for the distance extra. This totals 48 bits, or six bytes.
Therefore if strm->avail_in >= 6, then there is enough input to avoid
checking for available input while decoding.
- The maximum bytes that a single length/distance pair can output is 258
bytes, which is the maximum length that can be coded. inflate_fast()
requires strm->avail_out >= 258 for each loop to avoid checking for
output space.
*/
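/*
 A consequence of the notes above, spelled out: inflate_fast() sets
 last = in + (strm->avail_in - 5), so the loop guard in < last guarantees
 at least six unread input bytes, matching the 48-bit (six byte) worst
 case per length/distance pair. Likewise end = out + (strm->avail_out -
 257) with out < end guarantees at least 258 writable output bytes, the
 longest possible match.
 */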
void ZLIB_INTERNAL inflate_fast(strm, start)
z_streamp strm;
unsigned start; /* inflate()'s starting value for strm->avail_out */
{
struct inflate_state FAR *state;
z_const unsigned char FAR *in; /* local strm->next_in */
z_const unsigned char FAR *last; /* have enough input while in < last */
unsigned char FAR *out; /* local strm->next_out */
unsigned char FAR *beg; /* inflate()'s initial strm->next_out */
unsigned char FAR *end; /* while out < end, enough space available */
#ifdef INFLATE_STRICT
unsigned dmax; /* maximum distance from zlib header */
#endif
unsigned wsize; /* window size or zero if not using window */
unsigned whave; /* valid bytes in the window */
unsigned wnext; /* window write index */
unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */
unsigned long hold; /* local strm->hold */
unsigned bits; /* local strm->bits */
code const FAR *lcode; /* local strm->lencode */
code const FAR *dcode; /* local strm->distcode */
unsigned lmask; /* mask for first level of length codes */
unsigned dmask; /* mask for first level of distance codes */
code here; /* retrieved table entry */
unsigned op; /* code bits, operation, extra bits, or */
/* window position, window bytes to copy */
unsigned len; /* match length, unused bytes */
unsigned dist; /* match distance */
unsigned char FAR *from; /* where to copy match from */
/* copy state to local variables */
state = (struct inflate_state FAR *)strm->state;
in = strm->next_in;
last = in + (strm->avail_in - 5);
out = strm->next_out;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
#ifdef INFLATE_STRICT
dmax = state->dmax;
#endif
wsize = state->wsize;
whave = state->whave;
wnext = state->wnext;
window = state->window;
hold = state->hold;
bits = state->bits;
lcode = state->lencode;
dcode = state->distcode;
lmask = (1U << state->lenbits) - 1;
dmask = (1U << state->distbits) - 1;
/* decode literals and length/distances until end-of-block or not enough
input data or output space */
do {
if (bits < 15) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
here = lcode[hold & lmask];
dolen:
op = (unsigned)(here.bits);
hold >>= op;
bits -= op;
op = (unsigned)(here.op);
if (op == 0) { /* literal */
Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
"inflate: literal '%c'\n" :
"inflate: literal 0x%02x\n", here.val));
*out++ = (unsigned char)(here.val);
}
else if (op & 16) { /* length base */
len = (unsigned)(here.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
hold >>= op;
bits -= op;
}
Tracevv((stderr, "inflate: length %u\n", len));
if (bits < 15) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
here = dcode[hold & dmask];
dodist:
op = (unsigned)(here.bits);
hold >>= op;
bits -= op;
op = (unsigned)(here.op);
if (op & 16) { /* distance base */
dist = (unsigned)(here.val);
op &= 15; /* number of extra bits */
if (bits < op) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
if (bits < op) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
}
dist += (unsigned)hold & ((1U << op) - 1);
#ifdef INFLATE_STRICT
if (dist > dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
hold >>= op;
bits -= op;
Tracevv((stderr, "inflate: distance %u\n", dist));
op = (unsigned)(out - beg); /* max distance in output */
if (dist > op) { /* see if copy from window */
op = dist - op; /* distance back in window */
if (op > whave) {
if (state->sane) {
strm->msg =
(char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
if (len <= op - whave) {
do {
*out++ = 0;
} while (--len);
continue;
}
len -= op - whave;
do {
*out++ = 0;
} while (--op > whave);
if (op == 0) {
from = out - dist;
do {
*out++ = *from++;
} while (--len);
continue;
}
#endif
}
from = window;
if (wnext == 0) { /* very common case */
from += wsize - op;
if (op < len) { /* some from window */
len -= op;
do {
*out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
else if (wnext < op) { /* wrap around window */
from += wsize + wnext - op;
op -= wnext;
if (op < len) { /* some from end of window */
len -= op;
do {
*out++ = *from++;
} while (--op);
from = window;
if (wnext < len) { /* some from start of window */
op = wnext;
len -= op;
do {
*out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
}
else { /* contiguous in window */
from += wnext - op;
if (op < len) { /* some from window */
len -= op;
do {
*out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
while (len > 2) {
*out++ = *from++;
*out++ = *from++;
*out++ = *from++;
len -= 3;
}
if (len) {
*out++ = *from++;
if (len > 1)
*out++ = *from++;
}
}
else {
from = out - dist; /* copy direct from output */
do { /* minimum length is three */
*out++ = *from++;
*out++ = *from++;
*out++ = *from++;
len -= 3;
} while (len > 2);
if (len) {
*out++ = *from++;
if (len > 1)
*out++ = *from++;
}
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
here = dcode[here.val + (hold & ((1U << op) - 1))];
goto dodist;
}
else {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
}
else if ((op & 64) == 0) { /* 2nd level length code */
here = lcode[here.val + (hold & ((1U << op) - 1))];
goto dolen;
}
else if (op & 32) { /* end-of-block */
Tracevv((stderr, "inflate: end of block\n"));
state->mode = TYPE;
break;
}
else {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
} while (in < last && out < end);
/* return unused bytes (on entry, bits < 8, so in won't go too far back) */
len = bits >> 3;
in -= len;
bits -= len << 3;
hold &= (1U << bits) - 1;
/* update state and return */
strm->next_in = in;
strm->next_out = out;
strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
strm->avail_out = (unsigned)(out < end ?
257 + (end - out) : 257 - (out - end));
state->hold = hold;
state->bits = bits;
return;
}
/*
inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
- Using bit fields for code structure
- Different op definition to avoid & for extra bits (do & for table bits)
- Three separate decoding do-loops for direct, window, and wnext == 0
- Special case for distance > 1 copies to do overlapped load and store copy
- Explicit branch predictions (based on measured branch probabilities)
 - Deferring match copy and interspersing it with decoding subsequent codes
- Swapping literal/length else
- Swapping window/direct else
- Larger unrolled copy loops (three is about right)
- Moving len -= 3 statement into middle of loop
*/
#endif /* !ASMINF */
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5506_0 |
crossvul-cpp_data_bad_1412_1 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
* Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include "disasm.h"
static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
* All paths of conditional branches are analyzed until 'bpf_exit' insn.
*
* The first pass is depth-first-search to check that the program is a DAG.
* It rejects the following programs:
* - larger than BPF_MAXINSNS insns
* - if loop is present (detected via back-edge)
* - unreachable insns exist (shouldn't be a forest. program = one function)
* - out of bounds or malformed jumps
* The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if the total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
* Number of 'branches to be analyzed' is limited to 1k
*
* On entry to each instruction, each register has a type, and the instruction
* changes the types of the registers depending on instruction semantics.
* If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
* copied to R1.
*
* All registers are 64-bit.
* R0 - return register
* R1-R5 argument passing registers
* R6-R9 callee saved registers
* R10 - frame pointer read-only
*
* At the start of BPF program the register R1 contains a pointer to bpf_context
* and has type PTR_TO_CTX.
*
* Verifier tracks arithmetic operations on pointers in case:
* BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
* 1st insn copies R10 (which has FRAME_PTR) type into R1
* and 2nd arithmetic instruction is pattern matched to recognize
* that it wants to construct a pointer to some element within stack.
* So after 2nd insn, the register R1 has type PTR_TO_STACK
* (and -20 constant is saved for further stack bounds checking).
* Meaning that this reg is a pointer to stack plus known immediate constant.
*
* Most of the time the registers have SCALAR_VALUE type, which
* means the register has some value, but it's not a valid pointer.
* (like pointer plus pointer becomes SCALAR_VALUE type)
*
* When verifier sees load or store instructions the type of base register
* can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
* four pointer types recognized by check_mem_access() function.
*
* PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
* and the range of [ptr, ptr + map's value_size) is accessible.
*
* registers used to pass values to function calls are checked against
* function argument constraints.
*
* ARG_PTR_TO_MAP_KEY is one of such argument constraints.
* It means that the register type passed to this function must be
* PTR_TO_STACK and it will be used inside the function as
* 'pointer to map element key'
*
* For example the argument constraints for bpf_map_lookup_elem():
* .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
* .arg1_type = ARG_CONST_MAP_PTR,
* .arg2_type = ARG_PTR_TO_MAP_KEY,
*
* ret_type says that this function returns 'pointer to map elem value or null'
* function expects 1st argument to be a const pointer to 'struct bpf_map' and
* 2nd argument should be a pointer to stack, which will be used inside
* the helper function as a pointer to map element key.
*
* On the kernel side the helper function looks like:
* u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
* {
* struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
* void *key = (void *) (unsigned long) r2;
* void *value;
*
* here kernel can access 'key' and 'map' pointers safely, knowing that
* [key, key + map->key_size) bytes are valid and were initialized on
* the stack of eBPF program.
* }
*
* Corresponding eBPF program may look like:
* BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
* BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
* here verifier looks at prototype of map_lookup_elem() and sees:
* .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
* Now verifier knows that this map has key of R1->map_ptr->key_size bytes
*
* Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
* Now verifier checks that [R2, R2 + map's key_size) are within stack limits
* and were initialized prior to this call.
* If it's ok, then verifier allows this BPF_CALL insn and looks at
* .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
* R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
*
* When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
* insn, the register holding that pointer in the true branch changes state to
* PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
* branch. See check_cond_jmp_op().
*
* After the call R0 is set to return type of the function and registers R1-R5
* are set to NOT_INIT to indicate that they are no longer readable.
*
* The following reference types represent a potential reference to a kernel
* resource which, after first being allocated, must be checked and freed by
* the BPF program:
* - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
*
* When the verifier sees a helper call return a reference type, it allocates a
* pointer id for the reference and stores it in the current function state.
* Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
* PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
* passes through a NULL-check conditional. For the branch wherein the state is
* changed to CONST_IMM, the verifier releases the reference.
*
* For each helper function that allocates a reference, such as
* bpf_sk_lookup_tcp(), there is a corresponding release function, such as
* bpf_sk_release(). When a reference type passes into the release function,
* the verifier also releases the reference. If any unchecked or unreleased
* reference remains at the end of the program, the verifier rejects it.
*/
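/* A condensed sketch of the reference flow described above (illustrative
 * pseudo-assembly, not taken from any real program):
 *
 *   call bpf_sk_lookup_tcp   // R0 = PTR_TO_SOCKET_OR_NULL, ref id N recorded
 *   if R0 == 0 goto drop     // NULL branch: R0 becomes CONST_IMM, ref N freed
 *   R1 = R0                  // non-NULL branch: R0/R1 are PTR_TO_SOCKET
 *   call bpf_sk_release      // release function, verifier drops ref N
 * drop:
 *   exit                     // no unreleased references -> program accepted
 */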
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
/* verifier state is 'st'
* before processing instruction 'insn_idx'
* and after processing instruction 'prev_insn_idx'
*/
struct bpf_verifier_state st;
int insn_idx;
int prev_insn_idx;
struct bpf_verifier_stack_elem *next;
};
#define BPF_COMPLEXITY_LIMIT_INSNS 131072
#define BPF_COMPLEXITY_LIMIT_STACK 1024
#define BPF_COMPLEXITY_LIMIT_STATES 64
#define BPF_MAP_PTR_UNPRIV 1UL
#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}
static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
return aux->map_state & BPF_MAP_PTR_UNPRIV;
}
static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
const struct bpf_map *map, bool unpriv)
{
BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
unpriv |= bpf_map_ptr_unpriv(aux);
aux->map_state = (unsigned long)map |
(unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
bool pkt_access;
int regno;
int access_size;
s64 msize_smax_value;
u64 msize_umax_value;
int ptr_id;
};
static DEFINE_MUTEX(bpf_verifier_lock);
static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
const struct bpf_line_info *linfo;
const struct bpf_prog *prog;
u32 i, nr_linfo;
prog = env->prog;
nr_linfo = prog->aux->nr_linfo;
if (!nr_linfo || insn_off >= prog->len)
return NULL;
linfo = prog->aux->linfo;
for (i = 1; i < nr_linfo; i++)
if (insn_off < linfo[i].insn_off)
break;
return &linfo[i - 1];
}
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
va_list args)
{
unsigned int n;
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
"verifier log line truncated - local buffer too short\n");
n = min(log->len_total - log->len_used - 1, n);
log->kbuf[n] = '\0';
if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
log->len_used += n;
else
log->ubuf = NULL;
}
/* log_level controls verbosity level of eBPF verifier.
* bpf_verifier_log_write() is used to dump the verification trace to the log,
* so the user can figure out what's wrong with the program
*/
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...)
{
va_list args;
if (!bpf_verifier_log_needed(&env->log))
return;
va_start(args, fmt);
bpf_verifier_vlog(&env->log, fmt, args);
va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
struct bpf_verifier_env *env = private_data;
va_list args;
if (!bpf_verifier_log_needed(&env->log))
return;
va_start(args, fmt);
bpf_verifier_vlog(&env->log, fmt, args);
va_end(args);
}
static const char *ltrim(const char *s)
{
while (isspace(*s))
s++;
return s;
}
__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...)
{
const struct bpf_line_info *linfo;
if (!bpf_verifier_log_needed(&env->log))
return;
linfo = find_linfo(env, insn_off);
if (!linfo || linfo == env->prev_linfo)
return;
if (prefix_fmt) {
va_list args;
va_start(args, prefix_fmt);
bpf_verifier_vlog(&env->log, prefix_fmt, args);
va_end(args);
}
verbose(env, "%s\n",
ltrim(btf_name_by_offset(env->prog->aux->btf,
linfo->line_off)));
env->prev_linfo = linfo;
}
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_PACKET ||
type == PTR_TO_PACKET_META;
}
static bool reg_type_may_be_null(enum bpf_reg_type type)
{
return type == PTR_TO_MAP_VALUE_OR_NULL ||
type == PTR_TO_SOCKET_OR_NULL;
}
static bool type_is_refcounted(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET;
}
static bool type_is_refcounted_or_null(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
}
static bool reg_is_refcounted(const struct bpf_reg_state *reg)
{
return type_is_refcounted(reg->type);
}
static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
{
return type_is_refcounted_or_null(reg->type);
}
static bool arg_type_is_refcounted(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_SOCKET;
}
/* Determine whether the function releases some resources allocated by another
* function call. The first reference type argument will be assumed to be
* released by release_reference().
*/
static bool is_release_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_release;
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
[SCALAR_VALUE] = "inv",
[PTR_TO_CTX] = "ctx",
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
[PTR_TO_STACK] = "fp",
[PTR_TO_PACKET] = "pkt",
[PTR_TO_PACKET_META] = "pkt_meta",
[PTR_TO_PACKET_END] = "pkt_end",
[PTR_TO_FLOW_KEYS] = "flow_keys",
[PTR_TO_SOCKET] = "sock",
[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
};
static char slot_type_char[] = {
[STACK_INVALID] = '?',
[STACK_SPILL] = 'r',
[STACK_MISC] = 'm',
[STACK_ZERO] = '0',
};
static void print_liveness(struct bpf_verifier_env *env,
enum bpf_reg_liveness live)
{
if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
verbose(env, "_");
if (live & REG_LIVE_READ)
verbose(env, "r");
if (live & REG_LIVE_WRITTEN)
verbose(env, "w");
if (live & REG_LIVE_DONE)
verbose(env, "D");
}
static struct bpf_func_state *func(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg)
{
struct bpf_verifier_state *cur = env->cur_state;
return cur->frame[reg->frameno];
}
static void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state)
{
const struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
if (state->frameno)
verbose(env, " frame%d:", state->frameno);
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
t = reg->type;
if (t == NOT_INIT)
continue;
verbose(env, " R%d", i);
print_liveness(env, reg->live);
verbose(env, "=%s", reg_type_str[t]);
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
if (t == PTR_TO_STACK)
verbose(env, ",call_%d", func(env, reg)->callsite);
} else {
verbose(env, "(id=%d", reg->id);
if (t != SCALAR_VALUE)
verbose(env, ",off=%d", reg->off);
if (type_is_pkt_pointer(t))
verbose(env, ",r=%d", reg->range);
else if (t == CONST_PTR_TO_MAP ||
t == PTR_TO_MAP_VALUE ||
t == PTR_TO_MAP_VALUE_OR_NULL)
verbose(env, ",ks=%d,vs=%d",
reg->map_ptr->key_size,
reg->map_ptr->value_size);
if (tnum_is_const(reg->var_off)) {
/* Typically an immediate SCALAR_VALUE, but
* could be a pointer whose offset is too big
* for reg->off
*/
verbose(env, ",imm=%llx", reg->var_off.value);
} else {
if (reg->smin_value != reg->umin_value &&
reg->smin_value != S64_MIN)
verbose(env, ",smin_value=%lld",
(long long)reg->smin_value);
if (reg->smax_value != reg->umax_value &&
reg->smax_value != S64_MAX)
verbose(env, ",smax_value=%lld",
(long long)reg->smax_value);
if (reg->umin_value != 0)
verbose(env, ",umin_value=%llu",
(unsigned long long)reg->umin_value);
if (reg->umax_value != U64_MAX)
verbose(env, ",umax_value=%llu",
(unsigned long long)reg->umax_value);
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, ",var_off=%s", tn_buf);
}
}
verbose(env, ")");
}
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
char types_buf[BPF_REG_SIZE + 1];
bool valid = false;
int j;
for (j = 0; j < BPF_REG_SIZE; j++) {
if (state->stack[i].slot_type[j] != STACK_INVALID)
valid = true;
types_buf[j] = slot_type_char[
state->stack[i].slot_type[j]];
}
types_buf[BPF_REG_SIZE] = 0;
if (!valid)
continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, state->stack[i].spilled_ptr.live);
if (state->stack[i].slot_type[0] == STACK_SPILL)
verbose(env, "=%s",
reg_type_str[state->stack[i].spilled_ptr.type]);
else
verbose(env, "=%s", types_buf);
}
if (state->acquired_refs && state->refs[0].id) {
verbose(env, " refs=%d", state->refs[0].id);
for (i = 1; i < state->acquired_refs; i++)
if (state->refs[i].id)
verbose(env, ",%d", state->refs[i].id);
}
verbose(env, "\n");
}
#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
static int copy_##NAME##_state(struct bpf_func_state *dst, \
const struct bpf_func_state *src) \
{ \
if (!src->FIELD) \
return 0; \
if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
/* internal bug, make state invalid to reject the program */ \
memset(dst, 0, sizeof(*dst)); \
return -EFAULT; \
} \
memcpy(dst->FIELD, src->FIELD, \
sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
return 0; \
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN
#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
bool copy_old) \
{ \
u32 old_size = state->COUNT; \
struct bpf_##NAME##_state *new_##FIELD; \
int slot = size / SIZE; \
\
if (size <= old_size || !size) { \
if (copy_old) \
return 0; \
state->COUNT = slot * SIZE; \
if (!size && old_size) { \
kfree(state->FIELD); \
state->FIELD = NULL; \
} \
return 0; \
} \
new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
GFP_KERNEL); \
if (!new_##FIELD) \
return -ENOMEM; \
if (copy_old) { \
if (state->FIELD) \
memcpy(new_##FIELD, state->FIELD, \
sizeof(*new_##FIELD) * (old_size / SIZE)); \
memset(new_##FIELD + old_size / SIZE, 0, \
sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
} \
state->COUNT = slot * SIZE; \
kfree(state->FIELD); \
state->FIELD = new_##FIELD; \
return 0; \
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN
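/* For reference, REALLOC_STATE_FN(stack, allocated_stack, stack,
 * BPF_REG_SIZE) expands to realloc_stack_state(state, size, copy_old):
 * it sizes state->stack to size / BPF_REG_SIZE slots, and when copy_old
 * is set it preserves the existing slots and zeroes the newly added tail.
 * Shrink requests with copy_old set are ignored. */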
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume a minimal amount of memory. Stack accesses from the
 * program make check_stack_write() call into realloc_func_state() to grow the stack.
* Note there is a non-zero 'parent' pointer inside bpf_verifier_state
* which realloc_stack_state() copies over. It points to previous
* bpf_verifier_state which is never reallocated.
*/
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
int refs_size, bool copy_old)
{
int err = realloc_reference_state(state, refs_size, copy_old);
if (err)
return err;
return realloc_stack_state(state, stack_size, copy_old);
}
/* Acquire a pointer id from the env and update the state->refs to include
* this new pointer reference.
* On success, returns a valid pointer id to associate with the register
* On failure, returns a negative errno.
*/
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_func_state *state = cur_func(env);
int new_ofs = state->acquired_refs;
int id, err;
err = realloc_reference_state(state, state->acquired_refs + 1, true);
if (err)
return err;
id = ++env->id_gen;
state->refs[new_ofs].id = id;
state->refs[new_ofs].insn_idx = insn_idx;
return id;
}
/* release function corresponding to acquire_reference_state(). Idempotent. */
static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
{
int i, last_idx;
if (!ptr_id)
return -EFAULT;
last_idx = state->acquired_refs - 1;
for (i = 0; i < state->acquired_refs; i++) {
if (state->refs[i].id == ptr_id) {
if (last_idx && i != last_idx)
memcpy(&state->refs[i], &state->refs[last_idx],
sizeof(*state->refs));
memset(&state->refs[last_idx], 0, sizeof(*state->refs));
state->acquired_refs--;
return 0;
}
}
return -EFAULT;
}
/* variation on the above for cases where we expect that there must be an
* outstanding reference for the specified ptr_id.
*/
static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
{
struct bpf_func_state *state = cur_func(env);
int err;
err = __release_reference_state(state, ptr_id);
if (WARN_ON_ONCE(err != 0))
verbose(env, "verifier internal error: can't release reference\n");
return err;
}
static int transfer_reference_state(struct bpf_func_state *dst,
struct bpf_func_state *src)
{
int err = realloc_reference_state(dst, src->acquired_refs, false);
if (err)
return err;
err = copy_reference_state(dst, src);
if (err)
return err;
return 0;
}
static void free_func_state(struct bpf_func_state *state)
{
if (!state)
return;
kfree(state->refs);
kfree(state->stack);
kfree(state);
}
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
int i;
for (i = 0; i <= state->curframe; i++) {
free_func_state(state->frame[i]);
state->frame[i] = NULL;
}
if (free_self)
kfree(state);
}
/* copy verifier state from src to dst growing dst stack space
* when necessary to accommodate larger src stack
*/
static int copy_func_state(struct bpf_func_state *dst,
const struct bpf_func_state *src)
{
int err;
err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
false);
if (err)
return err;
memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
err = copy_reference_state(dst, src);
if (err)
return err;
return copy_stack_state(dst, src);
}
static int copy_verifier_state(struct bpf_verifier_state *dst_state,
const struct bpf_verifier_state *src)
{
struct bpf_func_state *dst;
int i, err;
/* if dst has more stack frames then src frame, free them */
for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
free_func_state(dst_state->frame[i]);
dst_state->frame[i] = NULL;
}
dst_state->speculative = src->speculative;
dst_state->curframe = src->curframe;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst)
return -ENOMEM;
dst_state->frame[i] = dst;
}
err = copy_func_state(dst, src->frame[i]);
if (err)
return err;
}
return 0;
}
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem, *head = env->head;
int err;
if (env->head == NULL)
return -ENOENT;
if (cur) {
err = copy_verifier_state(cur, &head->st);
if (err)
return err;
}
if (insn_idx)
*insn_idx = head->insn_idx;
if (prev_insn_idx)
*prev_insn_idx = head->prev_insn_idx;
elem = head->next;
free_verifier_state(&head->st, false);
kfree(head);
env->head = elem;
env->stack_size--;
return 0;
}
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx,
bool speculative)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem;
int err;
elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
if (!elem)
goto err;
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
elem->next = env->head;
env->head = elem;
env->stack_size++;
err = copy_verifier_state(&elem->st, cur);
if (err)
goto err;
elem->st.speculative |= speculative;
if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
verbose(env, "BPF program is too complex\n");
goto err;
}
return &elem->st;
err:
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
/* pop all elements and return */
while (!pop_stack(env, NULL, NULL));
return NULL;
}
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
static void __mark_reg_not_init(struct bpf_reg_state *reg);
/* Mark the unknown part of a register (variable offset or scalar value) as
* known to have the value @imm.
*/
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
/* Clear id, off, and union(map_ptr, range) */
memset(((u8 *)reg) + sizeof(reg->type), 0,
offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
reg->var_off = tnum_const(imm);
reg->smin_value = (s64)imm;
reg->smax_value = (s64)imm;
reg->umin_value = imm;
reg->umax_value = imm;
}
/* Mark the 'variable offset' part of a register as zero. This should be
* used only on registers holding a pointer type.
*/
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
}
static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
reg->type = SCALAR_VALUE;
}
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs */
for (regno = 0; regno < MAX_BPF_REG; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_known_zero(regs + regno);
}
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
return type_is_pkt_pointer(reg->type);
}
static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
return reg_is_pkt_pointer(reg) ||
reg->type == PTR_TO_PACKET_END;
}
/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
enum bpf_reg_type which)
{
/* The register can already have a range from prior markings.
* This is fine as long as it hasn't been advanced from its
* origin.
*/
return reg->type == which &&
reg->id == 0 &&
reg->off == 0 &&
tnum_equals_const(reg->var_off, 0);
}
/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
/* min signed is max(sign bit) | min(other bits) */
reg->smin_value = max_t(s64, reg->smin_value,
reg->var_off.value | (reg->var_off.mask & S64_MIN));
/* max signed is min(sign bit) | max(other bits) */
reg->smax_value = min_t(s64, reg->smax_value,
reg->var_off.value | (reg->var_off.mask & S64_MAX));
reg->umin_value = max(reg->umin_value, reg->var_off.value);
reg->umax_value = min(reg->umax_value,
reg->var_off.value | reg->var_off.mask);
}
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
/* Learn sign from signed bounds.
* If we cannot cross the sign boundary, then signed and unsigned bounds
* are the same, so combine. This works even in the negative case, e.g.
* -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
*/
if (reg->smin_value >= 0 || reg->smax_value < 0) {
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
reg->umax_value);
return;
}
/* Learn sign from unsigned bounds. Signed bounds cross the sign
* boundary, so we must be careful.
*/
if ((s64)reg->umax_value >= 0) {
/* Positive. We can't learn anything from the smin, but smax
* is positive, hence safe.
*/
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
reg->umax_value);
} else if ((s64)reg->umin_value < 0) {
/* Negative. We can't learn anything from the smax, but smin
* is negative, hence safe.
*/
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
reg->smax_value = reg->umax_value;
}
}
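/* Worked example for the unsigned->signed direction above (illustrative
 * numbers): with umin_value = 2 and umax_value = 10, (s64)umax_value is
 * positive, so every value in range is also non-negative as a signed
 * number; smin_value becomes 2 and smax_value is clamped to at most 10. */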
/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
reg->var_off = tnum_intersect(reg->var_off,
tnum_range(reg->umin_value,
reg->umax_value));
}
/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
reg->smin_value = S64_MIN;
reg->smax_value = S64_MAX;
reg->umin_value = 0;
reg->umax_value = U64_MAX;
}
/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
/*
* Clear type, id, off, and union(map_ptr, range) and
* padding between 'type' and union
*/
memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
reg->type = SCALAR_VALUE;
reg->var_off = tnum_unknown;
reg->frameno = 0;
__mark_reg_unbounded(reg);
}
static void mark_reg_unknown(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_unknown(regs + regno);
}
static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
__mark_reg_unknown(reg);
reg->type = NOT_INIT;
}
static void mark_reg_not_init(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_not_init(regs + regno);
}
static void init_reg_state(struct bpf_verifier_env *env,
struct bpf_func_state *state)
{
struct bpf_reg_state *regs = state->regs;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
mark_reg_not_init(env, regs, i);
regs[i].live = REG_LIVE_NONE;
regs[i].parent = NULL;
}
/* frame pointer */
regs[BPF_REG_FP].type = PTR_TO_STACK;
mark_reg_known_zero(env, regs, BPF_REG_FP);
regs[BPF_REG_FP].frameno = state->frameno;
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
mark_reg_known_zero(env, regs, BPF_REG_1);
}
#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int callsite, int frameno, int subprogno)
{
state->callsite = callsite;
state->frameno = frameno;
state->subprogno = subprogno;
init_reg_state(env, state);
}
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
static int cmp_subprogs(const void *a, const void *b)
{
return ((struct bpf_subprog_info *)a)->start -
((struct bpf_subprog_info *)b)->start;
}
static int find_subprog(struct bpf_verifier_env *env, int off)
{
struct bpf_subprog_info *p;
p = bsearch(&off, env->subprog_info, env->subprog_cnt,
sizeof(env->subprog_info[0]), cmp_subprogs);
if (!p)
return -ENOENT;
return p - env->subprog_info;
}
static int add_subprog(struct bpf_verifier_env *env, int off)
{
int insn_cnt = env->prog->len;
int ret;
if (off >= insn_cnt || off < 0) {
verbose(env, "call to invalid destination\n");
return -EINVAL;
}
ret = find_subprog(env, off);
if (ret >= 0)
return 0;
if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
verbose(env, "too many subprograms\n");
return -E2BIG;
}
env->subprog_info[env->subprog_cnt++].start = off;
sort(env->subprog_info, env->subprog_cnt,
sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
return 0;
}
static int check_subprogs(struct bpf_verifier_env *env)
{
int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
/* Add entry function. */
ret = add_subprog(env, 0);
if (ret < 0)
return ret;
/* determine subprog starts. The end is one before the next starts */
for (i = 0; i < insn_cnt; i++) {
if (insn[i].code != (BPF_JMP | BPF_CALL))
continue;
if (insn[i].src_reg != BPF_PSEUDO_CALL)
continue;
if (!env->allow_ptr_leaks) {
verbose(env, "function calls to other bpf functions are allowed for root only\n");
return -EPERM;
}
ret = add_subprog(env, i + insn[i].imm + 1);
if (ret < 0)
return ret;
}
/* Add a fake 'exit' subprog which could simplify subprog iteration
* logic. 'subprog_cnt' should not be increased.
*/
subprog[env->subprog_cnt].start = insn_cnt;
if (env->log.level > 1)
for (i = 0; i < env->subprog_cnt; i++)
verbose(env, "func#%d @%d\n", i, subprog[i].start);
/* now check that all jumps are within the same subprog */
subprog_start = subprog[cur_subprog].start;
subprog_end = subprog[cur_subprog + 1].start;
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
if (BPF_CLASS(code) != BPF_JMP)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
off = i + insn[i].off + 1;
if (off < subprog_start || off >= subprog_end) {
verbose(env, "jump out of range from insn %d to %d\n", i, off);
return -EINVAL;
}
next:
if (i == subprog_end - 1) {
/* to avoid fall-through from one subprog into another
* the last insn of the subprog should be either exit
* or unconditional jump back
*/
if (code != (BPF_JMP | BPF_EXIT) &&
code != (BPF_JMP | BPF_JA)) {
verbose(env, "last insn is not an exit or jmp\n");
return -EINVAL;
}
subprog_start = subprog_end;
cur_subprog++;
if (cur_subprog < env->subprog_cnt)
subprog_end = subprog[cur_subprog + 1].start;
}
}
return 0;
}
/* Parentage chain of this register (or stack slot) should take care of all
* issues like callee-saved registers, stack slot allocation time, etc.
*/
static int mark_reg_read(struct bpf_verifier_env *env,
const struct bpf_reg_state *state,
struct bpf_reg_state *parent)
{
bool writes = parent == state->parent; /* Observe write marks */
while (parent) {
/* if read wasn't screened by an earlier write ... */
if (writes && state->live & REG_LIVE_WRITTEN)
break;
if (parent->live & REG_LIVE_DONE) {
verbose(env, "verifier BUG type %s var_off %lld off %d\n",
reg_type_str[parent->type],
parent->var_off.value, parent->off);
return -EFAULT;
}
/* ... then we depend on parent's value */
parent->live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
writes = true;
}
return 0;
}
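/* Editorial sketch of the parentage walk above: every register/slot links
 * to the same register in the parent verifier state. A read in a child
 * propagates REG_LIVE_READ up the chain and stops at the first state that
 * wrote the register (REG_LIVE_WRITTEN screens the read), e.g.:
 *
 *   state A: r6 = *(u32 *)(r1 + 0)   // r6 marked REG_LIVE_WRITTEN
 *   state B: if r6 > 5 goto +2       // read marks A's r6 and stops there
 *
 * Hitting REG_LIVE_DONE on a parent means liveness for that state was
 * already finalized, so any further marking would be a verifier bug.
 */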
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
return -EINVAL;
}
if (t == SRC_OP) {
/* check whether register used as source operand can be read */
if (regs[regno].type == NOT_INIT) {
verbose(env, "R%d !read_ok\n", regno);
return -EACCES;
}
/* We don't need to worry about FP liveness because it's read-only */
if (regno != BPF_REG_FP)
return mark_reg_read(env, &regs[regno],
regs[regno].parent);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
verbose(env, "frame pointer is read only\n");
return -EACCES;
}
regs[regno].live |= REG_LIVE_WRITTEN;
if (t == DST_OP)
mark_reg_unknown(env, regs, regno);
}
return 0;
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
switch (type) {
case PTR_TO_MAP_VALUE:
case PTR_TO_MAP_VALUE_OR_NULL:
case PTR_TO_STACK:
case PTR_TO_CTX:
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
case PTR_TO_PACKET_END:
case PTR_TO_FLOW_KEYS:
case CONST_PTR_TO_MAP:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
return true;
default:
return false;
}
}
/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
static int check_stack_write(struct bpf_verifier_env *env,
struct bpf_func_state *state, /* func where register points to */
int off, int size, int value_regno, int insn_idx)
{
struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
enum bpf_reg_type type;
err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
state->acquired_refs, true);
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (!env->allow_ptr_leaks &&
state->stack[spi].slot_type[0] == STACK_SPILL &&
size != BPF_REG_SIZE) {
verbose(env, "attempt to corrupt spilled pointer on stack\n");
return -EACCES;
}
cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0 &&
is_spillable_regtype((type = cur->regs[value_regno].type))) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
if (state != cur && type == PTR_TO_STACK) {
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
/* save register state */
state->stack[spi].spilled_ptr = cur->regs[value_regno];
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
for (i = 0; i < BPF_REG_SIZE; i++) {
if (state->stack[spi].slot_type[i] == STACK_MISC &&
!env->allow_ptr_leaks) {
int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
int soff = (-spi - 1) * BPF_REG_SIZE;
/* detected reuse of integer stack slot with a pointer
* which means either llvm is reusing stack slot or
* an attacker is trying to exploit CVE-2018-3639
* (speculative store bypass)
* Have to sanitize that slot with preemptive
* store of zero.
*/
if (*poff && *poff != soff) {
/* disallow programs where single insn stores
* into two different stack slots, since verifier
* cannot sanitize them
*/
verbose(env,
"insn %d cannot access two stack slots fp%d and fp%d",
insn_idx, *poff, soff);
return -EINVAL;
}
*poff = soff;
}
state->stack[spi].slot_type[i] = STACK_SPILL;
}
} else {
u8 type = STACK_MISC;
/* regular write of data into stack destroys any spilled ptr */
state->stack[spi].spilled_ptr.type = NOT_INIT;
/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
if (state->stack[spi].slot_type[0] == STACK_SPILL)
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack[spi].slot_type[i] = STACK_MISC;
/* only mark the slot as written if all 8 bytes were written,
* otherwise read propagation may incorrectly stop too soon
* when stack slots are partially written.
* This heuristic means that read propagation will be
* conservative, since it will add reg_live_read marks
* to stack slots all the way to the first state when a program
* writes+reads less than 8 bytes
*/
if (size == BPF_REG_SIZE)
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
/* when we zero initialize stack slots mark them as such */
if (value_regno >= 0 &&
register_is_null(&cur->regs[value_regno]))
type = STACK_ZERO;
/* Mark slots affected by this stack write. */
for (i = 0; i < size; i++)
state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
type;
}
return 0;
}
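/* Example (editorial): "*(u32 *)(r10 - 8) = r0" with an unknown scalar in
 * r0 writes 4 bytes, so only slot_type bytes 7..4 of the fp-8 slot become
 * STACK_MISC and REG_LIVE_WRITTEN is not set (size != BPF_REG_SIZE),
 * keeping read propagation conservative. If r0 is a known zero, the same
 * bytes become STACK_ZERO, which check_stack_read() below can later turn
 * back into a const-zero scalar.
 */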
static int check_stack_read(struct bpf_verifier_env *env,
struct bpf_func_state *reg_state /* func where register points to */,
int off, int size, int value_regno)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
u8 *stype;
if (reg_state->allocated_stack <= slot) {
verbose(env, "invalid read from stack off %d+0 size %d\n",
off, size);
return -EACCES;
}
stype = reg_state->stack[spi].slot_type;
if (stype[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
for (i = 1; i < BPF_REG_SIZE; i++) {
if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
verbose(env, "corrupted spill memory\n");
return -EACCES;
}
}
if (value_regno >= 0) {
/* restore register state from stack */
state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
/* mark reg as written since spilled pointer state likely
* has its liveness marks cleared by is_state_visited()
* which resets stack/reg liveness for state transitions
*/
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
reg_state->stack[spi].spilled_ptr.parent);
return 0;
} else {
int zeros = 0;
for (i = 0; i < size; i++) {
if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
continue;
if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
zeros++;
continue;
}
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
reg_state->stack[spi].spilled_ptr.parent);
if (value_regno >= 0) {
if (zeros == size) {
/* any size read into register is zero extended,
* so the whole register == const_zero
*/
__mark_reg_const_zero(&state->regs[value_regno]);
} else {
/* have read misc data from the stack */
mark_reg_unknown(env, state->regs, value_regno);
}
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
return 0;
}
}
static int check_stack_access(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size)
{
/* Stack accesses must be at a fixed offset, so that we
* can determine what type of data were returned. See
* check_stack_read().
*/
if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "variable stack access var_off=%s off=%d size=%d",
tn_buf, off, size);
return -EACCES;
}
if (off >= 0 || off < -MAX_BPF_STACK) {
verbose(env, "invalid stack off=%d size=%d\n", off, size);
return -EACCES;
}
return 0;
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_map *map = regs[regno].map_ptr;
if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
off + size > map->value_size) {
verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
map->value_size, off, size);
return -EACCES;
}
return 0;
}
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg = &state->regs[regno];
int err;
/* We may have adjusted the register to this map value, so we
* need to try adding each of min_value and max_value to off
* to make sure our theoretical access will be safe.
*/
if (env->log.level)
print_verifier_state(env, state);
/* The minimum value is only important with signed
* comparisons where we can't assume the floor of a
* value is 0. If we are using signed variables for our
* indexes we need to make sure that whatever we use
* will have a set floor within our range.
*/
if (reg->smin_value < 0 &&
(reg->smin_value == S64_MIN ||
(off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
reg->smin_value + off < 0)) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
err = __check_map_access(env, regno, reg->smin_value + off, size,
zero_size_allowed);
if (err) {
verbose(env, "R%d min value is outside of the array range\n",
regno);
return err;
}
/* If we haven't set a max value then we need to bail since we can't be
* sure we won't do bad things.
* If reg->umax_value + off could overflow, treat that as unbounded too.
*/
if (reg->umax_value >= BPF_MAX_VAR_OFF) {
verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
regno);
return -EACCES;
}
err = __check_map_access(env, regno, reg->umax_value + off, size,
zero_size_allowed);
if (err)
verbose(env, "R%d max value is outside of the array range\n",
regno);
return err;
}
#define MAX_PACKET_OFF 0xffff
static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_access_type t)
{
switch (env->prog->type) {
/* Program types only with direct read access go here! */
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SKB:
if (t == BPF_WRITE)
return false;
/* fallthrough */
/* Program types with direct read + write access go here! */
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_SK_MSG:
if (meta)
return meta->pkt_access;
env->seen_direct_write = true;
return true;
default:
return false;
}
}
static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
(u64)off + size > reg->range) {
verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
off, size, regno, reg->id, reg->off, reg->range);
return -EACCES;
}
return 0;
}
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
int err;
/* We may have added a variable offset to the packet pointer; but any
* reg->range we have comes after that. We are only checking the fixed
* offset.
*/
/* We don't allow negative numbers, because we aren't tracking enough
* detail to prove they're safe.
*/
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
err = __check_packet_access(env, regno, off, size, zero_size_allowed);
if (err) {
verbose(env, "R%d offset is outside of the packet\n", regno);
return err;
}
/* __check_packet_access has made sure "off + size - 1" is within u16.
* reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
* otherwise find_good_pkt_pointers would have refused to set range info
* and __check_packet_access would have rejected this pkt access.
* Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
*/
env->prog->aux->max_pkt_offset =
max_t(u32, env->prog->aux->max_pkt_offset,
off + reg->umax_value + size - 1);
return err;
}
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
};
if (env->ops->is_valid_access &&
env->ops->is_valid_access(off, size, t, env->prog, &info)) {
/* A non zero info.ctx_field_size indicates that this field is a
* candidate for later verifier transformation to load the whole
* field and then apply a mask when accessed with a narrower
* access than actual ctx access size. A zero info.ctx_field_size
* will only allow for whole field access and rejects any other
* type of narrower access.
*/
*reg_type = info.reg_type;
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
return 0;
}
verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
return -EACCES;
}
static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
int size)
{
if (size < 0 || off < 0 ||
(u64)off + size > sizeof(struct bpf_flow_keys)) {
verbose(env, "invalid access to flow keys off=%d size=%d\n",
off, size);
return -EACCES;
}
return 0;
}
static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, enum bpf_access_type t)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
struct bpf_insn_access_aux info;
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
if (!bpf_sock_is_valid_access(off, size, t, &info)) {
verbose(env, "invalid bpf_sock access off=%d size=%d\n",
off, size);
return -EACCES;
}
return 0;
}
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
if (allow_ptr_leaks)
return false;
return reg->type != SCALAR_VALUE;
}
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{
return cur_regs(env) + regno;
}
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
}
static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
const struct bpf_reg_state *reg = reg_state(env, regno);
return reg->type == PTR_TO_CTX ||
reg->type == PTR_TO_SOCKET;
}
static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
const struct bpf_reg_state *reg = reg_state(env, regno);
return type_is_pkt_pointer(reg->type);
}
static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{
const struct bpf_reg_state *reg = reg_state(env, regno);
/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
return reg->type == PTR_TO_FLOW_KEYS;
}
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size, bool strict)
{
struct tnum reg_off;
int ip_align;
/* Byte size accesses are always allowed. */
if (!strict || size == 1)
return 0;
/* For platforms that do not have a Kconfig enabling
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
* NET_IP_ALIGN is universally set to '2'. And on platforms
* that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
* to this code only in strict mode where we want to emulate
* the NET_IP_ALIGN==2 checking. Therefore use an
* unconditional IP align value of '2'.
*/
ip_align = 2;
reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
if (!tnum_is_aligned(reg_off, size)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env,
"misaligned packet access off %d+%s+%d+%d size %d\n",
ip_align, tn_buf, reg->off, off, size);
return -EACCES;
}
return 0;
}
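/* Worked example (editorial): with reg->off == 0, a known-zero var_off and
 * off == 14, a strict-mode 4-byte load computes
 * reg_off = 2 (ip_align) + 0 + 14 = 16, which is 4-byte aligned and thus
 * allowed; off == 13 would give 15 and be rejected as misaligned.
 */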
static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
const char *pointer_desc,
int off, int size, bool strict)
{
struct tnum reg_off;
/* Byte size accesses are always allowed. */
if (!strict || size == 1)
return 0;
reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
if (!tnum_is_aligned(reg_off, size)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
pointer_desc, tn_buf, reg->off, off, size);
return -EACCES;
}
return 0;
}
static int check_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int off,
int size, bool strict_alignment_once)
{
bool strict = env->strict_alignment || strict_alignment_once;
const char *pointer_desc = "";
switch (reg->type) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
/* Special case, because of NET_IP_ALIGN. Given metadata sits
* right in front, treat it the very same way.
*/
return check_pkt_ptr_alignment(env, reg, off, size, strict);
case PTR_TO_FLOW_KEYS:
pointer_desc = "flow keys ";
break;
case PTR_TO_MAP_VALUE:
pointer_desc = "value ";
break;
case PTR_TO_CTX:
pointer_desc = "context ";
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
/* The stack spill tracking logic in check_stack_write()
* and check_stack_read() relies on stack accesses being
* aligned.
*/
strict = true;
break;
case PTR_TO_SOCKET:
pointer_desc = "sock ";
break;
default:
break;
}
return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
strict);
}
static int update_stack_depth(struct bpf_verifier_env *env,
const struct bpf_func_state *func,
int off)
{
u16 stack = env->subprog_info[func->subprogno].stack_depth;
if (stack >= -off)
return 0;
/* update known max for given subprogram */
env->subprog_info[func->subprogno].stack_depth = -off;
return 0;
}
/* starting from main bpf function walk all instructions of the function
* and recursively walk all callees that given function can call.
* Ignore jump and exit insns.
* Since recursion is prevented by check_cfg() this algorithm
* only needs a local stack of MAX_CALL_FRAMES to remember callsites
*/
static int check_max_stack_depth(struct bpf_verifier_env *env)
{
int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
int ret_insn[MAX_CALL_FRAMES];
int ret_prog[MAX_CALL_FRAMES];
process_func:
/* round up to 32 bytes, since this is the granularity
* of the interpreter stack size
*/
depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
if (depth > MAX_BPF_STACK) {
verbose(env, "combined stack size of %d calls is %d. Too large\n",
frame + 1, depth);
return -EACCES;
}
continue_func:
subprog_end = subprog[idx + 1].start;
for (; i < subprog_end; i++) {
if (insn[i].code != (BPF_JMP | BPF_CALL))
continue;
if (insn[i].src_reg != BPF_PSEUDO_CALL)
continue;
/* remember insn and function to return to */
ret_insn[frame] = i + 1;
ret_prog[frame] = idx;
/* find the callee */
i = i + insn[i].imm + 1;
idx = find_subprog(env, i);
if (idx < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
i);
return -EFAULT;
}
frame++;
if (frame >= MAX_CALL_FRAMES) {
WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
return -EFAULT;
}
goto process_func;
}
/* end of for() loop means the last insn of the 'subprog'
* was reached. Doesn't matter whether it was JA or EXIT
*/
if (frame == 0)
return 0;
depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
frame--;
i = ret_insn[frame];
idx = ret_prog[frame];
goto continue_func;
}
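/* Example (editorial): if main uses 40 bytes of stack and calls a subprog
 * using 24, the walk accumulates round_up(40, 32) + round_up(24, 32) =
 * 64 + 32 = 96 bytes, well under MAX_BPF_STACK; each granule is
 * subtracted again via ret_insn[]/ret_prog[] when the simulated call
 * returns, so only the deepest call chain counts toward the limit.
 */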
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static int get_callee_stack_depth(struct bpf_verifier_env *env,
const struct bpf_insn *insn, int idx)
{
int start = idx + insn->imm + 1, subprog;
subprog = find_subprog(env, start);
if (subprog < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
start);
return -EFAULT;
}
return env->subprog_info[subprog].stack_depth;
}
#endif
static int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno)
{
/* Access to ctx or passing it to a helper is only allowed in
* its original, unmodified form.
*/
if (reg->off) {
verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
regno, reg->off);
return -EACCES;
}
if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
return -EACCES;
}
return 0;
}
/* truncate register to smaller size (in bytes)
* must be called with size < BPF_REG_SIZE
*/
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
u64 mask;
/* clear high bits in bit representation */
reg->var_off = tnum_cast(reg->var_off, size);
/* fix arithmetic bounds */
mask = ((u64)1 << (size * 8)) - 1;
if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
reg->umin_value &= mask;
reg->umax_value &= mask;
} else {
reg->umin_value = 0;
reg->umax_value = mask;
}
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value;
}
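/* Worked example (editorial): a 1-byte load into a register with bounds
 * [umin, umax] = [0x103, 0x105] masks with 0xff. Both bounds share the
 * high bits (0x100), so they tighten to [0x03, 0x05]; had the high bits
 * differed, e.g. [0xff, 0x101], the bounds would reset to the full
 * [0, 0xff] range. Copying the signed bounds from the unsigned ones is
 * sound because the truncated value always fits in size * 8 bits.
 */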
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register whose value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
int off, int bpf_size, enum bpf_access_type t,
int value_regno, bool strict_alignment_once)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
struct bpf_func_state *state;
int size, err = 0;
size = bpf_size_to_bytes(bpf_size);
if (size < 0)
return size;
/* alignment checks will add in reg->off themselves */
err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
if (err)
return err;
/* for access checks, reg->off is just part of off */
off += reg->off;
if (reg->type == PTR_TO_MAP_VALUE) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into map\n", value_regno);
return -EACCES;
}
err = check_map_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = SCALAR_VALUE;
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
/* ctx access returns either a scalar, or a
* PTR_TO_PACKET[_META,_END]. In the latter
* case, we know the offset is zero.
*/
if (reg_type == SCALAR_VALUE)
mark_reg_unknown(env, regs, value_regno);
else
mark_reg_known_zero(env, regs,
value_regno);
regs[value_regno].type = reg_type;
}
} else if (reg->type == PTR_TO_STACK) {
off += reg->var_off.value;
err = check_stack_access(env, reg, off, size);
if (err)
return err;
state = func(env, reg);
err = update_stack_depth(env, state, off);
if (err)
return err;
if (t == BPF_WRITE)
err = check_stack_write(env, state, off, size,
value_regno, insn_idx);
else
err = check_stack_read(env, state, off, size,
value_regno);
} else if (reg_is_pkt_pointer(reg)) {
if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
verbose(env, "cannot write into packet\n");
return -EACCES;
}
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into packet\n",
value_regno);
return -EACCES;
}
err = check_packet_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_FLOW_KEYS) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into flow keys\n",
value_regno);
return -EACCES;
}
err = check_flow_keys_access(env, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_SOCKET) {
if (t == BPF_WRITE) {
verbose(env, "cannot write into socket\n");
return -EACCES;
}
err = check_sock_access(env, regno, off, size, t);
if (!err && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else {
verbose(env, "R%d invalid mem access '%s'\n", regno,
reg_type_str[reg->type]);
return -EACCES;
}
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
regs[value_regno].type == SCALAR_VALUE) {
/* b/h/w load zero-extends, mark upper bits as known 0 */
coerce_reg_to_size(&regs[value_regno], size);
}
return err;
}
static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
int err;
if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
insn->imm != 0) {
verbose(env, "BPF_XADD uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
return -EACCES;
}
if (is_ctx_reg(env, insn->dst_reg) ||
is_pkt_reg(env, insn->dst_reg) ||
is_flow_key_reg(env, insn->dst_reg)) {
verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
insn->dst_reg,
reg_type_str[reg_state(env, insn->dst_reg)->type]);
return -EACCES;
}
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1, true);
if (err)
return err;
/* check whether atomic_add can write into the same memory */
return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1, true);
}
/* when register 'regno' is passed into function that will read 'access_size'
* bytes from that pointer, make sure that it's within stack boundary
* and all elements of stack are initialized.
* Unlike most pointer bounds-checking functions, this one doesn't take an
* 'off' argument, so it has to add in reg->off itself.
*/
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *reg = reg_state(env, regno);
struct bpf_func_state *state = func(env, reg);
int off, i, slot, spi;
if (reg->type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(reg))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[reg->type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
/* Only allow fixed-offset stack reads */
if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
return -EACCES;
}
off = reg->off + reg->var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
regno, off, access_size);
return -EACCES;
}
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
return 0;
}
for (i = 0; i < access_size; i++) {
u8 *stype;
slot = -(off + i) - 1;
spi = slot / BPF_REG_SIZE;
if (state->allocated_stack <= slot)
goto err;
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
if (*stype == STACK_MISC)
goto mark;
if (*stype == STACK_ZERO) {
/* helper can write anything into the stack */
*stype = STACK_MISC;
goto mark;
}
err:
verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
mark:
/* reading any byte out of 8-byte 'spill_slot' will cause
* the whole slot to be marked as 'read'
*/
mark_reg_read(env, &state->stack[spi].spilled_ptr,
state->stack[spi].spilled_ptr.parent);
}
return update_stack_depth(env, state, off);
}
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
switch (reg->type) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
return check_packet_access(env, regno, reg->off, access_size,
zero_size_allowed);
case PTR_TO_MAP_VALUE:
return check_map_access(env, regno, reg->off, access_size,
zero_size_allowed);
default: /* scalar_value|ptr_to_stack or invalid ptr */
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
}
}
static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_MEM ||
type == ARG_PTR_TO_MEM_OR_NULL ||
type == ARG_PTR_TO_UNINIT_MEM;
}
static bool arg_type_is_mem_size(enum bpf_arg_type type)
{
return type == ARG_CONST_SIZE ||
type == ARG_CONST_SIZE_OR_ZERO;
}
static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
int err = 0;
if (arg_type == ARG_DONTCARE)
return 0;
err = check_reg_arg(env, regno, SRC_OP);
if (err)
return err;
if (arg_type == ARG_ANYTHING) {
if (is_pointer_value(env, regno)) {
verbose(env, "R%d leaks addr into helper function\n",
regno);
return -EACCES;
}
return 0;
}
if (type_is_pkt_pointer(type) &&
!may_access_direct_pkt_data(env, meta, BPF_READ)) {
verbose(env, "helper access to the packet is not allowed\n");
return -EACCES;
}
if (arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE ||
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
expected_type = PTR_TO_STACK;
if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_SIZE ||
arg_type == ARG_CONST_SIZE_OR_ZERO) {
expected_type = SCALAR_VALUE;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_CTX) {
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
} else if (arg_type == ARG_PTR_TO_SOCKET) {
expected_type = PTR_TO_SOCKET;
if (type != expected_type)
goto err_type;
if (meta->ptr_id || !reg->id) {
verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
meta->ptr_id, reg->id);
return -EFAULT;
}
meta->ptr_id = reg->id;
} else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a SCALAR_VALUE type. Final test
* happens during stack boundary checking.
*/
if (register_is_null(reg) &&
arg_type == ARG_PTR_TO_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
} else {
verbose(env, "unsupported arg_type %d\n", arg_type);
return -EFAULT;
}
if (arg_type == ARG_CONST_MAP_PTR) {
/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
meta->map_ptr = reg->map_ptr;
} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
/* bpf_map_xxx(..., map_ptr, ..., key) call:
* check that [key, key + map->key_size) are within
* stack limits and initialized
*/
if (!meta->map_ptr) {
/* in function declaration map_ptr must come before
* map_key, so that it's verified and known before
* we have to check map_key here. Otherwise it means
* that kernel subsystem misconfigured verifier
*/
verbose(env, "invalid map_ptr to access map->key\n");
return -EACCES;
}
err = check_helper_mem_access(env, regno,
meta->map_ptr->key_size, false,
NULL);
} else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
*/
if (!meta->map_ptr) {
/* kernel subsystem misconfigured verifier */
verbose(env, "invalid map_ptr to access map->value\n");
return -EACCES;
}
meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false,
meta);
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
/* remember the mem_size which may be used later
* to refine return values.
*/
meta->msize_smax_value = reg->smax_value;
meta->msize_umax_value = reg->umax_value;
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
*/
if (!tnum_is_const(reg->var_off))
/* For unprivileged variable accesses, disable raw
* mode so that the program is required to
* initialize all the memory that the helper could
* just partially fill up.
*/
meta = NULL;
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
regno);
return -EACCES;
}
if (reg->umin_value == 0) {
err = check_helper_mem_access(env, regno - 1, 0,
zero_size_allowed,
meta);
if (err)
return err;
}
if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
regno);
return -EACCES;
}
err = check_helper_mem_access(env, regno - 1,
reg->umax_value,
zero_size_allowed, meta);
}
return err;
err_type:
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[type], reg_type_str[expected_type]);
return -EACCES;
}
static int check_map_func_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map, int func_id)
{
if (!map)
return 0;
/* We need a two way check, first is from map perspective ... */
switch (map->map_type) {
case BPF_MAP_TYPE_PROG_ARRAY:
if (func_id != BPF_FUNC_tail_call)
goto error;
break;
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
if (func_id != BPF_FUNC_perf_event_read &&
func_id != BPF_FUNC_perf_event_output &&
func_id != BPF_FUNC_perf_event_read_value)
goto error;
break;
case BPF_MAP_TYPE_STACK_TRACE:
if (func_id != BPF_FUNC_get_stackid)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_ARRAY:
if (func_id != BPF_FUNC_skb_under_cgroup &&
func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_STORAGE:
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
if (func_id != BPF_FUNC_get_local_storage)
goto error;
break;
/* devmap returns a pointer to a live net_device ifindex that we cannot
* allow to be modified from the bpf side. So do not allow element
* lookups for now.
*/
case BPF_MAP_TYPE_DEVMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
/* Restrict bpf side of cpumap and xskmap, open when use-cases
* appear.
*/
case BPF_MAP_TYPE_CPUMAP:
case BPF_MAP_TYPE_XSKMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
if (func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map)
goto error;
break;
case BPF_MAP_TYPE_SOCKHASH:
if (func_id != BPF_FUNC_sk_redirect_hash &&
func_id != BPF_FUNC_sock_hash_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash)
goto error;
break;
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
if (func_id != BPF_FUNC_sk_select_reuseport)
goto error;
break;
case BPF_MAP_TYPE_QUEUE:
case BPF_MAP_TYPE_STACK:
if (func_id != BPF_FUNC_map_peek_elem &&
func_id != BPF_FUNC_map_pop_elem &&
func_id != BPF_FUNC_map_push_elem)
goto error;
break;
default:
break;
}
/* ... and second from the function itself. */
switch (func_id) {
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
if (env->subprog_cnt > 1) {
verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
return -EINVAL;
}
break;
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
case BPF_FUNC_perf_event_read_value:
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
goto error;
break;
case BPF_FUNC_get_stackid:
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;
break;
case BPF_FUNC_current_task_under_cgroup:
case BPF_FUNC_skb_under_cgroup:
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
break;
case BPF_FUNC_redirect_map:
if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
map->map_type != BPF_MAP_TYPE_CPUMAP &&
map->map_type != BPF_MAP_TYPE_XSKMAP)
goto error;
break;
case BPF_FUNC_sk_redirect_map:
case BPF_FUNC_msg_redirect_map:
case BPF_FUNC_sock_map_update:
if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
goto error;
break;
case BPF_FUNC_sk_redirect_hash:
case BPF_FUNC_msg_redirect_hash:
case BPF_FUNC_sock_hash_update:
if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
goto error;
break;
case BPF_FUNC_get_local_storage:
if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
goto error;
break;
case BPF_FUNC_sk_select_reuseport:
if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
goto error;
break;
case BPF_FUNC_map_peek_elem:
case BPF_FUNC_map_pop_elem:
case BPF_FUNC_map_push_elem:
if (map->map_type != BPF_MAP_TYPE_QUEUE &&
map->map_type != BPF_MAP_TYPE_STACK)
goto error;
break;
default:
break;
}
return 0;
error:
verbose(env, "cannot pass map_type %d into func %s#%d\n",
map->map_type, func_id_name(func_id), func_id);
return -EINVAL;
}
static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
int count = 0;
if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
count++;
/* We only support one arg being in raw mode at the moment,
* which is sufficient for the helper functions we have
* right now.
*/
return count <= 1;
}
static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
enum bpf_arg_type arg_next)
{
return (arg_type_is_mem_ptr(arg_curr) &&
!arg_type_is_mem_size(arg_next)) ||
(!arg_type_is_mem_ptr(arg_curr) &&
arg_type_is_mem_size(arg_next));
}
static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{
/* bpf_xxx(..., buf, len) call will access 'len'
* bytes from memory 'buf'. Both arg types need
* to be paired, so make sure there's no buggy
* helper function specification.
*/
if (arg_type_is_mem_size(fn->arg1_type) ||
arg_type_is_mem_ptr(fn->arg5_type) ||
check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
return false;
return true;
}
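/* Example (editorial): a helper like bpf_probe_read(dst, size, src)
 * declares a mem pointer in arg1 (ARG_PTR_TO_UNINIT_MEM) immediately
 * followed by its size in arg2, a valid pair. A proto starting with a
 * bare size in arg1, or placing a mem pointer in arg5 (which has no
 * following size arg), fails this check.
 */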
static bool check_refcount_ok(const struct bpf_func_proto *fn)
{
int count = 0;
if (arg_type_is_refcounted(fn->arg1_type))
count++;
if (arg_type_is_refcounted(fn->arg2_type))
count++;
if (arg_type_is_refcounted(fn->arg3_type))
count++;
if (arg_type_is_refcounted(fn->arg4_type))
count++;
if (arg_type_is_refcounted(fn->arg5_type))
count++;
/* We only support one refcounted arg at the moment,
* which is sufficient for the helper functions we have right now.
*/
return count <= 1;
}
static int check_func_proto(const struct bpf_func_proto *fn)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
check_refcount_ok(fn) ? 0 : -EINVAL;
}
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
* are now invalid, so turn them into unknown SCALAR_VALUE.
*/
static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
struct bpf_func_state *state)
{
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
if (reg_is_pkt_pointer_any(&regs[i]))
mark_reg_unknown(env, regs, i);
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg_is_pkt_pointer_any(reg))
__mark_reg_unknown(reg);
}
}
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *vstate = env->cur_state;
int i;
for (i = 0; i <= vstate->curframe; i++)
__clear_all_pkt_pointers(env, vstate->frame[i]);
}
static void release_reg_references(struct bpf_verifier_env *env,
struct bpf_func_state *state, int id)
{
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].id == id)
mark_reg_unknown(env, regs, i);
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg_is_refcounted(reg) && reg->id == id)
__mark_reg_unknown(reg);
}
}
/* The pointer with the specified id has released its reference to kernel
* resources. Identify all copies of the same pointer and clear the reference.
*/
static int release_reference(struct bpf_verifier_env *env,
struct bpf_call_arg_meta *meta)
{
struct bpf_verifier_state *vstate = env->cur_state;
int i;
for (i = 0; i <= vstate->curframe; i++)
release_reg_references(env, vstate->frame[i], meta->ptr_id);
return release_reference_state(env, meta->ptr_id);
}
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *caller, *callee;
int i, err, subprog, target_insn;
if (state->curframe + 1 >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep\n",
state->curframe + 2);
return -E2BIG;
}
target_insn = *insn_idx + insn->imm;
subprog = find_subprog(env, target_insn + 1);
if (subprog < 0) {
verbose(env, "verifier bug. No program starts at insn %d\n",
target_insn + 1);
return -EFAULT;
}
caller = state->frame[state->curframe];
if (state->frame[state->curframe + 1]) {
verbose(env, "verifier bug. Frame %d already allocated\n",
state->curframe + 1);
return -EFAULT;
}
callee = kzalloc(sizeof(*callee), GFP_KERNEL);
if (!callee)
return -ENOMEM;
state->frame[state->curframe + 1] = callee;
/* callee cannot access r0, r6 - r9 for reading and has to write
* into its own stack before reading from it.
* callee can read/write into caller's stack
*/
init_func_state(env, callee,
/* remember the callsite, it will be used by bpf_exit */
*insn_idx /* callsite */,
state->curframe + 1 /* frameno within this callchain */,
subprog /* subprog number within this prog */);
/* Transfer references to the callee */
err = transfer_reference_state(callee, caller);
if (err)
return err;
/* copy r1 - r5 args that callee can access. The copy includes parent
* pointers, which connects us up to the liveness chain
*/
for (i = BPF_REG_1; i <= BPF_REG_5; i++)
callee->regs[i] = caller->regs[i];
/* after the call registers r0 - r5 were scratched */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, caller->regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* only increment it after check_reg_arg() finished */
state->curframe++;
/* and go analyze first insn of the callee */
*insn_idx = target_insn;
if (env->log.level) {
verbose(env, "caller:\n");
print_verifier_state(env, caller);
verbose(env, "callee:\n");
print_verifier_state(env, callee);
}
return 0;
}
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *caller, *callee;
struct bpf_reg_state *r0;
int err;
callee = state->frame[state->curframe];
r0 = &callee->regs[BPF_REG_0];
if (r0->type == PTR_TO_STACK) {
/* technically it's ok to return caller's stack pointer
* (or caller's caller's pointer) back to the caller,
* since these pointers are valid. Only current stack
* pointer will be invalid as soon as function exits,
* but let's be conservative
*/
verbose(env, "cannot return stack pointer to the caller\n");
return -EINVAL;
}
state->curframe--;
caller = state->frame[state->curframe];
/* return to the caller whatever r0 had in the callee */
caller->regs[BPF_REG_0] = *r0;
/* Transfer references to the caller */
err = transfer_reference_state(caller, callee);
if (err)
return err;
*insn_idx = callee->callsite + 1;
if (env->log.level) {
verbose(env, "returning from callee:\n");
print_verifier_state(env, callee);
verbose(env, "to caller at %d:\n", *insn_idx);
print_verifier_state(env, caller);
}
/* clear everything in the callee */
free_func_state(callee);
state->frame[state->curframe + 1] = NULL;
return 0;
}
static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
int func_id,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
if (ret_type != RET_INTEGER ||
(func_id != BPF_FUNC_get_stack &&
func_id != BPF_FUNC_probe_read_str))
return;
ret_reg->smax_value = meta->msize_smax_value;
ret_reg->umax_value = meta->msize_umax_value;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
}
static int
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
int func_id, int insn_idx)
{
struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
if (func_id != BPF_FUNC_tail_call &&
func_id != BPF_FUNC_map_lookup_elem &&
func_id != BPF_FUNC_map_update_elem &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_map_push_elem &&
func_id != BPF_FUNC_map_pop_elem &&
func_id != BPF_FUNC_map_peek_elem)
return 0;
if (meta->map_ptr == NULL) {
verbose(env, "kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
if (!BPF_MAP_PTR(aux->map_state))
bpf_map_ptr_store(aux, meta->map_ptr,
meta->map_ptr->unpriv_array);
else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
meta->map_ptr->unpriv_array);
return 0;
}
static int check_reference_leak(struct bpf_verifier_env *env)
{
struct bpf_func_state *state = cur_func(env);
int i;
for (i = 0; i < state->acquired_refs; i++) {
verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
state->refs[i].id, state->refs[i].insn_idx);
}
return state->acquired_refs ? -EINVAL : 0;
}
static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs;
struct bpf_call_arg_meta meta;
bool changes_data;
int i, err;
/* find function prototype */
if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
func_id);
return -EINVAL;
}
if (env->ops->get_func_proto)
fn = env->ops->get_func_proto(func_id, env->prog);
if (!fn) {
verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
func_id);
return -EINVAL;
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
return -EINVAL;
}
/* With LD_ABS/IND some JITs save/restore skb from r1. */
changes_data = bpf_helper_changes_pkt_data(fn->func);
if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
func_id_name(func_id), func_id);
return -EINVAL;
}
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
err = check_func_proto(fn);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
return err;
}
/* check args */
err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
if (err)
return err;
err = record_func_map(env, &meta, func_id, insn_idx);
if (err)
return err;
/* Mark slots with STACK_MISC in case of raw mode, stack offset
* is inferred from register state.
*/
for (i = 0; i < meta.access_size; i++) {
err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
BPF_WRITE, -1, false);
if (err)
return err;
}
if (func_id == BPF_FUNC_tail_call) {
err = check_reference_leak(env);
if (err) {
verbose(env, "tail_call would lead to reference leak\n");
return err;
}
} else if (is_release_function(func_id)) {
err = release_reference(env, &meta);
if (err)
return err;
}
regs = cur_regs(env);
/* check that flags argument in get_local_storage(map, flags) is 0,
* this is required because get_local_storage() can't return an error.
*/
if (func_id == BPF_FUNC_get_local_storage &&
!register_is_null(&regs[BPF_REG_2])) {
verbose(env, "get_local_storage() doesn't support non-zero flags\n");
return -EINVAL;
}
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* update return register (already marked as written above) */
if (fn->ret_type == RET_INTEGER) {
/* sets type to SCALAR_VALUE */
mark_reg_unknown(env, regs, BPF_REG_0);
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
fn->ret_type == RET_PTR_TO_MAP_VALUE) {
/* There is no offset yet applied, variable or fixed */
mark_reg_known_zero(env, regs, BPF_REG_0);
/* remember map_ptr, so that check_map_access()
* can check 'value_size' boundary of memory access
* to map element returned from bpf_map_lookup_elem()
*/
if (meta.map_ptr == NULL) {
verbose(env,
"kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
regs[BPF_REG_0].map_ptr = meta.map_ptr;
if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
} else {
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
regs[BPF_REG_0].id = ++env->id_gen;
}
} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
int id = acquire_reference_state(env, insn_idx);
if (id < 0)
return id;
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
regs[BPF_REG_0].id = id;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
return -EINVAL;
}
do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
if (err)
return err;
if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
const char *err_str;
#ifdef CONFIG_PERF_EVENTS
err = get_callchain_buffers(sysctl_perf_event_max_stack);
err_str = "cannot get callchain buffer for func %s#%d\n";
#else
err = -ENOTSUPP;
err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
#endif
if (err) {
verbose(env, err_str, func_id_name(func_id), func_id);
return err;
}
env->prog->has_callchain_buf = true;
}
if (changes_data)
clear_all_pkt_pointers(env);
return 0;
}
static bool signed_add_overflows(s64 a, s64 b)
{
/* Do the add in u64, where overflow is well-defined */
s64 res = (s64)((u64)a + (u64)b);
if (b < 0)
return res > a;
return res < a;
}
static bool signed_sub_overflows(s64 a, s64 b)
{
/* Do the sub in u64, where overflow is well-defined */
s64 res = (s64)((u64)a - (u64)b);
if (b < 0)
return res < a;
return res > a;
}
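/* Worked example (editorial): signed_add_overflows(S64_MAX, 1) computes
 * res = S64_MIN via well-defined u64 wraparound; since b > 0 and
 * res < a, overflow is reported. Doing the arithmetic in u64 first
 * avoids the undefined behaviour of signed overflow in C.
 */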
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
enum bpf_reg_type type)
{
bool known = tnum_is_const(reg->var_off);
s64 val = reg->var_off.value;
s64 smin = reg->smin_value;
if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
verbose(env, "math between %s pointer and %lld is not allowed\n",
reg_type_str[type], val);
return false;
}
if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
verbose(env, "%s pointer offset %d is not allowed\n",
reg_type_str[type], reg->off);
return false;
}
if (smin == S64_MIN) {
verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
reg_type_str[type]);
return false;
}
if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
verbose(env, "value %lld makes %s pointer be out of bounds\n",
smin, reg_type_str[type]);
return false;
}
return true;
}
static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
{
return &env->insn_aux_data[env->insn_idx];
}
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
u32 *ptr_limit, u8 opcode, bool off_is_neg)
{
bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
(opcode == BPF_SUB && !off_is_neg);
u32 off;
switch (ptr_reg->type) {
case PTR_TO_STACK:
off = ptr_reg->off + ptr_reg->var_off.value;
if (mask_to_left)
*ptr_limit = MAX_BPF_STACK + off;
else
*ptr_limit = -off;
return 0;
case PTR_TO_MAP_VALUE:
if (mask_to_left) {
*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
} else {
off = ptr_reg->smin_value + ptr_reg->off;
*ptr_limit = ptr_reg->map_ptr->value_size - off;
}
return 0;
default:
return -EINVAL;
}
}
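/* Worked example (editorial): for "r2 = fp-16; r2 += r3" with a possibly
 * negative r3 (off_is_neg, masking to the left), off = -16 + 0 and
 * ptr_limit = MAX_BPF_STACK + (-16) = 496: the masked value may move the
 * pointer at most that far toward the bottom of the stack. For a
 * non-negative r3 the limit is -off = 16 bytes, back up toward fp.
 */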
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
struct bpf_reg_state *dst_reg,
bool off_is_neg)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_insn_aux_data *aux = cur_aux(env);
bool ptr_is_dst_reg = ptr_reg == dst_reg;
u8 opcode = BPF_OP(insn->code);
u32 alu_state, alu_limit;
struct bpf_reg_state tmp;
bool ret;
if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
return 0;
/* We already marked aux for masking from non-speculative
* paths, thus we got here in the first place. We only care
* to explore bad access from here.
*/
if (vstate->speculative)
goto do_sim;
alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
alu_state |= ptr_is_dst_reg ?
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
return 0;
/* If we arrived here from different branches with different
* limits to sanitize, then this won't work.
*/
if (aux->alu_state &&
(aux->alu_state != alu_state ||
aux->alu_limit != alu_limit))
return -EACCES;
/* Corresponding fixup done in fixup_bpf_calls(). */
aux->alu_state = alu_state;
aux->alu_limit = alu_limit;
do_sim:
/* Simulate and find potential out-of-bounds access under
* speculative execution from truncation as a result of
* masking when off was not within expected range. If off
* sits in dst, then we temporarily need to move ptr there
* to simulate dst (== 0) +/-= ptr. Needed, for example,
* for cases where we use K-based arithmetic in one direction
* and truncated reg-based in the other in order to explore
* bad access.
*/
if (!ptr_is_dst_reg) {
tmp = *dst_reg;
*dst_reg = *ptr_reg;
}
ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
if (!ptr_is_dst_reg)
*dst_reg = tmp;
return !ret ? -EFAULT : 0;
}
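/* Editorial note: the push_stack(..., speculative = true) above queues an
 * extra verifier path that continues at the next insn as if the ALU
 * result had been truncated by the runtime masking, so an out-of-bounds
 * pointer produced under misspeculation is rejected at verification time
 * rather than becoming reachable at run time.
 */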
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
* Caller should also handle BPF_MOV case separately.
* If we return -EACCES, caller may want to try again treating pointer as a
* scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
*/
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
u32 dst = insn->dst_reg, src = insn->src_reg;
u8 opcode = BPF_OP(insn->code);
int ret;
dst_reg = &regs[dst];
if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
smin_val > smax_val || umin_val > umax_val) {
/* Taint dst register if offset had invalid bounds derived from
* e.g. dead branches.
*/
__mark_reg_unknown(dst_reg);
return 0;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops on pointers produce (meaningless) scalars */
verbose(env,
"R%d 32-bit pointer arithmetic prohibited\n",
dst);
return -EACCES;
}
switch (ptr_reg->type) {
case PTR_TO_MAP_VALUE_OR_NULL:
verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
case CONST_PTR_TO_MAP:
case PTR_TO_PACKET_END:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
case PTR_TO_MAP_VALUE:
if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
off_reg == dst_reg ? dst : src);
return -EACCES;
}
/* fall-through */
default:
break;
}
/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
* The id may be overwritten later if we create a new variable offset.
*/
dst_reg->type = ptr_reg->type;
dst_reg->id = ptr_reg->id;
if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
!check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
return -EINVAL;
switch (opcode) {
case BPF_ADD:
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
if (ret < 0) {
verbose(env, "R%d tried to add from different maps or paths\n", dst);
return ret;
}
/* We can take a fixed offset as long as it doesn't overflow
* the s32 'off' field
*/
if (known && (ptr_reg->off + smin_val ==
(s64)(s32)(ptr_reg->off + smin_val))) {
/* pointer += K. Accumulate it into fixed offset */
dst_reg->smin_value = smin_ptr;
dst_reg->smax_value = smax_ptr;
dst_reg->umin_value = umin_ptr;
dst_reg->umax_value = umax_ptr;
dst_reg->var_off = ptr_reg->var_off;
dst_reg->off = ptr_reg->off + smin_val;
dst_reg->raw = ptr_reg->raw;
break;
}
/* A new variable offset is created. Note that off_reg->off
* == 0, since it's a scalar.
* dst_reg gets the pointer type and since some positive
* integer value was added to the pointer, give it a new 'id'
* if it's a PTR_TO_PACKET.
* This creates a new 'base' pointer; off_reg (variable) gets
* added into the variable offset, and we copy the fixed offset
* from ptr_reg.
*/
if (signed_add_overflows(smin_ptr, smin_val) ||
signed_add_overflows(smax_ptr, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = smin_ptr + smin_val;
dst_reg->smax_value = smax_ptr + smax_val;
}
if (umin_ptr + umin_val < umin_ptr ||
umax_ptr + umax_val < umax_ptr) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value = umin_ptr + umin_val;
dst_reg->umax_value = umax_ptr + umax_val;
}
dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
dst_reg->raw = ptr_reg->raw;
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range to zero */
dst_reg->raw = 0;
}
break;
case BPF_SUB:
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
if (ret < 0) {
verbose(env, "R%d tried to sub from different maps or paths\n", dst);
return ret;
}
if (dst_reg == off_reg) {
/* scalar -= pointer. Creates an unknown scalar */
verbose(env, "R%d tried to subtract pointer from scalar\n",
dst);
return -EACCES;
}
/* We don't allow subtraction from FP, because (according to
* the test_verifier.c test "invalid fp arithmetic") JITs might not
* be able to deal with it.
*/
if (ptr_reg->type == PTR_TO_STACK) {
verbose(env, "R%d subtraction from stack pointer prohibited\n",
dst);
return -EACCES;
}
if (known && (ptr_reg->off - smin_val ==
(s64)(s32)(ptr_reg->off - smin_val))) {
/* pointer -= K. Subtract it from fixed offset */
dst_reg->smin_value = smin_ptr;
dst_reg->smax_value = smax_ptr;
dst_reg->umin_value = umin_ptr;
dst_reg->umax_value = umax_ptr;
dst_reg->var_off = ptr_reg->var_off;
dst_reg->id = ptr_reg->id;
dst_reg->off = ptr_reg->off - smin_val;
dst_reg->raw = ptr_reg->raw;
break;
}
/* A new variable offset is created. If the subtrahend is known
* nonnegative, then any reg->range we had before is still good.
*/
if (signed_sub_overflows(smin_ptr, smax_val) ||
signed_sub_overflows(smax_ptr, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = smin_ptr - smax_val;
dst_reg->smax_value = smax_ptr - smin_val;
}
if (umin_ptr < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value = umin_ptr - umax_val;
dst_reg->umax_value = umax_ptr - umin_val;
}
dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
dst_reg->raw = ptr_reg->raw;
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range to zero */
if (smin_val < 0)
dst_reg->raw = 0;
}
break;
case BPF_AND:
case BPF_OR:
case BPF_XOR:
/* bitwise ops on pointers are troublesome, prohibit. */
verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
default:
/* other operators (e.g. MUL,LSH) produce non-pointer results */
verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
}
if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
return -EINVAL;
__update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
/* For unprivileged we require that resulting offset must be in bounds
* in order to be able to sanitize access later on.
*/
if (!env->allow_ptr_leaks) {
if (dst_reg->type == PTR_TO_MAP_VALUE &&
check_map_access(env, dst, dst_reg->off, 1, false)) {
verbose(env, "R%d pointer arithmetic of map value goes out of range, "
"prohibited for !root\n", dst);
return -EACCES;
} else if (dst_reg->type == PTR_TO_STACK &&
check_stack_access(env, dst_reg, dst_reg->off +
dst_reg->var_off.value, 1)) {
verbose(env, "R%d stack pointer arithmetic goes out of range, "
"prohibited for !root\n", dst);
return -EACCES;
}
}
return 0;
}
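/* Worked example (hypothetical values) for the BPF_ADD path above: adding
* an unknown scalar bounded to [0, 16] to a fresh PTR_TO_MAP_VALUE (off,
* bounds and var_off all known zero) leaves dst_reg a map-value pointer
* with smin/umin == 0, smax/umax == 16 and var_off taken from the scalar,
* while the fixed off is copied from ptr_reg. A later check_map_access()
* must then prove that the access fits for the entire [0, 16] range.
*/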
/* WARNING: This function does calculations on 64-bit values, but the actual
* execution may occur on 32-bit values. Therefore, things like bitshifts
* need extra checks in the 32-bit case.
*/
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state src_reg)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
if (insn_bitness == 32) {
/* Relevant for 32-bit RSH: Information can propagate towards
* LSB, so it isn't sufficient to only truncate the output to
* 32 bits.
*/
coerce_reg_to_size(dst_reg, 4);
coerce_reg_to_size(&src_reg, 4);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
umax_val = src_reg.umax_value;
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
smin_val > smax_val || umin_val > umax_val) {
/* Taint dst register if offset had invalid bounds derived from
* e.g. dead branches.
*/
__mark_reg_unknown(dst_reg);
return 0;
}
if (!src_known &&
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
__mark_reg_unknown(dst_reg);
return 0;
}
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value += smin_val;
dst_reg->smax_value += smax_val;
}
if (dst_reg->umin_value + umin_val < umin_val ||
dst_reg->umax_value + umax_val < umax_val) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value += umin_val;
dst_reg->umax_value += umax_val;
}
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value -= smax_val;
dst_reg->smax_value -= smin_val;
}
if (dst_reg->umin_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value -= umax_val;
dst_reg->umax_value -= umin_val;
}
dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
break;
case BPF_MUL:
dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
if (smin_val < 0 || dst_reg->smin_value < 0) {
/* Ain't nobody got time to multiply that sign */
__mark_reg_unbounded(dst_reg);
__update_reg_bounds(dst_reg);
break;
}
/* Both values are positive, so we can work with unsigned and
* copy the result to signed (unless it exceeds S64_MAX).
*/
if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
/* Potential overflow, we know nothing */
__mark_reg_unbounded(dst_reg);
/* (except what we can learn from the var_off) */
__update_reg_bounds(dst_reg);
break;
}
dst_reg->umin_value *= umin_val;
dst_reg->umax_value *= umax_val;
if (dst_reg->umax_value > S64_MAX) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
break;
case BPF_AND:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value &
src_reg.var_off.value);
break;
}
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = dst_reg->var_off.value;
dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ANDing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ANDing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_OR:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value |
src_reg.var_off.value);
break;
}
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
dst_reg->umax_value = dst_reg->var_off.value |
dst_reg->var_off.mask;
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ORing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ORing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* We lose all sign bit information (except what we can pick
* up from var_off)
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
/* If we might shift our top bit out, then we know nothing */
if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value <<= umin_val;
dst_reg->umax_value <<= umax_val;
}
dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
* be negative, then either:
* 1) src_reg might be zero, so the sign bit of the result is
* unknown, so we lose our signed bounds
* 2) it's known negative, thus the unsigned bounds capture the
* signed bounds
* 3) the signed bounds cross zero, so they tell us nothing
* about the result
* If the value in dst_reg is known nonnegative, then again the
* unsigned bounds capture the signed bounds.
* Thus, in all cases it suffices to blow away our signed bounds
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
dst_reg->umin_value >>= umax_val;
dst_reg->umax_value >>= umin_val;
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_ARSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* Upon reaching here, src_known is true and
* umax_val is equal to umin_val.
*/
dst_reg->smin_value >>= umin_val;
dst_reg->smax_value >>= umin_val;
dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
/* blow away the dst_reg umin_value/umax_value and rely on
* dst_reg var_off to refine the result.
*/
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
__update_reg_bounds(dst_reg);
break;
default:
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->32 */
coerce_reg_to_size(dst_reg, 4);
}
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
}
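/* Worked example (hypothetical values) for the BPF_AND case above: ANDing
* a completely unknown dst_reg with a known constant 0xff yields
* var_off == (value 0, mask 0xff), hence umin_value == 0 and
* umax_value == min(U64_MAX, 0xff) == 0xff; the signed bounds are then
* re-derived from these unsigned ones. This is the idiom programs use to
* present a provably bounded index to the verifier.
*/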
/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
* and var_off.
*/
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
dst_reg = &regs[insn->dst_reg];
src_reg = NULL;
if (dst_reg->type != SCALAR_VALUE)
ptr_reg = dst_reg;
if (BPF_SRC(insn->code) == BPF_X) {
src_reg = &regs[insn->src_reg];
if (src_reg->type != SCALAR_VALUE) {
if (dst_reg->type != SCALAR_VALUE) {
/* Combining two pointers by any ALU op yields
* an arbitrary scalar. Disallow all math except
* pointer subtraction
*/
if (opcode == BPF_SUB && env->allow_ptr_leaks) {
mark_reg_unknown(env, regs, insn->dst_reg);
return 0;
}
verbose(env, "R%d pointer %s pointer prohibited\n",
insn->dst_reg,
bpf_alu_string[opcode >> 4]);
return -EACCES;
} else {
/* scalar += pointer
* This is legal, but we have to reverse our
* src/dest handling in computing the range
*/
return adjust_ptr_min_max_vals(env, insn,
src_reg, dst_reg);
}
} else if (ptr_reg) {
/* pointer += scalar */
return adjust_ptr_min_max_vals(env, insn,
dst_reg, src_reg);
}
} else {
/* Pretend the src is a reg with a known value, since we only
* need to be able to read from this state.
*/
off_reg.type = SCALAR_VALUE;
__mark_reg_known(&off_reg, insn->imm);
src_reg = &off_reg;
if (ptr_reg) /* pointer += K */
return adjust_ptr_min_max_vals(env, insn,
ptr_reg, src_reg);
}
/* Getting here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
print_verifier_state(env, state);
verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
print_verifier_state(env, state);
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
}
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode == BPF_END || opcode == BPF_NEG) {
if (opcode == BPF_NEG) {
if (BPF_SRC(insn->code) != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->off != 0 || insn->imm != 0) {
verbose(env, "BPF_NEG uses reserved fields\n");
return -EINVAL;
}
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
(insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
BPF_CLASS(insn->code) == BPF_ALU64) {
verbose(env, "BPF_END uses reserved fields\n");
return -EINVAL;
}
}
/* check src operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer arithmetic prohibited\n",
insn->dst_reg);
return -EACCES;
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
} else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
}
/* check dest operand, mark as required later */
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
if (BPF_SRC(insn->code) == BPF_X) {
struct bpf_reg_state *src_reg = regs + insn->src_reg;
struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
if (BPF_CLASS(insn->code) == BPF_ALU64) {
/* case: R1 = R2
* copy register state to dest reg
*/
*dst_reg = *src_reg;
dst_reg->live |= REG_LIVE_WRITTEN;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
verbose(env,
"R%d partial copy of pointer\n",
insn->src_reg);
return -EACCES;
} else if (src_reg->type == SCALAR_VALUE) {
*dst_reg = *src_reg;
dst_reg->live |= REG_LIVE_WRITTEN;
} else {
mark_reg_unknown(env, regs,
insn->dst_reg);
}
coerce_reg_to_size(dst_reg, 4);
}
} else {
/* case: R = imm
* remember the value we stored into this reg
*/
/* clear any state __mark_reg_known doesn't set */
mark_reg_unknown(env, regs, insn->dst_reg);
regs[insn->dst_reg].type = SCALAR_VALUE;
if (BPF_CLASS(insn->code) == BPF_ALU64) {
__mark_reg_known(regs + insn->dst_reg,
insn->imm);
} else {
__mark_reg_known(regs + insn->dst_reg,
(u32)insn->imm);
}
}
} else if (opcode > BPF_END) {
verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
return -EINVAL;
} else { /* all other ALU ops: and, sub, xor, add, ... */
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
verbose(env, "div by zero\n");
return -EINVAL;
}
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
if (insn->imm < 0 || insn->imm >= size) {
verbose(env, "invalid shift %d\n", insn->imm);
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
return adjust_reg_min_max_vals(env, insn);
}
return 0;
}
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *reg;
u16 new_range;
int i, j;
if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))
/* This doesn't give us any range */
return;
if (dst_reg->umax_value > MAX_PACKET_OFF ||
dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
/* Risk of overflow. For instance, ptr + (1<<63) may be less
* than pkt_end, but that's because it's also less than pkt.
*/
return;
new_range = dst_reg->off;
if (range_right_open)
new_range--;
/* Examples for register markings:
*
* pkt_data in dst register:
*
* r2 = r3;
* r2 += 8;
* if (r2 > pkt_end) goto <handle exception>
* <access okay>
*
* r2 = r3;
* r2 += 8;
* if (r2 < pkt_end) goto <access okay>
* <handle exception>
*
* Where:
* r2 == dst_reg, pkt_end == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* pkt_data in src register:
*
* r2 = r3;
* r2 += 8;
* if (pkt_end >= r2) goto <access okay>
* <handle exception>
*
* r2 = r3;
* r2 += 8;
* if (pkt_end <= r2) goto <handle exception>
* <access okay>
*
* Where:
* pkt_end == dst_reg, r2 == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
* or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
* and [r3, r3 + 8-1) respectively is safe to access depending on
* the check.
*/
/* If our ids match, then we must have the same max_value. And we
* don't care about the other reg's fixed offset, since if it's too big
* the range won't allow anything.
* dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
*/
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == type && regs[i].id == dst_reg->id)
/* keep the maximum range already checked */
regs[i].range = max(regs[i].range, new_range);
for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg->type == type && reg->id == dst_reg->id)
reg->range = max(reg->range, new_range);
}
}
}
/* compute branch direction of the expression "if (reg opcode val) goto target;"
* and return:
* 1 - branch will be taken and "goto target" will be executed
* 0 - branch will not be taken and fall-through to next insn
* -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
*/
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
{
if (__is_pointer_value(false, reg))
return -1;
switch (opcode) {
case BPF_JEQ:
if (tnum_is_const(reg->var_off))
return !!tnum_equals_const(reg->var_off, val);
break;
case BPF_JNE:
if (tnum_is_const(reg->var_off))
return !tnum_equals_const(reg->var_off, val);
break;
case BPF_JSET:
if ((~reg->var_off.mask & reg->var_off.value) & val)
return 1;
if (!((reg->var_off.mask | reg->var_off.value) & val))
return 0;
break;
case BPF_JGT:
if (reg->umin_value > val)
return 1;
else if (reg->umax_value <= val)
return 0;
break;
case BPF_JSGT:
if (reg->smin_value > (s64)val)
return 1;
else if (reg->smax_value < (s64)val)
return 0;
break;
case BPF_JLT:
if (reg->umax_value < val)
return 1;
else if (reg->umin_value >= val)
return 0;
break;
case BPF_JSLT:
if (reg->smax_value < (s64)val)
return 1;
else if (reg->smin_value >= (s64)val)
return 0;
break;
case BPF_JGE:
if (reg->umin_value >= val)
return 1;
else if (reg->umax_value < val)
return 0;
break;
case BPF_JSGE:
if (reg->smin_value >= (s64)val)
return 1;
else if (reg->smax_value < (s64)val)
return 0;
break;
case BPF_JLE:
if (reg->umax_value <= val)
return 1;
else if (reg->umin_value > val)
return 0;
break;
case BPF_JSLE:
if (reg->smax_value <= (s64)val)
return 1;
else if (reg->smin_value > (s64)val)
return 0;
break;
}
return -1;
}
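/* Example for is_branch_taken() above, with hypothetical bounds: for
* "if r1 > 5" (BPF_JGT, val == 5), r1 in [6, 10] has umin_value > 5 and
* returns 1 (only the taken path is explored); r1 in [0, 5] has
* umax_value <= 5 and returns 0 (only the fall-through is explored);
* r1 in [0, 10] decides neither way and -1 makes the verifier walk both
* paths.
*/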
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
* In JEQ/JNE cases we also adjust the var_off values.
*/
static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
/* If the dst_reg is a pointer, we can't learn anything about its
* variable offset from the compare (unless src_reg were a pointer into
* the same object, but we don't bother with that).
* Since false_reg and true_reg have the same type by construction, we
* only need to check one of them for pointerness.
*/
if (__is_pointer_value(false, false_reg))
return;
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
__mark_reg_known(true_reg, val);
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure.
*/
__mark_reg_known(false_reg, val);
break;
case BPF_JSET:
false_reg->var_off = tnum_and(false_reg->var_off,
tnum_const(~val));
if (is_power_of_2(val))
true_reg->var_off = tnum_or(true_reg->var_off,
tnum_const(val));
break;
case BPF_JGT:
false_reg->umax_value = min(false_reg->umax_value, val);
true_reg->umin_value = max(true_reg->umin_value, val + 1);
break;
case BPF_JSGT:
false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
break;
case BPF_JLT:
false_reg->umin_value = max(false_reg->umin_value, val);
true_reg->umax_value = min(true_reg->umax_value, val - 1);
break;
case BPF_JSLT:
false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
break;
case BPF_JGE:
false_reg->umax_value = min(false_reg->umax_value, val - 1);
true_reg->umin_value = max(true_reg->umin_value, val);
break;
case BPF_JSGE:
false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
break;
case BPF_JLE:
false_reg->umin_value = max(false_reg->umin_value, val + 1);
true_reg->umax_value = min(true_reg->umax_value, val);
break;
case BPF_JSLE:
false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
break;
default:
break;
}
__reg_deduce_bounds(false_reg);
__reg_deduce_bounds(true_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(false_reg);
__reg_bound_offset(true_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(false_reg);
__update_reg_bounds(true_reg);
}
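/* Worked example (hypothetical bounds) for reg_set_min_max() above: for
* "if r1 > 5" (BPF_JGT, val == 5) against a scalar r1 in [0, 10], the
* false (fall-through) copy gets umax_value = min(10, 5) == 5 and the
* true copy gets umin_value = max(0, 5 + 1) == 6, so each branch
* continues with bounds reflecting the outcome of the comparison.
*/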
/* Same as above, but for the case that dst_reg holds a constant and src_reg is
* the variable reg.
*/
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
if (__is_pointer_value(false, false_reg))
return;
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
__mark_reg_known(true_reg, val);
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure.
*/
__mark_reg_known(false_reg, val);
break;
case BPF_JSET:
false_reg->var_off = tnum_and(false_reg->var_off,
tnum_const(~val));
if (is_power_of_2(val))
true_reg->var_off = tnum_or(true_reg->var_off,
tnum_const(val));
break;
case BPF_JGT:
true_reg->umax_value = min(true_reg->umax_value, val - 1);
false_reg->umin_value = max(false_reg->umin_value, val);
break;
case BPF_JSGT:
true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
break;
case BPF_JLT:
true_reg->umin_value = max(true_reg->umin_value, val + 1);
false_reg->umax_value = min(false_reg->umax_value, val);
break;
case BPF_JSLT:
true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
break;
case BPF_JGE:
true_reg->umax_value = min(true_reg->umax_value, val);
false_reg->umin_value = max(false_reg->umin_value, val + 1);
break;
case BPF_JSGE:
true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
break;
case BPF_JLE:
true_reg->umin_value = max(true_reg->umin_value, val);
false_reg->umax_value = min(false_reg->umax_value, val - 1);
break;
case BPF_JSLE:
true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
break;
default:
break;
}
__reg_deduce_bounds(false_reg);
__reg_deduce_bounds(true_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(false_reg);
__reg_bound_offset(true_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(false_reg);
__update_reg_bounds(true_reg);
}
/* Regs are known to be equal, so intersect their min/max/var_off */
static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
struct bpf_reg_state *dst_reg)
{
src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
dst_reg->umin_value);
src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
dst_reg->umax_value);
src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
dst_reg->smin_value);
src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
dst_reg->smax_value);
src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
dst_reg->var_off);
/* We might have learned new bounds from the var_off. */
__update_reg_bounds(src_reg);
__update_reg_bounds(dst_reg);
/* We might have learned something about the sign bit. */
__reg_deduce_bounds(src_reg);
__reg_deduce_bounds(dst_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(src_reg);
__reg_bound_offset(dst_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(src_reg);
__update_reg_bounds(dst_reg);
}
static void reg_combine_min_max(struct bpf_reg_state *true_src,
struct bpf_reg_state *true_dst,
struct bpf_reg_state *false_src,
struct bpf_reg_state *false_dst,
u8 opcode)
{
switch (opcode) {
case BPF_JEQ:
__reg_combine_min_max(true_src, true_dst);
break;
case BPF_JNE:
__reg_combine_min_max(false_src, false_dst);
break;
}
}
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
struct bpf_reg_state *reg, u32 id,
bool is_null)
{
if (reg_type_may_be_null(reg->type) && reg->id == id) {
/* Old offset (both fixed and variable parts) should
* have been known-zero, because we don't allow pointer
* arithmetic on pointers that might be NULL.
*/
if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
!tnum_equals_const(reg->var_off, 0) ||
reg->off)) {
__mark_reg_known_zero(reg);
reg->off = 0;
}
if (is_null) {
reg->type = SCALAR_VALUE;
} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
if (reg->map_ptr->inner_map_meta) {
reg->type = CONST_PTR_TO_MAP;
reg->map_ptr = reg->map_ptr->inner_map_meta;
} else {
reg->type = PTR_TO_MAP_VALUE;
}
} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
reg->type = PTR_TO_SOCKET;
}
if (is_null || !reg_is_refcounted(reg)) {
/* We don't need id from this point onwards anymore,
* thus we should better reset it, so that state
* pruning has chances to take effect.
*/
reg->id = 0;
}
}
}
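/* Example of the flow handled above and in mark_ptr_or_null_regs() below:
*
* r0 = bpf_map_lookup_elem(...);
* if (r0 == 0) goto <handle miss>
* <access through r0 okay>
*
* On the r0 == 0 (is_null) branch r0 becomes a known-zero SCALAR_VALUE;
* on the other branch PTR_TO_MAP_VALUE_OR_NULL is upgraded to
* PTR_TO_MAP_VALUE (or CONST_PTR_TO_MAP for a map-in-map lookup), and all
* copies sharing the same id are converted along with it.
*/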
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
bool is_null)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg, *regs = state->regs;
u32 id = regs[regno].id;
int i, j;
if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
__release_reference_state(state, id);
for (i = 0; i < MAX_BPF_REG; i++)
mark_ptr_or_null_reg(state, &regs[i], id, is_null);
for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
mark_ptr_or_null_reg(state, reg, id, is_null);
}
}
}
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg,
struct bpf_verifier_state *this_branch,
struct bpf_verifier_state *other_branch)
{
if (BPF_SRC(insn->code) != BPF_X)
return false;
switch (BPF_OP(insn->code)) {
case BPF_JGT:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
find_good_pkt_pointers(this_branch, dst_reg,
dst_reg->type, false);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end > pkt_data', pkt_data > pkt_meta' */
find_good_pkt_pointers(other_branch, src_reg,
src_reg->type, true);
} else {
return false;
}
break;
case BPF_JLT:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
find_good_pkt_pointers(other_branch, dst_reg,
dst_reg->type, true);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end < pkt_data', pkt_data < pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg,
src_reg->type, false);
} else {
return false;
}
break;
case BPF_JGE:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
find_good_pkt_pointers(this_branch, dst_reg,
dst_reg->type, true);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
find_good_pkt_pointers(other_branch, src_reg,
src_reg->type, false);
} else {
return false;
}
break;
case BPF_JLE:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
find_good_pkt_pointers(other_branch, dst_reg,
dst_reg->type, false);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg,
src_reg->type, true);
} else {
return false;
}
break;
default:
return false;
}
return true;
}
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
struct bpf_verifier_state *this_branch = env->cur_state;
struct bpf_verifier_state *other_branch;
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode > BPF_JSLE) {
verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->src_reg);
return -EACCES;
}
} else {
if (insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg = &regs[insn->dst_reg];
if (BPF_SRC(insn->code) == BPF_K) {
int pred = is_branch_taken(dst_reg, insn->imm, opcode);
if (pred == 1) {
/* only follow the goto, ignore fall-through */
*insn_idx += insn->off;
return 0;
} else if (pred == 0) {
/* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
}
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
false);
if (!other_branch)
return -EFAULT;
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
* this is only legit if both are scalars (or pointers to the same
* object, I suppose, but we don't support that right now), because
* otherwise the different base pointers mean the offsets aren't
* comparable.
*/
if (BPF_SRC(insn->code) == BPF_X) {
if (dst_reg->type == SCALAR_VALUE &&
regs[insn->src_reg].type == SCALAR_VALUE) {
if (tnum_is_const(regs[insn->src_reg].var_off))
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].var_off.value,
opcode);
else if (tnum_is_const(dst_reg->var_off))
reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
&regs[insn->src_reg],
dst_reg->var_off.value, opcode);
else if (opcode == BPF_JEQ || opcode == BPF_JNE)
/* Comparing for equality, we can combine knowledge */
reg_combine_min_max(&other_branch_regs[insn->src_reg],
&other_branch_regs[insn->dst_reg],
&regs[insn->src_reg],
&regs[insn->dst_reg], opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
reg_type_may_be_null(dst_reg->type)) {
/* Mark all identical registers in each branch as either
* safe or unknown depending on the R == 0 or R != 0 condition.
*/
mark_ptr_or_null_regs(this_branch, insn->dst_reg,
opcode == BPF_JNE);
mark_ptr_or_null_regs(other_branch, insn->dst_reg,
opcode == BPF_JEQ);
} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
this_branch, other_branch) &&
is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->dst_reg);
return -EACCES;
}
if (env->log.level)
print_verifier_state(env, this_branch->frame[this_branch->curframe]);
return 0;
}
/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
return (struct bpf_map *) (unsigned long) imm64;
}
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
verbose(env, "invalid BPF_LD_IMM insn\n");
return -EINVAL;
}
if (insn->off != 0) {
verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
return -EINVAL;
}
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
if (insn->src_reg == 0) {
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
regs[insn->dst_reg].type = SCALAR_VALUE;
__mark_reg_known(&regs[insn->dst_reg], imm);
return 0;
}
/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
return 0;
}
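/* BPF_LD_IMM64 spans two instructions: e.g. loading the hypothetical
* constant 0x1122334455667788 encodes the low 32 bits (0x55667788) in
* insn[0].imm and the high 32 bits (0x11223344) in insn[1].imm, which is
* exactly how the value is reassembled in check_ld_imm() above and in
* ld_imm64_to_map_ptr().
*/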
static bool may_access_skb(enum bpf_prog_type type)
{
switch (type) {
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
return true;
default:
return false;
}
}
/* verify safety of LD_ABS|LD_IND instructions:
* - they can only appear in the programs where ctx == skb
* - since they are wrappers of function calls, they scratch R1-R5 registers,
* preserve R6-R9, and store return value into R0
*
* Implicit input:
* ctx == skb == R6 == CTX
*
* Explicit input:
* SRC == any register
* IMM == 32-bit immediate
*
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 mode = BPF_MODE(insn->code);
int i, err;
if (!may_access_skb(env->prog->type)) {
verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
return -EINVAL;
}
if (!env->ops->gen_ld_abs) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
if (env->subprog_cnt > 1) {
/* when a program has LD_ABS insns, JITs and the interpreter assume
* that r1 == ctx == skb, which is not the case for callees
* that can have arbitrary arguments. It's problematic
* for the main prog as well, since JITs would need to analyze
* all functions in order to make proper register save/restore
* decisions in the main prog. Hence disallow LD_ABS with calls.
*/
verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
return -EINVAL;
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
return -EINVAL;
}
/* check whether implicit source operand (register R6) is readable */
err = check_reg_arg(env, BPF_REG_6, SRC_OP);
if (err)
return err;
/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
* gen_ld_abs() may terminate the program at runtime, leading to
* reference leak.
*/
err = check_reference_leak(env);
if (err) {
verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
return err;
}
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
}
if (mode == BPF_IND) {
/* check explicit source operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
}
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* mark destination R0 register as readable, since it contains
* the value fetched from the packet.
* Already marked as written above.
*/
mark_reg_unknown(env, regs, BPF_REG_0);
return 0;
}
static int check_return_code(struct bpf_verifier_env *env)
{
struct bpf_reg_state *reg;
struct tnum range = tnum_range(0, 1);
switch (env->prog->type) {
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:
break;
default:
return 0;
}
reg = cur_regs(env) + BPF_REG_0;
if (reg->type != SCALAR_VALUE) {
verbose(env, "At program exit the register R0 is not a known value (%s)\n",
reg_type_str[reg->type]);
return -EINVAL;
}
if (!tnum_in(range, reg->var_off)) {
verbose(env, "At program exit the register R0 ");
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "has value %s", tn_buf);
} else {
verbose(env, "has unknown scalar value");
}
verbose(env, " should have been 0 or 1\n");
return -EINVAL;
}
return 0;
}
/* non-recursive DFS pseudo code
* 1 procedure DFS-iterative(G,v):
* 2 label v as discovered
* 3 let S be a stack
* 4 S.push(v)
* 5 while S is not empty
* 6 t <- S.pop()
* 7 if t is what we're looking for:
* 8 return t
* 9 for all edges e in G.adjacentEdges(t) do
* 10 if edge e is already labelled
* 11 continue with the next edge
* 12 w <- G.adjacentVertex(t,e)
* 13 if vertex w is not discovered and not explored
* 14 label e as tree-edge
* 15 label w as discovered
* 16 S.push(w)
* 17 continue at 5
* 18 else if vertex w is discovered
* 19 label e as back-edge
* 20 else
* 21 // vertex w is explored
* 22 label e as forward- or cross-edge
* 23 label t as explored
* 24 S.pop()
*
* convention:
* 0x10 - discovered
* 0x11 - discovered and fall-through edge labelled
* 0x12 - discovered and fall-through and branch edges labelled
* 0x20 - explored
*/
enum {
DISCOVERED = 0x10,
EXPLORED = 0x20,
FALLTHROUGH = 1,
BRANCH = 2,
};
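/* Example walk-through of the encoding above: a conditional jump insn t
* starts at insn_state[t] == DISCOVERED (0x10), moves to
* DISCOVERED | FALLTHROUGH (0x11) once its fall-through edge is pushed,
* then to DISCOVERED | BRANCH (0x12) once its branch edge is pushed (the
* fall-through edge is always pushed first, so the >= comparisons in
* push_insn() below suffice), and finally to EXPLORED (0x20) when popped.
*/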
#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
static int *insn_stack; /* stack of insns to process */
static int cur_stack; /* current stack index */
static int *insn_state;
/* t, w, e - match pseudo-code above:
* t - index of current instruction
* w - next instruction
* e - edge
*/
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
return 0;
if (w < 0 || w >= env->prog->len) {
verbose_linfo(env, t, "%d: ", t);
verbose(env, "jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
if (e == BRANCH)
/* mark branch target for state pruning */
env->explored_states[w] = STATE_LIST_MARK;
if (insn_state[w] == 0) {
/* tree-edge */
insn_state[t] = DISCOVERED | e;
insn_state[w] = DISCOVERED;
if (cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
verbose_linfo(env, t, "%d: ", t);
verbose_linfo(env, w, "%d: ", w);
verbose(env, "back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
/* forward- or cross-edge */
insn_state[t] = DISCOVERED | e;
} else {
verbose(env, "insn state internal bug\n");
return -EFAULT;
}
return 0;
}
/* non-recursive depth-first-search to detect loops in BPF program
* loop == back-edge in directed graph
*/
static int check_cfg(struct bpf_verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
int ret = 0;
int i, t;
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_stack) {
kfree(insn_state);
return -ENOMEM;
}
insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
insn_stack[0] = 0; /* 0 is the first instruction */
cur_stack = 1;
peek_stack:
if (cur_stack == 0)
goto check_state;
t = insn_stack[cur_stack - 1];
if (BPF_CLASS(insns[t].code) == BPF_JMP) {
u8 opcode = BPF_OP(insns[t].code);
if (opcode == BPF_EXIT) {
goto mark_explored;
} else if (opcode == BPF_CALL) {
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
if (insns[t].src_reg == BPF_PSEUDO_CALL) {
env->explored_states[t] = STATE_LIST_MARK;
ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
goto err_free;
}
/* unconditional jump with single edge */
ret = push_insn(t, t + insns[t].off + 1,
FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
/* tell verifier to check for equivalent states
* after every call and jump
*/
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
} else {
/* conditional jump with two edges */
env->explored_states[t] = STATE_LIST_MARK;
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
} else {
/* all other non-branch instructions with single
* fall-through edge
*/
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
mark_explored:
insn_state[t] = EXPLORED;
if (cur_stack-- <= 0) {
verbose(env, "pop stack internal bug\n");
ret = -EFAULT;
goto err_free;
}
goto peek_stack;
check_state:
for (i = 0; i < insn_cnt; i++) {
if (insn_state[i] != EXPLORED) {
verbose(env, "unreachable insn %d\n", i);
ret = -EINVAL;
goto err_free;
}
}
ret = 0; /* cfg looks good */
err_free:
kfree(insn_state);
kfree(insn_stack);
return ret;
}
/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE 8
#define MAX_FUNCINFO_REC_SIZE 252
static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
u32 i, nfuncs, urec_size, min_size, prev_offset;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info *krecord;
const struct btf_type *type;
struct bpf_prog *prog;
const struct btf *btf;
void __user *urecord;
int ret = 0;
nfuncs = attr->func_info_cnt;
if (!nfuncs)
return 0;
if (nfuncs != env->subprog_cnt) {
verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
return -EINVAL;
}
urec_size = attr->func_info_rec_size;
if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
urec_size > MAX_FUNCINFO_REC_SIZE ||
urec_size % sizeof(u32)) {
verbose(env, "invalid func info rec size %u\n", urec_size);
return -EINVAL;
}
prog = env->prog;
btf = prog->aux->btf;
urecord = u64_to_user_ptr(attr->func_info);
min_size = min_t(u32, krec_size, urec_size);
krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
if (!krecord)
return -ENOMEM;
for (i = 0; i < nfuncs; i++) {
ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
if (ret) {
if (ret == -E2BIG) {
verbose(env, "nonzero tailing record in func info");
/* set the size kernel expects so loader can zero
* out the rest of the record.
*/
if (put_user(min_size, &uattr->func_info_rec_size))
ret = -EFAULT;
}
goto err_free;
}
if (copy_from_user(&krecord[i], urecord, min_size)) {
ret = -EFAULT;
goto err_free;
}
/* check insn_off */
if (i == 0) {
if (krecord[i].insn_off) {
verbose(env,
"nonzero insn_off %u for the first func info record",
krecord[i].insn_off);
ret = -EINVAL;
goto err_free;
}
} else if (krecord[i].insn_off <= prev_offset) {
verbose(env,
"same or smaller insn offset (%u) than previous func info record (%u)",
krecord[i].insn_off, prev_offset);
ret = -EINVAL;
goto err_free;
}
if (env->subprog_info[i].start != krecord[i].insn_off) {
verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
ret = -EINVAL;
goto err_free;
}
/* check type_id */
type = btf_type_by_id(btf, krecord[i].type_id);
if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
verbose(env, "invalid type id %d in func info",
krecord[i].type_id);
ret = -EINVAL;
goto err_free;
}
prev_offset = krecord[i].insn_off;
urecord += urec_size;
}
prog->aux->func_info = krecord;
prog->aux->func_info_cnt = nfuncs;
return 0;
err_free:
kvfree(krecord);
return ret;
}
static void adjust_btf_func(struct bpf_verifier_env *env)
{
int i;
if (!env->prog->aux->func_info)
return;
for (i = 0; i < env->subprog_cnt; i++)
env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
}
#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
sizeof(((struct bpf_line_info *)(0))->line_col))
#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
static int check_btf_line(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
struct bpf_subprog_info *sub;
struct bpf_line_info *linfo;
struct bpf_prog *prog;
const struct btf *btf;
void __user *ulinfo;
int err;
nr_linfo = attr->line_info_cnt;
if (!nr_linfo)
return 0;
rec_size = attr->line_info_rec_size;
if (rec_size < MIN_BPF_LINEINFO_SIZE ||
rec_size > MAX_LINEINFO_REC_SIZE ||
rec_size & (sizeof(u32) - 1))
return -EINVAL;
/* Need to zero it out in case userspace passes
* in a smaller bpf_line_info object.
*/
linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
GFP_KERNEL | __GFP_NOWARN);
if (!linfo)
return -ENOMEM;
prog = env->prog;
btf = prog->aux->btf;
s = 0;
sub = env->subprog_info;
ulinfo = u64_to_user_ptr(attr->line_info);
expected_size = sizeof(struct bpf_line_info);
ncopy = min_t(u32, expected_size, rec_size);
for (i = 0; i < nr_linfo; i++) {
err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
if (err) {
if (err == -E2BIG) {
verbose(env, "nonzero tailing record in line_info");
if (put_user(expected_size,
&uattr->line_info_rec_size))
err = -EFAULT;
}
goto err_free;
}
if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
err = -EFAULT;
goto err_free;
}
/*
* Check insn_off to ensure
* 1) strictly increasing AND
* 2) bounded by prog->len
*
* The linfo[0].insn_off == 0 check logically falls into
* the later "missing bpf_line_info for func..." case
* because the first linfo[0].insn_off must match the
* start of the first subprog, and the first subprog must
* have subprog_info[0].start == 0.
*/
if ((i && linfo[i].insn_off <= prev_offset) ||
linfo[i].insn_off >= prog->len) {
verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
i, linfo[i].insn_off, prev_offset,
prog->len);
err = -EINVAL;
goto err_free;
}
if (!prog->insnsi[linfo[i].insn_off].code) {
verbose(env,
"Invalid insn code at line_info[%u].insn_off\n",
i);
err = -EINVAL;
goto err_free;
}
if (!btf_name_by_offset(btf, linfo[i].line_off) ||
!btf_name_by_offset(btf, linfo[i].file_name_off)) {
verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
err = -EINVAL;
goto err_free;
}
if (s != env->subprog_cnt) {
if (linfo[i].insn_off == sub[s].start) {
sub[s].linfo_idx = i;
s++;
} else if (sub[s].start < linfo[i].insn_off) {
verbose(env, "missing bpf_line_info for func#%u\n", s);
err = -EINVAL;
goto err_free;
}
}
prev_offset = linfo[i].insn_off;
ulinfo += rec_size;
}
if (s != env->subprog_cnt) {
verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
env->subprog_cnt - s, s);
err = -EINVAL;
goto err_free;
}
prog->aux->linfo = linfo;
prog->aux->nr_linfo = nr_linfo;
return 0;
err_free:
kvfree(linfo);
return err;
}
static int check_btf_info(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
struct btf *btf;
int err;
if (!attr->func_info_cnt && !attr->line_info_cnt)
return 0;
btf = btf_get_by_fd(attr->prog_btf_fd);
if (IS_ERR(btf))
return PTR_ERR(btf);
env->prog->aux->btf = btf;
err = check_btf_func(env, attr, uattr);
if (err)
return err;
err = check_btf_line(env, attr, uattr);
if (err)
return err;
return 0;
}
/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
struct bpf_reg_state *cur)
{
return old->umin_value <= cur->umin_value &&
old->umax_value >= cur->umax_value &&
old->smin_value <= cur->smin_value &&
old->smax_value >= cur->smax_value;
}
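/* E.g. old bounds [0, 10] admit cur bounds [2, 5] but not [2, 12]: an
* explored state is only considered to cover the current one when every
* value the register could now hold was already accounted for when the
* old state was verified.
*/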
/* Maximum number of register states that can exist at once */
#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
struct idpair {
u32 old;
u32 cur;
};
/* If in the old state two registers had the same id, then they need to have
* the same id in the new state as well. But that id could be different from
* the old state, so we need to track the mapping from old to new ids.
* Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
* regs with old id 5 must also have new id 9 for the new state to be safe. But
* regs with a different old id could still have new id 9, we don't care about
* that.
* So we look through our idmap to see if this old id has been seen before. If
* so, we require the new id to match; otherwise, we add the id pair to the map.
*/
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
{
unsigned int i;
for (i = 0; i < ID_MAP_SIZE; i++) {
if (!idmap[i].old) {
/* Reached an empty slot; haven't seen this id before */
idmap[i].old = old_id;
idmap[i].cur = cur_id;
return true;
}
if (idmap[i].old == old_id)
return idmap[i].cur == cur_id;
}
/* We ran out of idmap slots, which should be impossible */
WARN_ON_ONCE(1);
return false;
}
static void clean_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *st)
{
enum bpf_reg_liveness live;
int i, j;
for (i = 0; i < BPF_REG_FP; i++) {
live = st->regs[i].live;
/* liveness must not touch this register anymore */
st->regs[i].live |= REG_LIVE_DONE;
if (!(live & REG_LIVE_READ))
/* since the register is unused, clear its state
* to make further comparison simpler
*/
__mark_reg_not_init(&st->regs[i]);
}
for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
live = st->stack[i].spilled_ptr.live;
/* liveness must not touch this stack slot anymore */
st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
if (!(live & REG_LIVE_READ)) {
__mark_reg_not_init(&st->stack[i].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
st->stack[i].slot_type[j] = STACK_INVALID;
}
}
}
static void clean_verifier_state(struct bpf_verifier_env *env,
struct bpf_verifier_state *st)
{
int i;
if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
/* all regs in this state in all frames were already marked */
return;
for (i = 0; i <= st->curframe; i++)
clean_func_state(env, st->frame[i]);
}
/* the parentage chains form a tree.
* the verifier states are added to state lists at given insn and
* pushed into state stack for future exploration.
* when the verifier reaches the bpf_exit insn, some of the verifier states
* stored in the state lists have their final liveness state already,
* but a lot of states will get revised from liveness point of view when
* the verifier explores other branches.
* Example:
* 1: r0 = 1
* 2: if r1 == 100 goto pc+1
* 3: r0 = 2
* 4: exit
* when the verifier reaches exit insn the register r0 in the state list of
* insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
* of insn 2 and goes exploring further. At the insn 4 it will walk the
* parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
*
* Since the verifier pushes the branch states as it sees them while exploring
* the program the condition of walking the branch instruction for the second
* time means that all states below this branch were already explored and
 * their final liveness marks are already propagated.
 * Hence when the verifier completes the search of the state list in is_state_visited()
* we can call this clean_live_states() function to mark all liveness states
* as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
* will not be used.
 * This function also clears the registers and stack slots that were not
 * REG_LIVE_READ, to simplify state merging.
*
 * An important note here: walking the same branch instruction in the callee
 * doesn't mean that the states are DONE. The verifier has to compare
 * the callsites as well.
*/
static void clean_live_states(struct bpf_verifier_env *env, int insn,
struct bpf_verifier_state *cur)
{
struct bpf_verifier_state_list *sl;
int i;
sl = env->explored_states[insn];
if (!sl)
return;
while (sl != STATE_LIST_MARK) {
if (sl->state.curframe != cur->curframe)
goto next;
for (i = 0; i <= cur->curframe; i++)
if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
goto next;
clean_verifier_state(env, &sl->state);
next:
sl = sl->next;
}
}
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
{
bool equal;
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
if (rold->type == PTR_TO_STACK)
/* two stack pointers are equal only if they're pointing to
* the same stack frame, since fp-8 in foo != fp-8 in bar
*/
return equal && rold->frameno == rcur->frameno;
if (equal)
return true;
if (rold->type == NOT_INIT)
/* explored state can't have used this */
return true;
if (rcur->type == NOT_INIT)
return false;
switch (rold->type) {
case SCALAR_VALUE:
if (rcur->type == SCALAR_VALUE) {
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
/* We're trying to use a pointer in place of a scalar.
* Even if the scalar was unbounded, this could lead to
* pointer leaks because scalars are allowed to leak
* while pointers are not. We could make this safe in
* special cases if root is calling us, but it's
* probably not worth the hassle.
*/
return false;
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and
* everything else matches, we are OK.
* We don't care about the 'id' value, because nothing
* uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
*/
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_MAP_VALUE_OR_NULL:
/* a PTR_TO_MAP_VALUE could be safe to use as a
* PTR_TO_MAP_VALUE_OR_NULL into the same map.
* However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
* checked, doing so could have affected others with the same
* id, and we can't check for that because we lost the id when
* we converted to a PTR_TO_MAP_VALUE.
*/
if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
return false;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
return false;
/* Check our ids match any regs they're supposed to */
return check_ids(rold->id, rcur->id, idmap);
case PTR_TO_PACKET_META:
case PTR_TO_PACKET:
if (rcur->type != rold->type)
return false;
/* We must have at least as much range as the old ptr
* did, so that any accesses which were safe before are
* still safe. This is true even if old range < old off,
* since someone could have accessed through (ptr - k), or
* even done ptr -= k in a register, to get a safe access.
*/
if (rold->range > rcur->range)
return false;
/* If the offsets don't match, we can't trust our alignment;
* nor can we be sure that we won't fall out of range.
*/
if (rold->off != rcur->off)
return false;
/* id relations must be preserved */
if (rold->id && !check_ids(rold->id, rcur->id, idmap))
return false;
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
case PTR_TO_PACKET_END:
case PTR_TO_FLOW_KEYS:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
/* Only valid matches are exact, which memcmp() above
* would have accepted
*/
default:
/* Don't know what's going on, just say it's not safe */
return false;
}
/* Shouldn't get here; if we do, say it's not safe */
WARN_ON_ONCE(1);
return false;
}
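/* A minimal illustration of the PTR_TO_PACKET range rule above (values
 * hypothetical): if the explored (old) pointer had range 16, accesses up
 * to 16 bytes were proven safe, so the current pointer must cover at
 * least as much:
 *
 *	rold->range = 16, rcur->range = 32	// range check passes
 *	rold->range = 32, rcur->range = 16	// regsafe() returns false
 */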
static bool stacksafe(struct bpf_func_state *old,
struct bpf_func_state *cur,
struct idpair *idmap)
{
int i, spi;
/* walk slots of the explored stack and ignore any additional
* slots in the current stack, since explored(safe) state
* didn't use them
*/
for (i = 0; i < old->allocated_stack; i++) {
spi = i / BPF_REG_SIZE;
if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
i += BPF_REG_SIZE - 1;
/* explored state didn't use this */
continue;
}
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
/* explored stack has more populated slots than current stack
* and these slots were used
*/
if (i >= cur->allocated_stack)
return false;
/* if old state was safe with misc data in the stack
* it will be safe with zero-initialized stack.
* The opposite is not true
*/
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
continue;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
/* Ex: old explored (safe) state has STACK_SPILL in
 * this stack slot, but current has STACK_MISC ->
 * these verifier states are not equivalent,
* return false to continue verification of this path
*/
return false;
if (i % BPF_REG_SIZE)
continue;
if (old->stack[spi].slot_type[0] != STACK_SPILL)
continue;
if (!regsafe(&old->stack[spi].spilled_ptr,
&cur->stack[spi].spilled_ptr,
idmap))
/* when explored and current stack slot are both storing
* spilled registers, check that stored pointers types
* are the same as well.
* Ex: explored safe path could have stored
* (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
* but current path has stored:
* (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
* such verifier states are not equivalent.
* return false to continue verification of this path
*/
return false;
}
return true;
}
static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
{
if (old->acquired_refs != cur->acquired_refs)
return false;
return !memcmp(old->refs, cur->refs,
sizeof(*old->refs) * old->acquired_refs);
}
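/* Note that refsafe() demands an exact match: the same number of
 * acquired references and a byte-identical refs array. E.g.
 * (hypothetical ids) a state holding a socket reference with id 3 never
 * matches one holding id 4, since the eventual release must see the
 * matching id.
 */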
/* compare two verifier states
*
* all states stored in state_list are known to be valid, since
* verifier reached 'bpf_exit' instruction through them
*
* this function is called when verifier exploring different branches of
* execution popped from the state stack. If it sees an old state that has
* more strict register state and more strict stack state then this execution
* branch doesn't need to be explored further, since verifier already
* concluded that more strict state leads to valid finish.
*
* Therefore two states are equivalent if register state is more conservative
* and explored stack state is more conservative than the current one.
* Example:
* explored current
* (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
* (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
*
* In other words if current stack state (one being explored) has more
* valid slots than old one that already passed validation, it means
* the verifier can stop exploring and conclude that current state is valid too
*
* Similarly with registers. If explored state has register type as invalid
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
static bool func_states_equal(struct bpf_func_state *old,
struct bpf_func_state *cur)
{
struct idpair *idmap;
bool ret = false;
int i;
idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
/* If we failed to allocate the idmap, just say it's not safe */
if (!idmap)
return false;
for (i = 0; i < MAX_BPF_REG; i++) {
if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
goto out_free;
}
if (!stacksafe(old, cur, idmap))
goto out_free;
if (!refsafe(old, cur))
goto out_free;
ret = true;
out_free:
kfree(idmap);
return ret;
}
static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{
int i;
if (old->curframe != cur->curframe)
return false;
/* Verification state from speculative execution simulation
* must never prune a non-speculative execution one.
*/
if (old->speculative && !cur->speculative)
return false;
/* for states to be equal callsites have to be the same
* and all frame states need to be equivalent
*/
for (i = 0; i <= old->curframe; i++) {
if (old->frame[i]->callsite != cur->frame[i]->callsite)
return false;
if (!func_states_equal(old->frame[i], cur->frame[i]))
return false;
}
return true;
}
/* A write screens off any subsequent reads; but write marks come from the
* straight-line code between a state and its parent. When we arrive at an
* equivalent state (jump target or such) we didn't arrive by the straight-line
* code, so read marks in the state must propagate to the parent regardless
* of the state's write marks. That's what 'parent == state->parent' comparison
* in mark_reg_read() is for.
*/
static int propagate_liveness(struct bpf_verifier_env *env,
const struct bpf_verifier_state *vstate,
struct bpf_verifier_state *vparent)
{
int i, frame, err = 0;
struct bpf_func_state *state, *parent;
if (vparent->curframe != vstate->curframe) {
WARN(1, "propagate_live: parent frame %d current frame %d\n",
vparent->curframe, vstate->curframe);
return -EFAULT;
}
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */
for (i = 0; i < BPF_REG_FP; i++) {
if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
continue;
if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
&vparent->frame[vstate->curframe]->regs[i]);
if (err)
return err;
}
}
/* ... and stack slots */
for (frame = 0; frame <= vstate->curframe; frame++) {
state = vstate->frame[frame];
parent = vparent->frame[frame];
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
i < parent->allocated_stack / BPF_REG_SIZE; i++) {
if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
continue;
if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
mark_reg_read(env, &state->stack[i].spilled_ptr,
&parent->stack[i].spilled_ptr);
}
}
return err;
}
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state, *new;
int i, j, err, states_cnt = 0;
sl = env->explored_states[insn_idx];
if (!sl)
/* this 'insn_idx' instruction wasn't marked, so we will not
* be doing state search here
*/
return 0;
clean_live_states(env, insn_idx, cur);
while (sl != STATE_LIST_MARK) {
if (states_equal(env, &sl->state, cur)) {
/* reached equivalent register/stack state,
* prune the search.
* Registers read by the continuation are read by us.
* If we have any write marks in env->cur_state, they
* will prevent corresponding reads in the continuation
* from reaching our parent (an explored_state). Our
* own state will get the read marks recorded, but
* they'll be immediately forgotten as we're pruning
* this state and will pop a new one.
*/
err = propagate_liveness(env, &sl->state, cur);
if (err)
return err;
return 1;
}
sl = sl->next;
states_cnt++;
}
if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
return 0;
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
 * but it will either reach the outermost bpf_exit (which means it's safe)
* or it will be rejected. Since there are no loops, we won't be
* seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
* again on the way to bpf_exit
*/
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
if (!new_sl)
return -ENOMEM;
/* add new state to the head of linked list */
new = &new_sl->state;
err = copy_verifier_state(new, cur);
if (err) {
free_verifier_state(new, false);
kfree(new_sl);
return err;
}
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
/* connect new state to parentage chain. Current frame needs all
* registers connected. Only r6 - r9 of the callers are alive (pushed
* to the stack implicitly by JITs) so in callers' frames connect just
* r6 - r9 as an optimization. Callers will have r1 - r5 connected to
* the state of the call instruction (with WRITTEN set), and r0 comes
* from callee with its full parentage chain, anyway.
*/
for (j = 0; j <= cur->curframe; j++)
for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
/* clear write marks in current state: the writes we did are not writes
* our child did, so they don't screen off its reads from us.
* (There are no read marks in current state, because reads always mark
* their parent and current state never has children yet. Only
* explored_states can get read marks.)
*/
for (i = 0; i < BPF_REG_FP; i++)
cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
/* all stack frames are accessible from callee, clear them all */
for (j = 0; j <= cur->curframe; j++) {
struct bpf_func_state *frame = cur->frame[j];
struct bpf_func_state *newframe = new->frame[j];
for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
frame->stack[i].spilled_ptr.parent =
&newframe->stack[i].spilled_ptr;
}
}
return 0;
}
/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{
switch (type) {
case PTR_TO_CTX:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
return false;
default:
return true;
}
}
/* If an instruction was previously used with particular pointer types, then we
 * need to be careful to avoid cases such as the one below, where it may be OK
 * for one branch to access the pointer, but not OK for the other branch:
*
* R1 = sock_ptr
* goto X;
* ...
* R1 = some_other_valid_ptr;
* goto X;
* ...
* R2 = *(u32 *)(R1 + 0);
*/
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{
return src != prev && (!reg_type_mismatch_ok(src) ||
!reg_type_mismatch_ok(prev));
}
static int do_check(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
int insn_cnt = env->prog->len, i;
int insn_processed = 0;
bool do_print_state = false;
env->prev_linfo = NULL;
state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
state->curframe = 0;
state->speculative = false;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
if (!state->frame[0]) {
kfree(state);
return -ENOMEM;
}
env->cur_state = state;
init_func_state(env, state->frame[0],
BPF_MAIN_FUNC /* callsite */,
0 /* frameno */,
0 /* subprogno, zero == main subprog */);
for (;;) {
struct bpf_insn *insn;
u8 class;
int err;
if (env->insn_idx >= insn_cnt) {
verbose(env, "invalid insn idx %d insn_cnt %d\n",
env->insn_idx, insn_cnt);
return -EFAULT;
}
insn = &insns[env->insn_idx];
class = BPF_CLASS(insn->code);
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
verbose(env,
"BPF program is too large. Processed %d insn\n",
insn_processed);
return -E2BIG;
}
err = is_state_visited(env, env->insn_idx);
if (err < 0)
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
if (env->log.level) {
if (do_print_state)
verbose(env, "\nfrom %d to %d%s: safe\n",
env->prev_insn_idx, env->insn_idx,
env->cur_state->speculative ?
" (speculative execution)" : "");
else
verbose(env, "%d: safe\n", env->insn_idx);
}
goto process_bpf_exit;
}
if (signal_pending(current))
return -EAGAIN;
if (need_resched())
cond_resched();
if (env->log.level > 1 || (env->log.level && do_print_state)) {
if (env->log.level > 1)
verbose(env, "%d:", env->insn_idx);
else
verbose(env, "\nfrom %d to %d%s:",
env->prev_insn_idx, env->insn_idx,
env->cur_state->speculative ?
" (speculative execution)" : "");
print_verifier_state(env, state->frame[state->curframe]);
do_print_state = false;
}
if (env->log.level) {
const struct bpf_insn_cbs cbs = {
.cb_print = verbose,
.private_data = env,
};
verbose_linfo(env, env->insn_idx, "; ");
verbose(env, "%d: ", env->insn_idx);
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
if (bpf_prog_is_dev_bound(env->prog->aux)) {
err = bpf_prog_offload_verify_insn(env, env->insn_idx,
env->prev_insn_idx);
if (err)
return err;
}
regs = cur_regs(env);
env->insn_aux_data[env->insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
} else if (class == BPF_LDX) {
enum bpf_reg_type *prev_src_type, src_reg_type;
/* check for reserved fields is already done */
/* check src operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
src_reg_type = regs[insn->src_reg].type;
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
err = check_mem_access(env, env->insn_idx, insn->src_reg,
insn->off, BPF_SIZE(insn->code),
BPF_READ, insn->dst_reg, false);
if (err)
return err;
prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
if (*prev_src_type == NOT_INIT) {
/* saw a valid insn
* dst_reg = *(u32 *)(src_reg + off)
* save type to validate intersecting paths
*/
*prev_src_type = src_reg_type;
} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
/* A buggy or malicious program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
* src_reg == ctx in one branch and
* src_reg == stack|map in some other branch.
* Reject it.
*/
verbose(env, "same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_STX) {
enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, env->insn_idx, insn);
if (err)
return err;
env->insn_idx++;
continue;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
insn->off, BPF_SIZE(insn->code),
BPF_WRITE, insn->src_reg, false);
if (err)
return err;
prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
if (*prev_dst_type == NOT_INIT) {
*prev_dst_type = dst_reg_type;
} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
verbose(env, "same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM ||
insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_ST uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_ctx_reg(env, insn->dst_reg)) {
verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
insn->dst_reg,
reg_type_str[reg_state(env, insn->dst_reg)->type]);
return -EACCES;
}
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
insn->off, BPF_SIZE(insn->code),
BPF_WRITE, -1, false);
if (err)
return err;
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
(insn->src_reg != BPF_REG_0 &&
insn->src_reg != BPF_PSEUDO_CALL) ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_CALL uses reserved fields\n");
return -EINVAL;
}
if (insn->src_reg == BPF_PSEUDO_CALL)
err = check_func_call(env, insn, &env->insn_idx);
else
err = check_helper_call(env, insn->imm, env->insn_idx);
if (err)
return err;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_JA uses reserved fields\n");
return -EINVAL;
}
env->insn_idx += insn->off + 1;
continue;
} else if (opcode == BPF_EXIT) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
if (state->curframe) {
/* exit from nested function */
env->prev_insn_idx = env->insn_idx;
err = prepare_func_exit(env, &env->insn_idx);
if (err)
return err;
do_print_state = true;
continue;
}
err = check_reference_leak(env);
if (err)
return err;
/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
* of bpf_exit, which means that program wrote
* something into it earlier
*/
err = check_reg_arg(env, BPF_REG_0, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, BPF_REG_0)) {
verbose(env, "R0 leaks addr as return value\n");
return -EACCES;
}
err = check_return_code(env);
if (err)
return err;
process_bpf_exit:
err = pop_stack(env, &env->prev_insn_idx,
&env->insn_idx);
if (err < 0) {
if (err != -ENOENT)
return err;
break;
} else {
do_print_state = true;
continue;
}
} else {
err = check_cond_jmp_op(env, insn, &env->insn_idx);
if (err)
return err;
}
} else if (class == BPF_LD) {
u8 mode = BPF_MODE(insn->code);
if (mode == BPF_ABS || mode == BPF_IND) {
err = check_ld_abs(env, insn);
if (err)
return err;
} else if (mode == BPF_IMM) {
err = check_ld_imm(env, insn);
if (err)
return err;
env->insn_idx++;
env->insn_aux_data[env->insn_idx].seen = true;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
}
} else {
verbose(env, "unknown insn class %d\n", class);
return -EINVAL;
}
env->insn_idx++;
}
verbose(env, "processed %d insns (limit %d), stack depth ",
insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
for (i = 0; i < env->subprog_cnt; i++) {
u32 depth = env->subprog_info[i].stack_depth;
verbose(env, "%d", depth);
if (i + 1 < env->subprog_cnt)
verbose(env, "+");
}
verbose(env, "\n");
env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
return 0;
}
static int check_map_prealloc(struct bpf_map *map)
{
return (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
!(map->map_flags & BPF_F_NO_PREALLOC);
}
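/* A minimal illustration (the flags are from the UAPI, the scenario is
 * hypothetical): a BPF_MAP_TYPE_HASH created without BPF_F_NO_PREALLOC
 * passes this check, while the same map created with BPF_F_NO_PREALLOC
 * fails it:
 *
 *	map->map_type = BPF_MAP_TYPE_HASH;
 *	map->map_flags = 0;			// check_map_prealloc() != 0
 *	map->map_flags = BPF_F_NO_PREALLOC;	// check_map_prealloc() == 0
 */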
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
{
/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
* preallocated hash maps, since doing memory allocation
* in overflow_handler can crash depending on where nmi got
* triggered.
*/
if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
if (!check_map_prealloc(map)) {
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
if (map->inner_map_meta &&
!check_map_prealloc(map->inner_map_meta)) {
verbose(env, "perf_event programs can only use preallocated inner hash map\n");
return -EINVAL;
}
}
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
!bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
return -EINVAL;
}
return 0;
}
static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{
return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}
/* look for pseudo eBPF instructions that access map FDs and
* replace them with actual map pointers
*/
static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, j, err;
err = bpf_prog_calc_tag(env->prog);
if (err)
return err;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
verbose(env, "BPF_LDX uses reserved fields\n");
return -EINVAL;
}
if (BPF_CLASS(insn->code) == BPF_STX &&
((BPF_MODE(insn->code) != BPF_MEM &&
BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
verbose(env, "BPF_STX uses reserved fields\n");
return -EINVAL;
}
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
insn[1].off != 0) {
verbose(env, "invalid bpf_ld_imm64 insn\n");
return -EINVAL;
}
if (insn->src_reg == 0)
/* valid generic load 64-bit imm */
goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
verbose(env,
"unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
f = fdget(insn->imm);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n",
insn->imm);
return PTR_ERR(map);
}
err = check_map_prog_compatibility(env, map, env->prog);
if (err) {
fdput(f);
return err;
}
/* store map pointer inside BPF_LD_IMM64 instruction */
insn[0].imm = (u32) (unsigned long) map;
insn[1].imm = ((u64) (unsigned long) map) >> 32;
/* check whether we recorded this map already */
for (j = 0; j < env->used_map_cnt; j++)
if (env->used_maps[j] == map) {
fdput(f);
goto next_insn;
}
if (env->used_map_cnt >= MAX_USED_MAPS) {
fdput(f);
return -E2BIG;
}
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_used_maps()
*/
map = bpf_map_inc(map, false);
if (IS_ERR(map)) {
fdput(f);
return PTR_ERR(map);
}
env->used_maps[env->used_map_cnt++] = map;
if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog, map)) {
verbose(env, "only one cgroup storage of each type is allowed\n");
fdput(f);
return -EBUSY;
}
fdput(f);
next_insn:
insn++;
i++;
continue;
}
/* Basic sanity check before we invest more work here. */
if (!bpf_opcode_in_insntable(insn->code)) {
verbose(env, "unknown opcode %02x\n", insn->code);
return -EINVAL;
}
}
/* now all pseudo BPF_LD_IMM64 instructions load valid
* 'struct bpf_map *' into a register instead of user map_fd.
* These pointers will be used later by verifier to validate map access.
*/
return 0;
}
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
enum bpf_cgroup_storage_type stype;
int i;
for_each_cgroup_storage_type(stype) {
if (!env->prog->aux->cgroup_storage[stype])
continue;
bpf_cgroup_storage_release(env->prog,
env->prog->aux->cgroup_storage[stype]);
}
for (i = 0; i < env->used_map_cnt; i++)
bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++, insn++)
if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
insn->src_reg = 0;
}
/* single env->prog->insnsi[off] instruction was replaced with the range
 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
* [0, off) and [off, end) to new locations, so the patched range stays zero
*/
static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
int i;
if (cnt == 1)
return 0;
new_data = vzalloc(array_size(prog_len,
sizeof(struct bpf_insn_aux_data)));
if (!new_data)
return -ENOMEM;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
for (i = off; i < off + cnt - 1; i++)
new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
}
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
int i;
if (len == 1)
return;
/* NOTE: fake 'exit' subprog should be updated as well. */
for (i = 0; i <= env->subprog_cnt; i++) {
if (env->subprog_info[i].start <= off)
continue;
env->subprog_info[i].start += len - 1;
}
}
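/* A minimal illustration (numbers hypothetical): patching one insn at
 * off = 10 with a 3-insn sequence grows the program by len - 1 = 2, so
 * every subprog that starts after insn 10 slides down by 2:
 *
 *	subprog_info[].start: { 0, 8, 12, 20 }
 *	adjust_subprog_starts(env, 10, 3);
 *	subprog_info[].start: { 0, 8, 14, 22 }
 */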
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
struct bpf_prog *new_prog;
new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
if (!new_prog)
return NULL;
if (adjust_insn_aux_data(env, new_prog->len, off, len))
return NULL;
adjust_subprog_starts(env, off, len);
return new_prog;
}
/* The verifier does more data flow analysis than llvm and will not
* explore branches that are dead at run time. Malicious programs can
* have dead code too. Therefore replace all dead at-run-time code
* with 'ja -1'.
*
 * Plain nops would not be optimal: e.g. if they sat at the end of the
 * program and, through another bug, we managed to jump there, we would
 * execute beyond program memory. Returning an exception
* code also wouldn't work since we can have subprogs where the dead
* code could be located.
*/
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++) {
if (aux_data[i].seen)
continue;
memcpy(insn + i, &trap, sizeof(trap));
}
}
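/* The effect of the trap above, informally: 'ja -1' at insn i sets the
 * next pc to i + (-1) + 1 == i, i.e. the instruction jumps to itself.
 * Verified code can never reach it, and if anything ever did, it would
 * spin in place instead of running off the end of the image.
 */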
/* convert load instructions that access fields of a context type into a
* sequence of instructions that access fields of the underlying structure:
* struct __sk_buff -> struct sk_buff
* struct bpf_sock_ops -> struct sock
*/
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
const struct bpf_verifier_ops *ops = env->ops;
int i, cnt, size, ctx_field_size, delta = 0;
const int insn_cnt = env->prog->len;
struct bpf_insn insn_buf[16], *insn;
u32 target_size, size_default, off;
struct bpf_prog *new_prog;
enum bpf_access_type type;
bool is_narrower_load;
if (ops->gen_prologue || env->seen_direct_write) {
if (!ops->gen_prologue) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
if (cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
} else if (cnt) {
new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
env->prog = new_prog;
delta += cnt - 1;
}
}
if (bpf_prog_is_dev_bound(env->prog->aux))
return 0;
insn = env->prog->insnsi + delta;
for (i = 0; i < insn_cnt; i++, insn++) {
bpf_convert_ctx_access_t convert_ctx_access;
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
type = BPF_READ;
else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
insn->code == (BPF_STX | BPF_MEM | BPF_DW))
type = BPF_WRITE;
else
continue;
if (type == BPF_WRITE &&
env->insn_aux_data[i + delta].sanitize_stack_off) {
struct bpf_insn patch[] = {
/* Sanitize suspicious stack slot with zero.
* There are no memory dependencies for this store,
* since it's only using frame pointer and immediate
* constant of zero
*/
BPF_ST_MEM(BPF_DW, BPF_REG_FP,
env->insn_aux_data[i + delta].sanitize_stack_off,
0),
/* the original STX instruction will immediately
* overwrite the same stack slot with appropriate value
*/
*insn,
};
cnt = ARRAY_SIZE(patch);
new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
switch (env->insn_aux_data[i + delta].ptr_type) {
case PTR_TO_CTX:
if (!ops->convert_ctx_access)
continue;
convert_ctx_access = ops->convert_ctx_access;
break;
case PTR_TO_SOCKET:
convert_ctx_access = bpf_sock_convert_ctx_access;
break;
default:
continue;
}
ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
size = BPF_LDST_BYTES(insn);
/* If the read access is a narrower load of the field,
 * convert it to a 4/8-byte load, to minimize program type specific
 * convert_ctx_access changes. If the conversion is successful,
* we will apply proper mask to the result.
*/
is_narrower_load = size < ctx_field_size;
size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
off = insn->off;
if (is_narrower_load) {
u8 size_code;
if (type == BPF_WRITE) {
verbose(env, "bpf verifier narrow ctx access misconfigured\n");
return -EINVAL;
}
size_code = BPF_H;
if (ctx_field_size == 4)
size_code = BPF_W;
else if (ctx_field_size == 8)
size_code = BPF_DW;
insn->off = off & ~(size_default - 1);
insn->code = BPF_LDX | BPF_MEM | size_code;
}
target_size = 0;
cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
&target_size);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
(ctx_field_size && !target_size)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
if (is_narrower_load && size < target_size) {
u8 shift = (off & (size_default - 1)) * 8;
if (ctx_field_size <= 4) {
if (shift)
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
insn->dst_reg,
shift);
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
} else {
if (shift)
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
insn->dst_reg,
shift);
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
}
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = new_prog;
insn = new_prog->insnsi + i + delta;
}
return 0;
}
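/* A minimal illustration of the narrow-load rewrite above (offsets
 * hypothetical): a 1-byte read at offset 14 of a 4-byte context field
 * starting at offset 12 is widened to a 4-byte read at offset 12, and
 * the wanted byte is then extracted by the patched sequence:
 *
 *	r0 = *(u8 *)(ctx + 14)
 * becomes, roughly:
 *	r0 = *(u32 *)(ctx + 12)
 *	r0 >>= 16		// shift = (14 & 3) * 8
 *	r0 &= 0xff		// mask = (1 << 1 * 8) - 1
 */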
static int jit_subprogs(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog, **func, *tmp;
int i, j, subprog_start, subprog_end = 0, len, subprog;
struct bpf_insn *insn;
void *old_bpf_func;
int err;
if (env->subprog_cnt <= 1)
return 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
/* Upon error here we cannot fall back to interpreter but
* need a hard reject of the program. Thus -EFAULT is
* propagated in any case.
*/
subprog = find_subprog(env, i + insn->imm + 1);
if (subprog < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
i + insn->imm + 1);
return -EFAULT;
}
/* temporarily remember subprog id inside insn instead of
* aux_data, since next loop will split up all insns into funcs
*/
insn->off = subprog;
/* remember original imm in case JIT fails and fallback
* to interpreter will be needed
*/
env->insn_aux_data[i].call_imm = insn->imm;
/* point imm to __bpf_call_base+1 from JITs point of view */
insn->imm = 1;
}
err = bpf_prog_alloc_jited_linfo(prog);
if (err)
goto out_undo_insn;
err = -ENOMEM;
func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
if (!func)
goto out_undo_insn;
for (i = 0; i < env->subprog_cnt; i++) {
subprog_start = subprog_end;
subprog_end = env->subprog_info[i + 1].start;
len = subprog_end - subprog_start;
func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
if (!func[i])
goto out_free;
memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
len * sizeof(struct bpf_insn));
func[i]->type = prog->type;
func[i]->len = len;
if (bpf_prog_calc_tag(func[i]))
goto out_free;
func[i]->is_func = 1;
func[i]->aux->func_idx = i;
/* the btf and func_info will be freed only at prog->aux */
func[i]->aux->btf = prog->aux->btf;
func[i]->aux->func_info = prog->aux->func_info;
/* Use bpf_prog_F_tag to indicate functions in stack traces.
 * Long term we would need debug info to populate names.
*/
func[i]->aux->name[0] = 'F';
func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
func[i]->jit_requested = 1;
func[i]->aux->linfo = prog->aux->linfo;
func[i]->aux->nr_linfo = prog->aux->nr_linfo;
func[i]->aux->jited_linfo = prog->aux->jited_linfo;
func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
goto out_free;
}
cond_resched();
}
/* at this point all bpf functions were successfully JITed
* now populate all bpf_calls with correct addresses and
* run last pass of JIT
*/
for (i = 0; i < env->subprog_cnt; i++) {
insn = func[i]->insnsi;
for (j = 0; j < func[i]->len; j++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
subprog = insn->off;
insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
func[subprog]->bpf_func -
__bpf_call_base;
}
/* we use the aux data to keep a list of the start addresses
* of the JITed images for each function in the program
*
* for some architectures, such as powerpc64, the imm field
* might not be large enough to hold the offset of the start
* address of the callee's JITed image from __bpf_call_base
*
* in such cases, we can lookup the start address of a callee
* by using its subprog id, available from the off field of
* the call instruction, as an index for this list
*/
func[i]->aux->func = func;
func[i]->aux->func_cnt = env->subprog_cnt;
}
for (i = 0; i < env->subprog_cnt; i++) {
old_bpf_func = func[i]->bpf_func;
tmp = bpf_int_jit_compile(func[i]);
if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
err = -ENOTSUPP;
goto out_free;
}
cond_resched();
}
/* finally lock prog and jit images for all functions and
 * populate kallsyms
*/
for (i = 0; i < env->subprog_cnt; i++) {
bpf_prog_lock_ro(func[i]);
bpf_prog_kallsyms_add(func[i]);
}
/* Last step: make now unused interpreter insns from main
* prog consistent for later dump requests, so they can
* later look the same as if they were interpreted only.
*/
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
insn->off = env->insn_aux_data[i].call_imm;
subprog = find_subprog(env, i + insn->off + 1);
insn->imm = subprog;
}
prog->jited = 1;
prog->bpf_func = func[0]->bpf_func;
prog->aux->func = func;
prog->aux->func_cnt = env->subprog_cnt;
bpf_prog_free_unused_jited_linfo(prog);
return 0;
out_free:
for (i = 0; i < env->subprog_cnt; i++)
if (func[i])
bpf_jit_free(func[i]);
kfree(func);
out_undo_insn:
/* cleanup main prog to be interpreted */
prog->jit_requested = 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
insn->off = 0;
insn->imm = env->insn_aux_data[i].call_imm;
}
bpf_prog_free_jited_linfo(prog);
return err;
}
static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
int i, depth;
#endif
int err = 0;
if (env->prog->jit_requested &&
!bpf_prog_is_dev_bound(env->prog->aux)) {
err = jit_subprogs(env);
if (err == 0)
return 0;
if (err == -EFAULT)
return err;
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
for (i = 0; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
depth = get_callee_stack_depth(env, insn, i);
if (depth < 0)
return depth;
bpf_patch_call_args(insn, depth);
}
err = 0;
#endif
return err;
}
/* fixup insn->imm field of bpf_call instructions
* and inline eligible helpers as explicit sequence of BPF instructions
*
* this function is called after eBPF program passed verification
*/
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
const struct bpf_func_proto *fn;
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
struct bpf_insn mask_and_div[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx div 0 -> 0 */
BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
*insn,
};
struct bpf_insn mask_and_mod[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx mod 0 -> Rx */
BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
*insn,
};
struct bpf_insn *patchlet;
if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
patchlet = mask_and_div + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
} else {
patchlet = mask_and_mod + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
}
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (BPF_CLASS(insn->code) == BPF_LD &&
(BPF_MODE(insn->code) == BPF_ABS ||
BPF_MODE(insn->code) == BPF_IND)) {
cnt = env->ops->gen_ld_abs(insn, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
struct bpf_insn insn_buf[16];
struct bpf_insn *patch = &insn_buf[0];
bool issrc, isneg;
u32 off_reg;
aux = &env->insn_aux_data[i + delta];
if (!aux->alu_state)
continue;
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
BPF_ALU_SANITIZE_SRC;
off_reg = issrc ? insn->src_reg : insn->dst_reg;
if (isneg)
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
if (issrc) {
*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
off_reg);
insn->src_reg = BPF_REG_AX;
} else {
*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
BPF_REG_AX);
}
if (isneg)
insn->code = insn->code == code_add ?
code_sub : code_add;
*patch++ = *insn;
if (issrc && isneg)
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
cnt = patch - insn_buf;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->src_reg == BPF_PSEUDO_CALL)
continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_override_return)
prog->kprobe_override = 1;
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
* be replaced dynamically during runtime in
* the program array.
*/
prog->cb_access = 1;
env->prog->aux->stack_depth = MAX_BPF_STACK;
env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
/* mark bpf_tail_call as different opcode to avoid
 * conditional branch in the interpreter for every normal
* call and to prevent accidental JITing by JIT compiler
* that doesn't support bpf_tail_call yet
*/
insn->imm = 0;
insn->code = BPF_JMP | BPF_TAIL_CALL;
aux = &env->insn_aux_data[i + delta];
if (!bpf_map_ptr_unpriv(aux))
continue;
/* instead of changing every JIT dealing with tail_call
* emit two extra insns:
* if (index >= max_entries) goto out;
* index &= array->index_mask;
* to avoid out-of-bounds cpu speculation
*/
if (bpf_map_ptr_poisoned(aux)) {
verbose(env, "tail_call abusing map_ptr\n");
return -EINVAL;
}
map_ptr = BPF_MAP_PTR(aux->map_state);
insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
map_ptr->max_entries, 2);
insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
container_of(map_ptr,
struct bpf_array,
map)->index_mask);
insn_buf[2] = *insn;
cnt = 3;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* and other inlining handlers are currently limited to 64 bit
* only.
*/
if (prog->jit_requested && BITS_PER_LONG == 64 &&
(insn->imm == BPF_FUNC_map_lookup_elem ||
insn->imm == BPF_FUNC_map_update_elem ||
insn->imm == BPF_FUNC_map_delete_elem ||
insn->imm == BPF_FUNC_map_push_elem ||
insn->imm == BPF_FUNC_map_pop_elem ||
insn->imm == BPF_FUNC_map_peek_elem)) {
aux = &env->insn_aux_data[i + delta];
if (bpf_map_ptr_poisoned(aux))
goto patch_call_imm;
map_ptr = BPF_MAP_PTR(aux->map_state);
ops = map_ptr->ops;
if (insn->imm == BPF_FUNC_map_lookup_elem &&
ops->map_gen_lookup) {
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta,
insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
(int (*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_update_elem,
(int (*)(struct bpf_map *map, void *key, void *value,
u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_push_elem,
(int (*)(struct bpf_map *map, void *value,
u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_update_elem:
insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_delete_elem:
insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_push_elem:
insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_pop_elem:
insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_peek_elem:
insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
__bpf_call_base;
continue;
}
goto patch_call_imm;
}
patch_call_imm:
fn = env->ops->get_func_proto(insn->imm, env->prog);
/* all functions that have prototype and verifier allowed
* programs to call them, must be real in-kernel functions
*/
if (!fn->func) {
verbose(env,
"kernel subsystem misconfigured func %s#%d\n",
func_id_name(insn->imm), insn->imm);
return -EFAULT;
}
insn->imm = fn->func - __bpf_call_base;
}
return 0;
}
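/* A minimal illustration of the divide-by-zero patch above (64-bit
 * case, rendered informally): 'r0 /= r1' becomes
 *
 *	if r1 != 0 goto +2
 *	r0 ^= r0		// r0 / 0 -> 0 per eBPF semantics
 *	goto +1
 *	r0 /= r1
 *
 * while the 32-bit variant additionally keeps the leading
 * BPF_MOV32_REG() so the divisor is first truncated to 32 bits.
 */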
static void free_states(struct bpf_verifier_env *env)
{
struct bpf_verifier_state_list *sl, *sln;
int i;
if (!env->explored_states)
return;
for (i = 0; i < env->prog->len; i++) {
sl = env->explored_states[i];
if (sl)
while (sl != STATE_LIST_MARK) {
sln = sl->next;
free_verifier_state(&sl->state, false);
kfree(sl);
sl = sln;
}
}
kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
union bpf_attr __user *uattr)
{
struct bpf_verifier_env *env;
struct bpf_verifier_log *log;
int ret = -EINVAL;
/* no program is valid */
if (ARRAY_SIZE(bpf_verifier_ops) == 0)
return -EINVAL;
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
log = &env->log;
env->insn_aux_data =
vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
(*prog)->len));
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
	/* grab the mutex to protect a few globals used by the verifier */
mutex_lock(&bpf_verifier_lock);
if (attr->log_level || attr->log_buf || attr->log_size) {
/* user requested verbose verifier output
* and supplied buffer to store the verification trace
*/
log->level = attr->log_level;
log->ubuf = (char __user *) (unsigned long) attr->log_buf;
log->len_total = attr->log_size;
ret = -EINVAL;
/* log attributes have to be sane */
if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
!log->level || !log->ubuf)
goto err_unlock;
}
env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
env->strict_alignment = false;
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
if (bpf_prog_is_dev_bound(env->prog->aux)) {
ret = bpf_prog_offload_verifier_prep(env->prog);
if (ret)
goto skip_full_check;
}
env->explored_states = kcalloc(env->prog->len,
sizeof(struct bpf_verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
if (!env->explored_states)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = check_subprogs(env);
if (ret < 0)
goto skip_full_check;
ret = check_btf_info(env, attr, uattr);
if (ret < 0)
goto skip_full_check;
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
ret = do_check(env);
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
}
if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
ret = bpf_prog_offload_finalize(env);
skip_full_check:
while (!pop_stack(env, NULL, NULL));
free_states(env);
if (ret == 0)
ret = check_max_stack_depth(env);
/* instruction rewrites happen after this point */
if (ret == 0)
sanitize_dead_code(env);
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
if (ret == 0)
ret = fixup_bpf_calls(env);
if (ret == 0)
ret = fixup_call_args(env);
if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
if (log->level && !log->ubuf) {
ret = -EFAULT;
goto err_release_maps;
}
if (ret == 0 && env->used_map_cnt) {
/* if program passed verifier, update used_maps in bpf_prog_info */
env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
sizeof(env->used_maps[0]),
GFP_KERNEL);
if (!env->prog->aux->used_maps) {
ret = -ENOMEM;
goto err_release_maps;
}
memcpy(env->prog->aux->used_maps, env->used_maps,
sizeof(env->used_maps[0]) * env->used_map_cnt);
env->prog->aux->used_map_cnt = env->used_map_cnt;
/* program is valid. Convert pseudo bpf_ld_imm64 into generic
* bpf_ld_imm64 instructions
*/
convert_pseudo_ld_imm64(env);
}
if (ret == 0)
adjust_btf_func(env);
err_release_maps:
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_used_maps() will release them.
*/
release_maps(env);
*prog = env->prog;
err_unlock:
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
kfree(env);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_1412_1 |
crossvul-cpp_data_good_1613_0 | /* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
*
* This file provides the implementation of a USB host controller device that
* does not have any associated hardware. Instead the virtual device is
* connected to the WiFi network and emulates the operation of a USB hcd by
* receiving and sending network frames.
* Note:
* We take great pains to reduce the amount of code where interrupts need to be
 * disabled and in this respect we are different from standard HCDs. In
* particular we don't want in_irq() code bleeding over to the protocol side of
* the driver.
* The troublesome functions are the urb enqueue and dequeue functions both of
* which can be called in_irq(). So for these functions we put the urbs into a
* queue and request a tasklet to process them. This means that a spinlock with
* interrupts disabled must be held for insertion and removal but most code is
* is in tasklet or soft irq context. The lock that protects this list is called
* the tasklet lock and serves the purpose of the 'HCD lock' which must be held
* when calling the following functions.
* usb_hcd_link_urb_to_ep()
* usb_hcd_unlink_urb_from_ep()
* usb_hcd_flush_endpoint()
* usb_hcd_check_unlink_urb()
* -----------------------------------------------------------------------------
*/
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "linux/usb/hcd.h"
#include <asm/unaligned.h>
#include "ozdbg.h"
#include "ozusbif.h"
#include "ozurbparanoia.h"
#include "ozhcd.h"
/*
* Number of units of buffering to capture for an isochronous IN endpoint before
* allowing data to be indicated up.
*/
#define OZ_IN_BUFFERING_UNITS 100
/* Name of our platform device.
*/
#define OZ_PLAT_DEV_NAME "ozwpan"
/* EP0 timeout before the ep0 request is again added to the TX queue (13*8 = 104 ms).
*/
#define EP0_TIMEOUT_COUNTER 13
/* Debounce time HCD driver should wait before unregistering.
*/
#define OZ_HUB_DEBOUNCE_TIMEOUT 1500
/*
* Used to link urbs together and also store some status information for each
* urb.
 * A cache of these is kept in a pool to reduce the number of calls to kmalloc.
*/
struct oz_urb_link {
struct list_head link;
struct urb *urb;
struct oz_port *port;
u8 req_id;
u8 ep_num;
unsigned submit_counter;
};
static struct kmem_cache *oz_urb_link_cache;
/* Holds state information about a USB endpoint.
*/
#define OZ_EP_BUFFER_SIZE_ISOC (1024 * 24)
#define OZ_EP_BUFFER_SIZE_INT 512
struct oz_endpoint {
struct list_head urb_list; /* List of oz_urb_link items. */
struct list_head link; /* For isoc ep, links in to isoc
lists of oz_port. */
struct timespec timestamp;
int credit;
int credit_ceiling;
u8 ep_num;
u8 attrib;
u8 *buffer;
int buffer_size;
int in_ix;
int out_ix;
int buffered_units;
unsigned flags;
int start_frame;
};
/* Bits in the flags field. */
#define OZ_F_EP_BUFFERING 0x1
#define OZ_F_EP_HAVE_STREAM 0x2
/* Holds state information about a USB interface.
*/
struct oz_interface {
unsigned ep_mask;
u8 alt;
};
/* Holds state information about an hcd port.
*/
#define OZ_NB_ENDPOINTS 16
struct oz_port {
unsigned flags;
unsigned status;
void *hpd;
struct oz_hcd *ozhcd;
spinlock_t port_lock;
u8 bus_addr;
u8 next_req_id;
u8 config_num;
int num_iface;
struct oz_interface *iface;
struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS];
struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS];
struct list_head isoc_out_ep;
struct list_head isoc_in_ep;
};
#define OZ_PORT_F_PRESENT 0x1
#define OZ_PORT_F_CHANGED 0x2
#define OZ_PORT_F_DYING 0x4
/* Data structure in the private context area of struct usb_hcd.
*/
#define OZ_NB_PORTS 8
struct oz_hcd {
spinlock_t hcd_lock;
struct list_head urb_pending_list;
struct list_head urb_cancel_list;
struct list_head orphanage;
int conn_port; /* Port that is currently connecting, -1 if none.*/
struct oz_port ports[OZ_NB_PORTS];
uint flags;
struct usb_hcd *hcd;
};
/* Bits in flags field.
*/
#define OZ_HDC_F_SUSPENDED 0x1
/*
* Static function prototypes.
*/
static int oz_hcd_start(struct usb_hcd *hcd);
static void oz_hcd_stop(struct usb_hcd *hcd);
static void oz_hcd_shutdown(struct usb_hcd *hcd);
static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags);
static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);
static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);
static int oz_hcd_get_frame_number(struct usb_hcd *hcd);
static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf);
static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
u16 windex, char *buf, u16 wlength);
static int oz_hcd_bus_suspend(struct usb_hcd *hcd);
static int oz_hcd_bus_resume(struct usb_hcd *hcd);
static int oz_plat_probe(struct platform_device *dev);
static int oz_plat_remove(struct platform_device *dev);
static void oz_plat_shutdown(struct platform_device *dev);
static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg);
static int oz_plat_resume(struct platform_device *dev);
static void oz_urb_process_tasklet(unsigned long unused);
static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
struct oz_port *port, struct usb_host_config *config,
gfp_t mem_flags);
static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
struct oz_port *port);
static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_port *port,
struct usb_host_interface *intf, gfp_t mem_flags);
static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_port *port, int if_ix);
static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
gfp_t mem_flags);
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb);
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
/*
 * File-scope static variables.
*/
static struct platform_device *g_plat_dev;
static struct oz_hcd *g_ozhcd;
static DEFINE_SPINLOCK(g_hcdlock); /* Guards g_ozhcd. */
static const char g_hcd_name[] = "Ozmo WPAN";
static DEFINE_SPINLOCK(g_tasklet_lock);
static struct tasklet_struct g_urb_process_tasklet;
static struct tasklet_struct g_urb_cancel_tasklet;
static atomic_t g_pending_urbs = ATOMIC_INIT(0);
static atomic_t g_usb_frame_number = ATOMIC_INIT(0);
static const struct hc_driver g_oz_hc_drv = {
.description = g_hcd_name,
.product_desc = "Ozmo Devices WPAN",
.hcd_priv_size = sizeof(struct oz_hcd),
.flags = HCD_USB11,
.start = oz_hcd_start,
.stop = oz_hcd_stop,
.shutdown = oz_hcd_shutdown,
.urb_enqueue = oz_hcd_urb_enqueue,
.urb_dequeue = oz_hcd_urb_dequeue,
.endpoint_disable = oz_hcd_endpoint_disable,
.endpoint_reset = oz_hcd_endpoint_reset,
.get_frame_number = oz_hcd_get_frame_number,
.hub_status_data = oz_hcd_hub_status_data,
.hub_control = oz_hcd_hub_control,
.bus_suspend = oz_hcd_bus_suspend,
.bus_resume = oz_hcd_bus_resume,
};
static struct platform_driver g_oz_plat_drv = {
.probe = oz_plat_probe,
.remove = oz_plat_remove,
.shutdown = oz_plat_shutdown,
.suspend = oz_plat_suspend,
.resume = oz_plat_resume,
.driver = {
.name = OZ_PLAT_DEV_NAME,
},
};
/*
* Gets our private context area (which is of type struct oz_hcd) from the
* usb_hcd structure.
* Context: any
*/
static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
{
return (struct oz_hcd *)hcd->hcd_priv;
}
/*
 * Searches the list of ports to find the index of the one with the specified
 * USB bus address. If none of the ports has that bus address, the connection
 * port is returned if there is one, or -1 otherwise.
* Context: any
*/
static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
{
int i;
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].bus_addr == bus_addr)
return i;
}
return ozhcd->conn_port;
}
/*
* Context: any
*/
static struct oz_urb_link *oz_alloc_urb_link(void)
{
return kmem_cache_alloc(oz_urb_link_cache, GFP_ATOMIC);
}
/*
* Context: any
*/
static void oz_free_urb_link(struct oz_urb_link *urbl)
{
if (!urbl)
return;
kmem_cache_free(oz_urb_link_cache, urbl);
}
/*
* Allocates endpoint structure and optionally a buffer. If a buffer is
* allocated it immediately follows the endpoint structure.
* Context: softirq
*/
static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
{
struct oz_endpoint *ep;
ep = kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
if (!ep)
return NULL;
INIT_LIST_HEAD(&ep->urb_list);
INIT_LIST_HEAD(&ep->link);
ep->credit = -1;
if (buffer_size) {
ep->buffer_size = buffer_size;
ep->buffer = (u8 *)(ep+1);
}
return ep;
}
/*
* Pre-condition: Must be called with g_tasklet_lock held and interrupts
* disabled.
* Context: softirq or process
*/
static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd,
struct urb *urb)
{
struct oz_urb_link *urbl;
list_for_each_entry(urbl, &ozhcd->urb_cancel_list, link) {
if (urb == urbl->urb) {
list_del_init(&urbl->link);
return urbl;
}
}
return NULL;
}
/*
* This is called when we have finished processing an urb. It unlinks it from
* the ep and returns it to the core.
* Context: softirq or process
*/
static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned long irq_state;
struct oz_urb_link *cancel_urbl;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
usb_hcd_unlink_urb_from_ep(hcd, urb);
/* Clear hcpriv which will prevent it being put in the cancel list
* in the event that an attempt is made to cancel it.
*/
urb->hcpriv = NULL;
/* Walk the cancel list in case the urb is already sitting there.
* Since we process the cancel list in a tasklet rather than in
* the dequeue function this could happen.
*/
cancel_urbl = oz_uncancel_urb(ozhcd, urb);
/* Note: we release lock but do not enable local irqs.
* It appears that usb_hcd_giveback_urb() expects irqs to be disabled,
* or at least other host controllers disable interrupts at this point
* so we do the same. We must, however, release the lock otherwise a
* deadlock will occur if an urb is submitted to our driver in the urb
* completion function. Because we disable interrupts it is possible
* that the urb_enqueue function can be called with them disabled.
*/
spin_unlock(&g_tasklet_lock);
if (oz_forget_urb(urb)) {
oz_dbg(ON, "ERROR Unknown URB %p\n", urb);
} else {
atomic_dec(&g_pending_urbs);
usb_hcd_giveback_urb(hcd, urb, status);
}
spin_lock(&g_tasklet_lock);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_free_urb_link(cancel_urbl);
}
/*
* Deallocates an endpoint including deallocating any associated stream and
* returning any queued urbs to the core.
* Context: softirq
*/
static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
{
if (port) {
LIST_HEAD(list);
struct oz_hcd *ozhcd = port->ozhcd;
if (ep->flags & OZ_F_EP_HAVE_STREAM)
oz_usb_stream_delete(port->hpd, ep->ep_num);
/* Transfer URBs to the orphanage while we hold the lock. */
spin_lock_bh(&ozhcd->hcd_lock);
		/* Note: this works even if ep->urb_list is empty. */
list_replace_init(&ep->urb_list, &list);
/* Put the URBs in the orphanage. */
list_splice_tail(&list, &ozhcd->orphanage);
spin_unlock_bh(&ozhcd->hcd_lock);
}
oz_dbg(ON, "Freeing endpoint memory\n");
kfree(ep);
}
/*
* Context: softirq
*/
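/*
 * Pops one buffered unit off the endpoint's ring buffer into the urb's
 * transfer buffer. A unit is stored as a one-byte length prefix followed by
 * the payload; at most urb->transfer_buffer_length bytes are copied, and the
 * copy may wrap around the end of the ring.
 */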
static void oz_complete_buffered_urb(struct oz_port *port,
struct oz_endpoint *ep,
struct urb *urb)
{
int data_len, available_space, copy_len;
data_len = ep->buffer[ep->out_ix];
if (data_len <= urb->transfer_buffer_length)
available_space = data_len;
else
available_space = urb->transfer_buffer_length;
if (++ep->out_ix == ep->buffer_size)
ep->out_ix = 0;
copy_len = ep->buffer_size - ep->out_ix;
if (copy_len >= available_space)
copy_len = available_space;
memcpy(urb->transfer_buffer, &ep->buffer[ep->out_ix], copy_len);
if (copy_len < available_space) {
memcpy((urb->transfer_buffer + copy_len), ep->buffer,
(available_space - copy_len));
ep->out_ix = available_space - copy_len;
} else {
ep->out_ix += copy_len;
}
urb->actual_length = available_space;
if (ep->out_ix == ep->buffer_size)
ep->out_ix = 0;
ep->buffered_units--;
oz_dbg(ON, "Trying to give back buffered frame of size=%d\n",
available_space);
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/*
* Context: softirq
*/
static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
struct urb *urb, u8 req_id)
{
struct oz_urb_link *urbl;
struct oz_endpoint *ep = NULL;
int err = 0;
if (ep_addr >= OZ_NB_ENDPOINTS) {
oz_dbg(ON, "%s: Invalid endpoint number\n", __func__);
return -EINVAL;
}
urbl = oz_alloc_urb_link();
if (!urbl)
return -ENOMEM;
urbl->submit_counter = 0;
urbl->urb = urb;
urbl->req_id = req_id;
urbl->ep_num = ep_addr;
/* Hold lock while we insert the URB into the list within the
* endpoint structure.
*/
spin_lock_bh(&port->ozhcd->hcd_lock);
/* If the urb has been unlinked while out of any list then
* complete it now.
*/
if (urb->unlinked) {
spin_unlock_bh(&port->ozhcd->hcd_lock);
oz_dbg(ON, "urb %p unlinked so complete immediately\n", urb);
oz_complete_urb(port->ozhcd->hcd, urb, 0);
oz_free_urb_link(urbl);
return 0;
}
if (in_dir)
ep = port->in_ep[ep_addr];
else
ep = port->out_ep[ep_addr];
if (!ep) {
err = -ENOMEM;
goto out;
}
	/* For an interrupt endpoint, check for buffered data and complete
	 * the urb from it immediately.
	 */
if (((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)
&& ep->buffered_units > 0) {
oz_free_urb_link(urbl);
spin_unlock_bh(&port->ozhcd->hcd_lock);
oz_complete_buffered_urb(port, ep, urb);
return 0;
}
if (port->hpd) {
list_add_tail(&urbl->link, &ep->urb_list);
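		/* Queueing the first URB on a non-control OUT endpoint
		 * starts its credit clock (credit < 0 means stopped); the
		 * heartbeat only consumes credits for isoc endpoints.
		 */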
if (!in_dir && ep_addr && (ep->credit < 0)) {
getrawmonotonic(&ep->timestamp);
ep->credit = 0;
}
} else {
err = -EPIPE;
}
out:
spin_unlock_bh(&port->ozhcd->hcd_lock);
if (err)
oz_free_urb_link(urbl);
return err;
}
/*
* Removes an urb from the queue in the endpoint.
* Returns 0 if it is found and -EIDRM otherwise.
* Context: softirq
*/
static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
struct urb *urb)
{
struct oz_urb_link *urbl = NULL;
struct oz_endpoint *ep;
spin_lock_bh(&port->ozhcd->hcd_lock);
if (in_dir)
ep = port->in_ep[ep_addr];
else
ep = port->out_ep[ep_addr];
if (ep) {
struct list_head *e;
list_for_each(e, &ep->urb_list) {
urbl = list_entry(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
list_del_init(e);
break;
}
urbl = NULL;
}
}
spin_unlock_bh(&port->ozhcd->hcd_lock);
oz_free_urb_link(urbl);
return urbl ? 0 : -EIDRM;
}
/*
* Finds an urb given its request id.
* Context: softirq
*/
static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
u8 req_id)
{
struct oz_hcd *ozhcd = port->ozhcd;
struct urb *urb = NULL;
struct oz_urb_link *urbl;
struct oz_endpoint *ep;
spin_lock_bh(&ozhcd->hcd_lock);
ep = port->out_ep[ep_ix];
if (ep) {
struct list_head *e;
list_for_each(e, &ep->urb_list) {
urbl = list_entry(e, struct oz_urb_link, link);
if (urbl->req_id == req_id) {
urb = urbl->urb;
list_del_init(e);
break;
}
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
	/* If urb is non-NULL then we must have an urb link to delete.
*/
if (urb)
oz_free_urb_link(urbl);
return urb;
}
/*
* Pre-condition: Port lock must be held.
* Context: softirq
*/
static void oz_acquire_port(struct oz_port *port, void *hpd)
{
INIT_LIST_HEAD(&port->isoc_out_ep);
INIT_LIST_HEAD(&port->isoc_in_ep);
port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED;
port->status |= USB_PORT_STAT_CONNECTION |
(USB_PORT_STAT_C_CONNECTION << 16);
oz_usb_get(hpd);
port->hpd = hpd;
}
/*
* Context: softirq
*/
static struct oz_hcd *oz_hcd_claim(void)
{
struct oz_hcd *ozhcd;
spin_lock_bh(&g_hcdlock);
ozhcd = g_ozhcd;
if (ozhcd)
usb_get_hcd(ozhcd->hcd);
spin_unlock_bh(&g_hcdlock);
return ozhcd;
}
/*
* Context: softirq
*/
static inline void oz_hcd_put(struct oz_hcd *ozhcd)
{
if (ozhcd)
usb_put_hcd(ozhcd->hcd);
}
/*
* This is called by the protocol handler to notify that a PD has arrived.
* We allocate a port to associate with the PD and create a structure for
* endpoint 0. This port is made the connection port.
 * In the event that one of the other ports is already a connection port then
 * we fail.
 * TODO We should be able to do better than fail and should be able to remember
* that this port needs configuring and make it the connection port once the
* current connection port has been assigned an address. Collisions here are
* probably very rare indeed.
* Context: softirq
*/
struct oz_port *oz_hcd_pd_arrived(void *hpd)
{
int i;
struct oz_port *hport;
struct oz_hcd *ozhcd;
struct oz_endpoint *ep;
ozhcd = oz_hcd_claim();
if (!ozhcd)
return NULL;
/* Allocate an endpoint object in advance (before holding hcd lock) to
* use for out endpoint 0.
*/
ep = oz_ep_alloc(0, GFP_ATOMIC);
if (!ep)
goto err_put;
spin_lock_bh(&ozhcd->hcd_lock);
if (ozhcd->conn_port >= 0)
goto err_unlock;
for (i = 0; i < OZ_NB_PORTS; i++) {
struct oz_port *port = &ozhcd->ports[i];
spin_lock(&port->port_lock);
if (!(port->flags & (OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED))) {
oz_acquire_port(port, hpd);
spin_unlock(&port->port_lock);
break;
}
spin_unlock(&port->port_lock);
}
if (i == OZ_NB_PORTS)
goto err_unlock;
ozhcd->conn_port = i;
hport = &ozhcd->ports[i];
hport->out_ep[0] = ep;
spin_unlock_bh(&ozhcd->hcd_lock);
if (ozhcd->flags & OZ_HDC_F_SUSPENDED)
usb_hcd_resume_root_hub(ozhcd->hcd);
usb_hcd_poll_rh_status(ozhcd->hcd);
oz_hcd_put(ozhcd);
return hport;
err_unlock:
spin_unlock_bh(&ozhcd->hcd_lock);
oz_ep_free(NULL, ep);
err_put:
oz_hcd_put(ozhcd);
return NULL;
}
/*
* This is called by the protocol handler to notify that the PD has gone away.
* We need to deallocate all resources and then request that the root hub is
* polled. We release the reference we hold on the PD.
* Context: softirq
*/
void oz_hcd_pd_departed(struct oz_port *port)
{
struct oz_hcd *ozhcd;
void *hpd;
struct oz_endpoint *ep = NULL;
if (port == NULL) {
oz_dbg(ON, "%s: port = 0\n", __func__);
return;
}
ozhcd = port->ozhcd;
if (ozhcd == NULL)
return;
/* Check if this is the connection port - if so clear it.
*/
spin_lock_bh(&ozhcd->hcd_lock);
if ((ozhcd->conn_port >= 0) &&
(port == &ozhcd->ports[ozhcd->conn_port])) {
oz_dbg(ON, "Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_lock(&port->port_lock);
port->flags |= OZ_PORT_F_DYING;
spin_unlock(&port->port_lock);
spin_unlock_bh(&ozhcd->hcd_lock);
oz_clean_endpoints_for_config(ozhcd->hcd, port);
spin_lock_bh(&port->port_lock);
hpd = port->hpd;
port->hpd = NULL;
port->bus_addr = 0xff;
port->config_num = 0;
port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
port->flags |= OZ_PORT_F_CHANGED;
port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE);
port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
	/* If there is an endpoint 0 then clear the pointer while we hold
	 * the spinlock, but deallocate it after releasing the lock.
*/
if (port->out_ep[0]) {
ep = port->out_ep[0];
port->out_ep[0] = NULL;
}
spin_unlock_bh(&port->port_lock);
if (ep)
oz_ep_free(port, ep);
usb_hcd_poll_rh_status(ozhcd->hcd);
oz_usb_put(hpd);
}
/*
* Context: softirq
*/
void oz_hcd_pd_reset(void *hpd, void *hport)
{
/* Cleanup the current configuration and report reset to the core.
*/
struct oz_port *port = hport;
struct oz_hcd *ozhcd = port->ozhcd;
oz_dbg(ON, "PD Reset\n");
spin_lock_bh(&port->port_lock);
port->flags |= OZ_PORT_F_CHANGED;
port->status |= USB_PORT_STAT_RESET;
port->status |= (USB_PORT_STAT_C_RESET << 16);
spin_unlock_bh(&port->port_lock);
oz_clean_endpoints_for_config(ozhcd->hcd, port);
usb_hcd_poll_rh_status(ozhcd->hcd);
}
/*
* Context: softirq
*/
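/*
 * Handles a descriptor confirmation from the PD. Descriptor data can arrive
 * in fragments: the copy length is clamped to both the fragment size and the
 * space remaining in the urb's transfer buffer, and a further GET_DESCRIPTOR
 * request is issued until required_size bytes have been gathered.
 */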
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
u8 length, u16 offset, u16 total_size)
{
struct oz_port *port = hport;
struct urb *urb;
int err = 0;
oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
length, offset, total_size);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb)
return;
if (status == 0) {
unsigned int copy_len;
unsigned int required_size = urb->transfer_buffer_length;
if (required_size > total_size)
required_size = total_size;
copy_len = required_size-offset;
if (length <= copy_len)
copy_len = length;
memcpy(urb->transfer_buffer+offset, desc, copy_len);
offset += copy_len;
if (offset < required_size) {
struct usb_ctrlrequest *setup =
(struct usb_ctrlrequest *)urb->setup_packet;
unsigned wvalue = le16_to_cpu(setup->wValue);
if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
err = -ENOMEM;
else if (oz_usb_get_desc_req(port->hpd, req_id,
setup->bRequestType, (u8)(wvalue>>8),
(u8)wvalue, setup->wIndex, offset,
required_size-offset)) {
oz_dequeue_ep_urb(port, 0, 0, urb);
err = -ENOMEM;
}
if (err == 0)
return;
}
}
urb->actual_length = total_size;
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/*
* Context: softirq
*/
static void oz_display_conf_type(u8 t)
{
switch (t) {
case USB_REQ_GET_STATUS:
oz_dbg(ON, "USB_REQ_GET_STATUS - cnf\n");
break;
case USB_REQ_CLEAR_FEATURE:
oz_dbg(ON, "USB_REQ_CLEAR_FEATURE - cnf\n");
break;
case USB_REQ_SET_FEATURE:
oz_dbg(ON, "USB_REQ_SET_FEATURE - cnf\n");
break;
case USB_REQ_SET_ADDRESS:
oz_dbg(ON, "USB_REQ_SET_ADDRESS - cnf\n");
break;
case USB_REQ_GET_DESCRIPTOR:
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
break;
case USB_REQ_SET_DESCRIPTOR:
oz_dbg(ON, "USB_REQ_SET_DESCRIPTOR - cnf\n");
break;
case USB_REQ_GET_CONFIGURATION:
oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - cnf\n");
break;
case USB_REQ_SET_CONFIGURATION:
oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - cnf\n");
break;
case USB_REQ_GET_INTERFACE:
oz_dbg(ON, "USB_REQ_GET_INTERFACE - cnf\n");
break;
case USB_REQ_SET_INTERFACE:
oz_dbg(ON, "USB_REQ_SET_INTERFACE - cnf\n");
break;
case USB_REQ_SYNCH_FRAME:
oz_dbg(ON, "USB_REQ_SYNCH_FRAME - cnf\n");
break;
}
}
/*
* Context: softirq
*/
static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
u8 rcode, u8 config_num)
{
int rc = 0;
struct usb_hcd *hcd = port->ozhcd->hcd;
if (rcode == 0) {
port->config_num = config_num;
oz_clean_endpoints_for_config(hcd, port);
if (oz_build_endpoints_for_config(hcd, port,
&urb->dev->config[port->config_num-1], GFP_ATOMIC)) {
rc = -ENOMEM;
}
} else {
rc = -ENOMEM;
}
oz_complete_urb(hcd, urb, rc);
}
/*
* Context: softirq
*/
static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
u8 rcode, u8 if_num, u8 alt)
{
struct usb_hcd *hcd = port->ozhcd->hcd;
int rc = 0;
if ((rcode == 0) && (port->config_num > 0)) {
struct usb_host_config *config;
struct usb_host_interface *intf;
oz_dbg(ON, "Set interface %d alt %d\n", if_num, alt);
oz_clean_endpoints_for_interface(hcd, port, if_num);
config = &urb->dev->config[port->config_num-1];
intf = &config->intf_cache[if_num]->altsetting[alt];
if (oz_build_endpoints_for_interface(hcd, port, intf,
GFP_ATOMIC))
rc = -ENOMEM;
else
port->iface[if_num].alt = alt;
} else {
rc = -ENOMEM;
}
oz_complete_urb(hcd, urb, rc);
}
/*
* Context: softirq
*/
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
int data_len)
{
struct oz_port *port = hport;
struct urb *urb;
struct usb_ctrlrequest *setup;
struct usb_hcd *hcd = port->ozhcd->hcd;
unsigned windex;
unsigned wvalue;
oz_dbg(ON, "oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb) {
oz_dbg(ON, "URB not found\n");
return;
}
setup = (struct usb_ctrlrequest *)urb->setup_packet;
windex = le16_to_cpu(setup->wIndex);
wvalue = le16_to_cpu(setup->wValue);
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
/* Standard requests */
oz_display_conf_type(setup->bRequest);
switch (setup->bRequest) {
case USB_REQ_SET_CONFIGURATION:
oz_hcd_complete_set_config(port, urb, rcode,
(u8)wvalue);
break;
case USB_REQ_SET_INTERFACE:
oz_hcd_complete_set_interface(port, urb, rcode,
(u8)windex, (u8)wvalue);
break;
default:
oz_complete_urb(hcd, urb, 0);
}
} else {
int copy_len;
oz_dbg(ON, "VENDOR-CLASS - cnf\n");
if (data_len) {
if (data_len <= urb->transfer_buffer_length)
copy_len = data_len;
else
copy_len = urb->transfer_buffer_length;
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
}
oz_complete_urb(hcd, urb, 0);
}
}
/*
* Context: softirq-serialized
*/
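/*
 * The endpoint buffer is a byte ring: each unit is stored as a one-byte
 * length prefix followed by the payload (so units are limited to 255 bytes),
 * wrapping at buffer_size. For example, with buffer_size = 8 and in_ix = 6,
 * buffering the 3-byte frame {a,b,c} writes {3,a} at indices 6-7, wraps
 * {b,c} to indices 0-1 and leaves in_ix = 2. out_ix chases in_ix on the
 * consume side, and one slot is always left empty to distinguish a full
 * ring from an empty one.
 */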
static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
int data_len)
{
int space;
int copy_len;
if (!ep->buffer)
return -1;
space = ep->out_ix-ep->in_ix-1;
if (space < 0)
space += ep->buffer_size;
if (space < (data_len+1)) {
oz_dbg(ON, "Buffer full\n");
return -1;
}
ep->buffer[ep->in_ix] = (u8)data_len;
if (++ep->in_ix == ep->buffer_size)
ep->in_ix = 0;
copy_len = ep->buffer_size - ep->in_ix;
if (copy_len > data_len)
copy_len = data_len;
memcpy(&ep->buffer[ep->in_ix], data, copy_len);
if (copy_len < data_len) {
memcpy(ep->buffer, data+copy_len, data_len-copy_len);
ep->in_ix = data_len-copy_len;
} else {
ep->in_ix += copy_len;
}
if (ep->in_ix == ep->buffer_size)
ep->in_ix = 0;
ep->buffered_units++;
return 0;
}
/*
* Context: softirq-serialized
*/
void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
{
struct oz_port *port = (struct oz_port *)hport;
struct oz_endpoint *ep;
struct oz_hcd *ozhcd = port->ozhcd;
spin_lock_bh(&ozhcd->hcd_lock);
ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
if (ep == NULL)
goto done;
switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
case USB_ENDPOINT_XFER_BULK:
if (!list_empty(&ep->urb_list)) {
struct oz_urb_link *urbl =
list_first_entry(&ep->urb_list,
struct oz_urb_link, link);
struct urb *urb;
int copy_len;
list_del_init(&urbl->link);
spin_unlock_bh(&ozhcd->hcd_lock);
urb = urbl->urb;
oz_free_urb_link(urbl);
if (data_len <= urb->transfer_buffer_length)
copy_len = data_len;
else
copy_len = urb->transfer_buffer_length;
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
oz_complete_urb(port->ozhcd->hcd, urb, 0);
return;
}
oz_dbg(ON, "buffering frame as URB is not available\n");
oz_hcd_buffer_data(ep, data, data_len);
break;
case USB_ENDPOINT_XFER_ISOC:
oz_hcd_buffer_data(ep, data, data_len);
break;
}
done:
spin_unlock_bh(&ozhcd->hcd_lock);
}
/*
* Context: unknown
*/
static inline int oz_usb_get_frame_number(void)
{
return atomic_inc_return(&g_usb_frame_number);
}
/*
* Context: softirq
*/
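/*
 * Isoc scheduling works on a credit scheme: each endpoint earns one credit
 * per elapsed millisecond since its last timestamp, capped at credit_ceiling.
 * An OUT urb is sent to the PD once the credits cover its number_of_packets;
 * an IN endpoint first buffers OZ_IN_BUFFERING_UNITS units before urbs are
 * completed from its ring buffer.
 */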
int oz_hcd_heartbeat(void *hport)
{
int rc = 0;
struct oz_port *port = hport;
struct oz_hcd *ozhcd = port->ozhcd;
struct oz_urb_link *urbl, *n;
LIST_HEAD(xfr_list);
struct urb *urb;
struct oz_endpoint *ep;
struct timespec ts, delta;
getrawmonotonic(&ts);
/* Check the OUT isoc endpoints to see if any URB data can be sent.
*/
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_entry(ep, &port->isoc_out_ep, link) {
if (ep->credit < 0)
continue;
delta = timespec_sub(ts, ep->timestamp);
ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
if (ep->credit > ep->credit_ceiling)
ep->credit = ep->credit_ceiling;
ep->timestamp = ts;
while (ep->credit && !list_empty(&ep->urb_list)) {
urbl = list_first_entry(&ep->urb_list,
struct oz_urb_link, link);
urb = urbl->urb;
if ((ep->credit + 1) < urb->number_of_packets)
break;
ep->credit -= urb->number_of_packets;
if (ep->credit < 0)
ep->credit = 0;
list_move_tail(&urbl->link, &xfr_list);
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
/* Send to PD and complete URBs.
*/
list_for_each_entry_safe(urbl, n, &xfr_list, link) {
urb = urbl->urb;
list_del_init(&urbl->link);
urb->error_count = 0;
urb->start_frame = oz_usb_get_frame_number();
oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
oz_free_urb_link(urbl);
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check the IN isoc endpoints to see if any URBs can be completed.
*/
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_entry(ep, &port->isoc_in_ep, link) {
if (ep->flags & OZ_F_EP_BUFFERING) {
if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
ep->flags &= ~OZ_F_EP_BUFFERING;
ep->credit = 0;
ep->timestamp = ts;
ep->start_frame = 0;
}
continue;
}
delta = timespec_sub(ts, ep->timestamp);
ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
ep->timestamp = ts;
list_for_each_entry_safe(urbl, n, &ep->urb_list, link) {
struct urb *urb = urbl->urb;
int len = 0;
int copy_len;
int i;
if (ep->credit < urb->number_of_packets)
break;
if (ep->buffered_units < urb->number_of_packets)
break;
urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++) {
len = ep->buffer[ep->out_ix];
if (++ep->out_ix == ep->buffer_size)
ep->out_ix = 0;
copy_len = ep->buffer_size - ep->out_ix;
if (copy_len > len)
copy_len = len;
				/* Place each packet at its iso frame offset
				 * within the transfer buffer.
				 */
				memcpy(urb->transfer_buffer +
					urb->actual_length,
					&ep->buffer[ep->out_ix], copy_len);
				if (copy_len < len) {
					memcpy(urb->transfer_buffer +
						urb->actual_length + copy_len,
						ep->buffer, len-copy_len);
					ep->out_ix = len-copy_len;
				} else
					ep->out_ix += copy_len;
if (ep->out_ix == ep->buffer_size)
ep->out_ix = 0;
urb->iso_frame_desc[i].offset =
urb->actual_length;
urb->actual_length += len;
urb->iso_frame_desc[i].actual_length = len;
urb->iso_frame_desc[i].status = 0;
}
ep->buffered_units -= urb->number_of_packets;
urb->error_count = 0;
urb->start_frame = ep->start_frame;
ep->start_frame += urb->number_of_packets;
list_move_tail(&urbl->link, &xfr_list);
ep->credit -= urb->number_of_packets;
}
}
if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
rc = 1;
spin_unlock_bh(&ozhcd->hcd_lock);
/* Complete the filled URBs.
*/
list_for_each_entry_safe(urbl, n, &xfr_list, link) {
urb = urbl->urb;
list_del_init(&urbl->link);
oz_free_urb_link(urbl);
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
	/* Check if there are any ep0 requests that have timed out.
	 * If so, resend them to the PD.
	 */
ep = port->out_ep[0];
if (ep) {
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_entry_safe(urbl, n, &ep->urb_list, link) {
if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
oz_dbg(ON, "Request 0x%p timeout\n", urbl->urb);
list_move_tail(&urbl->link, &xfr_list);
urbl->submit_counter = 0;
} else {
urbl->submit_counter++;
}
}
if (!list_empty(&ep->urb_list))
rc = 1;
spin_unlock_bh(&ozhcd->hcd_lock);
list_for_each_entry_safe(urbl, n, &xfr_list, link) {
oz_dbg(ON, "Resending request to PD\n");
oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
oz_free_urb_link(urbl);
}
}
return rc;
}
/*
* Context: softirq
*/
static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_port *port,
struct usb_host_interface *intf, gfp_t mem_flags)
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
int if_ix = intf->desc.bInterfaceNumber;
int request_heartbeat = 0;
oz_dbg(ON, "interface[%d] = %p\n", if_ix, intf);
if (if_ix >= port->num_iface || port->iface == NULL)
return -ENOMEM;
for (i = 0; i < intf->desc.bNumEndpoints; i++) {
struct usb_host_endpoint *hep = &intf->endpoint[i];
u8 ep_addr = hep->desc.bEndpointAddress;
u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK;
struct oz_endpoint *ep;
int buffer_size = 0;
oz_dbg(ON, "%d bEndpointAddress = %x\n", i, ep_addr);
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
switch (hep->desc.bmAttributes &
USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_ISOC:
buffer_size = OZ_EP_BUFFER_SIZE_ISOC;
break;
case USB_ENDPOINT_XFER_INT:
buffer_size = OZ_EP_BUFFER_SIZE_INT;
break;
}
}
ep = oz_ep_alloc(buffer_size, mem_flags);
if (!ep) {
oz_clean_endpoints_for_interface(hcd, port, if_ix);
return -ENOMEM;
}
ep->attrib = hep->desc.bmAttributes;
ep->ep_num = ep_num;
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
oz_dbg(ON, "wMaxPacketSize = %d\n",
usb_endpoint_maxp(&hep->desc));
ep->credit_ceiling = 200;
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
ep->flags |= OZ_F_EP_BUFFERING;
} else {
ep->flags |= OZ_F_EP_HAVE_STREAM;
if (oz_usb_stream_create(port->hpd, ep_num))
ep->flags &= ~OZ_F_EP_HAVE_STREAM;
}
}
spin_lock_bh(&ozhcd->hcd_lock);
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
port->in_ep[ep_num] = ep;
port->iface[if_ix].ep_mask |=
(1<<(ep_num+OZ_NB_ENDPOINTS));
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
list_add_tail(&ep->link, &port->isoc_in_ep);
request_heartbeat = 1;
}
} else {
port->out_ep[ep_num] = ep;
port->iface[if_ix].ep_mask |= (1<<ep_num);
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
list_add_tail(&ep->link, &port->isoc_out_ep);
request_heartbeat = 1;
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
if (request_heartbeat && port->hpd)
oz_usb_request_heartbeat(port->hpd);
}
return 0;
}
/*
* Context: softirq
*/
static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_port *port, int if_ix)
{
struct oz_hcd *ozhcd = port->ozhcd;
unsigned mask;
int i;
LIST_HEAD(ep_list);
struct oz_endpoint *ep, *n;
oz_dbg(ON, "Deleting endpoints for interface %d\n", if_ix);
if (if_ix >= port->num_iface)
return;
spin_lock_bh(&ozhcd->hcd_lock);
mask = port->iface[if_ix].ep_mask;
port->iface[if_ix].ep_mask = 0;
for (i = 0; i < OZ_NB_ENDPOINTS; i++) {
struct list_head *e;
/* Gather OUT endpoints.
*/
if ((mask & (1<<i)) && port->out_ep[i]) {
e = &port->out_ep[i]->link;
port->out_ep[i] = NULL;
/* Remove from isoc list if present.
*/
list_move_tail(e, &ep_list);
}
/* Gather IN endpoints.
*/
if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
e = &port->in_ep[i]->link;
port->in_ep[i] = NULL;
list_move_tail(e, &ep_list);
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
list_for_each_entry_safe(ep, n, &ep_list, link) {
list_del_init(&ep->link);
oz_ep_free(port, ep);
}
}
/*
* Context: softirq
*/
static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
struct oz_port *port, struct usb_host_config *config,
gfp_t mem_flags)
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
int num_iface = config->desc.bNumInterfaces;
if (num_iface) {
struct oz_interface *iface;
iface = kmalloc_array(num_iface, sizeof(struct oz_interface),
mem_flags | __GFP_ZERO);
if (!iface)
return -ENOMEM;
spin_lock_bh(&ozhcd->hcd_lock);
port->iface = iface;
port->num_iface = num_iface;
spin_unlock_bh(&ozhcd->hcd_lock);
}
for (i = 0; i < num_iface; i++) {
struct usb_host_interface *intf =
&config->intf_cache[i]->altsetting[0];
if (oz_build_endpoints_for_interface(hcd, port, intf,
mem_flags))
goto fail;
}
return 0;
fail:
oz_clean_endpoints_for_config(hcd, port);
return -1;
}
/*
* Context: softirq
*/
static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
struct oz_port *port)
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
oz_dbg(ON, "Deleting endpoints for configuration\n");
for (i = 0; i < port->num_iface; i++)
oz_clean_endpoints_for_interface(hcd, port, i);
spin_lock_bh(&ozhcd->hcd_lock);
if (port->iface) {
oz_dbg(ON, "Freeing interfaces object\n");
kfree(port->iface);
port->iface = NULL;
}
port->num_iface = 0;
spin_unlock_bh(&ozhcd->hcd_lock);
}
/*
* Context: tasklet
*/
static void *oz_claim_hpd(struct oz_port *port)
{
void *hpd;
struct oz_hcd *ozhcd = port->ozhcd;
spin_lock_bh(&ozhcd->hcd_lock);
hpd = port->hpd;
if (hpd)
oz_usb_get(hpd);
spin_unlock_bh(&ozhcd->hcd_lock);
return hpd;
}
/*
* Context: tasklet
*/
static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
gfp_t mem_flags)
{
struct usb_ctrlrequest *setup;
unsigned windex;
unsigned wvalue;
unsigned wlength;
void *hpd;
u8 req_id;
int rc = 0;
unsigned complete = 0;
int port_ix = -1;
struct oz_port *port = NULL;
oz_dbg(URB, "[%s]:(%p)\n", __func__, urb);
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
if (port_ix < 0) {
rc = -EPIPE;
goto out;
}
port = &ozhcd->ports[port_ix];
if (((port->flags & OZ_PORT_F_PRESENT) == 0)
|| (port->flags & OZ_PORT_F_DYING)) {
oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
port_ix, urb->dev->devnum);
rc = -EPIPE;
goto out;
}
/* Store port in private context data.
*/
urb->hcpriv = port;
setup = (struct usb_ctrlrequest *)urb->setup_packet;
windex = le16_to_cpu(setup->wIndex);
wvalue = le16_to_cpu(setup->wValue);
wlength = le16_to_cpu(setup->wLength);
oz_dbg(CTRL_DETAIL, "bRequestType = %x\n", setup->bRequestType);
oz_dbg(CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
oz_dbg(CTRL_DETAIL, "wValue = %x\n", wvalue);
oz_dbg(CTRL_DETAIL, "wIndex = %x\n", windex);
oz_dbg(CTRL_DETAIL, "wLength = %x\n", wlength);
req_id = port->next_req_id++;
hpd = oz_claim_hpd(port);
if (hpd == NULL) {
oz_dbg(ON, "Cannot claim port\n");
rc = -EPIPE;
goto out;
}
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
/* Standard requests
*/
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - req\n");
break;
case USB_REQ_SET_ADDRESS:
oz_dbg(ON, "USB_REQ_SET_ADDRESS - req\n");
oz_dbg(ON, "Port %d address is 0x%x\n",
ozhcd->conn_port,
(u8)le16_to_cpu(setup->wValue));
spin_lock_bh(&ozhcd->hcd_lock);
if (ozhcd->conn_port >= 0) {
ozhcd->ports[ozhcd->conn_port].bus_addr =
(u8)le16_to_cpu(setup->wValue);
oz_dbg(ON, "Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_unlock_bh(&ozhcd->hcd_lock);
complete = 1;
break;
case USB_REQ_SET_CONFIGURATION:
oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - req\n");
break;
case USB_REQ_GET_CONFIGURATION:
			/* We short-circuit this case and reply directly since
* we have the selected configuration number cached.
*/
oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
port->config_num;
complete = 1;
} else {
rc = -EPIPE;
}
break;
case USB_REQ_GET_INTERFACE:
			/* We short-circuit this case and reply directly since
* we have the selected interface alternative cached.
*/
oz_dbg(ON, "USB_REQ_GET_INTERFACE - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
port->iface[(u8)windex].alt;
oz_dbg(ON, "interface = %d alt = %d\n",
windex, port->iface[(u8)windex].alt);
complete = 1;
} else {
rc = -EPIPE;
}
break;
case USB_REQ_SET_INTERFACE:
oz_dbg(ON, "USB_REQ_SET_INTERFACE - req\n");
break;
}
}
if (!rc && !complete) {
int data_len = 0;
if ((setup->bRequestType & USB_DIR_IN) == 0)
data_len = wlength;
urb->actual_length = data_len;
if (oz_usb_control_req(port->hpd, req_id, setup,
urb->transfer_buffer, data_len)) {
rc = -ENOMEM;
} else {
/* Note: we are queuing the request after we have
* submitted it to be transmitted. If the request were
* to complete before we queued it then it would not
* be found in the queue. It seems impossible for
* this to happen but if it did the request would
* be resubmitted so the problem would hopefully
* resolve itself. Putting the request into the
* queue before it has been sent is worse since the
* urb could be cancelled while we are using it
* to build the request.
*/
if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
rc = -ENOMEM;
}
}
oz_usb_put(hpd);
out:
if (rc || complete) {
oz_dbg(ON, "Completing request locally\n");
oz_complete_urb(ozhcd->hcd, urb, rc);
} else {
oz_usb_request_heartbeat(port->hpd);
}
}
/*
* Context: tasklet
*/
static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
{
int rc = 0;
struct oz_port *port = urb->hcpriv;
u8 ep_addr;
/* When we are paranoid we keep a list of urbs which we check against
* before handing one back. This is just for debugging during
* development and should be turned off in the released driver.
*/
oz_remember_urb(urb);
/* Check buffer is valid.
*/
if (!urb->transfer_buffer && urb->transfer_buffer_length)
return -EINVAL;
/* Check if there is a device at the port - refuse if not.
*/
if ((port->flags & OZ_PORT_F_PRESENT) == 0)
return -EPIPE;
ep_addr = usb_pipeendpoint(urb->pipe);
if (ep_addr) {
/* If the request is not for EP0 then queue it.
*/
if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
urb, 0))
rc = -EPIPE;
} else {
oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
}
return rc;
}
/*
* Context: tasklet
*/
static void oz_urb_process_tasklet(unsigned long unused)
{
unsigned long irq_state;
struct urb *urb;
struct oz_hcd *ozhcd = oz_hcd_claim();
struct oz_urb_link *urbl, *n;
int rc = 0;
if (ozhcd == NULL)
return;
/* This is called from a tasklet so is in softirq context but the urb
* list is filled from any context so we need to lock
* appropriately while removing urbs.
*/
spin_lock_irqsave(&g_tasklet_lock, irq_state);
list_for_each_entry_safe(urbl, n, &ozhcd->urb_pending_list, link) {
list_del_init(&urbl->link);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
urb = urbl->urb;
oz_free_urb_link(urbl);
rc = oz_urb_process(ozhcd, urb);
if (rc)
oz_complete_urb(ozhcd->hcd, urb, rc);
spin_lock_irqsave(&g_tasklet_lock, irq_state);
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_hcd_put(ozhcd);
}
/*
* This function searches for the urb in any of the lists it could be in.
* If it is found it is removed from the list and completed. If the urb is
* being processed then it won't be in a list so won't be found. However, the
* call to usb_hcd_check_unlink_urb() will set the value of the unlinked field
* to a non-zero value. When an attempt is made to put the urb back in a list
* the unlinked field will be checked and the urb will then be completed.
* Context: tasklet
*/
static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
{
struct oz_urb_link *urbl = NULL;
struct list_head *e;
struct oz_hcd *ozhcd;
unsigned long irq_state;
u8 ix;
if (port == NULL) {
oz_dbg(ON, "%s: ERROR: (%p) port is null\n", __func__, urb);
return;
}
ozhcd = port->ozhcd;
if (ozhcd == NULL) {
oz_dbg(ON, "%s; ERROR: (%p) ozhcd is null\n", __func__, urb);
return;
}
/* Look in the tasklet queue.
*/
spin_lock_irqsave(&g_tasklet_lock, irq_state);
list_for_each(e, &ozhcd->urb_cancel_list) {
urbl = list_entry(e, struct oz_urb_link, link);
if (urb == urbl->urb) {
list_del_init(e);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
goto out2;
}
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
urbl = NULL;
/* Look in the orphanage.
*/
spin_lock_irqsave(&ozhcd->hcd_lock, irq_state);
list_for_each(e, &ozhcd->orphanage) {
urbl = list_entry(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
list_del(e);
oz_dbg(ON, "Found urb in orphanage\n");
goto out;
}
}
ix = (ep_num & 0xf);
urbl = NULL;
if ((ep_num & USB_DIR_IN) && ix)
urbl = oz_remove_urb(port->in_ep[ix], urb);
else
urbl = oz_remove_urb(port->out_ep[ix], urb);
out:
spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state);
out2:
if (urbl) {
urb->actual_length = 0;
oz_free_urb_link(urbl);
oz_complete_urb(ozhcd->hcd, urb, -EPIPE);
}
}
/*
* Context: tasklet
*/
static void oz_urb_cancel_tasklet(unsigned long unused)
{
unsigned long irq_state;
struct urb *urb;
struct oz_urb_link *urbl, *n;
struct oz_hcd *ozhcd = oz_hcd_claim();
if (ozhcd == NULL)
return;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
list_for_each_entry_safe(urbl, n, &ozhcd->urb_cancel_list, link) {
list_del_init(&urbl->link);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
urb = urbl->urb;
if (urb->unlinked)
oz_urb_cancel(urbl->port, urbl->ep_num, urb);
oz_free_urb_link(urbl);
spin_lock_irqsave(&g_tasklet_lock, irq_state);
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_hcd_put(ozhcd);
}
/*
* Context: unknown
*/
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
{
if (ozhcd) {
struct oz_urb_link *urbl, *n;
list_for_each_entry_safe(urbl, n, &ozhcd->orphanage, link) {
list_del(&urbl->link);
oz_complete_urb(ozhcd->hcd, urbl->urb, status);
oz_free_urb_link(urbl);
}
}
}
/*
* Context: unknown
*/
static int oz_hcd_start(struct usb_hcd *hcd)
{
hcd->power_budget = 200;
hcd->state = HC_STATE_RUNNING;
hcd->uses_new_polling = 1;
return 0;
}
/*
* Context: unknown
*/
static void oz_hcd_stop(struct usb_hcd *hcd)
{
}
/*
* Context: unknown
*/
static void oz_hcd_shutdown(struct usb_hcd *hcd)
{
}
/*
* Called to queue an urb for the device.
* This function should return a non-zero error code if it fails the urb but
* should not call usb_hcd_giveback_urb().
* Context: any
*/
static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
int rc;
int port_ix;
struct oz_port *port;
unsigned long irq_state;
struct oz_urb_link *urbl;
oz_dbg(URB, "%s: (%p)\n", __func__, urb);
if (unlikely(ozhcd == NULL)) {
oz_dbg(URB, "Refused urb(%p) not ozhcd\n", urb);
return -EPIPE;
}
if (unlikely(hcd->state != HC_STATE_RUNNING)) {
oz_dbg(URB, "Refused urb(%p) not running\n", urb);
return -EPIPE;
}
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
if (port_ix < 0)
return -EPIPE;
port = &ozhcd->ports[port_ix];
if (port == NULL)
return -EPIPE;
if (!(port->flags & OZ_PORT_F_PRESENT) ||
(port->flags & OZ_PORT_F_CHANGED)) {
oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
port_ix, urb->dev->devnum);
return -EPIPE;
}
urb->hcpriv = port;
/* Put request in queue for processing by tasklet.
*/
urbl = oz_alloc_urb_link();
if (unlikely(urbl == NULL))
return -ENOMEM;
urbl->urb = urb;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (unlikely(rc)) {
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_free_urb_link(urbl);
return rc;
}
list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
tasklet_schedule(&g_urb_process_tasklet);
atomic_inc(&g_pending_urbs);
return 0;
}
/*
* Context: tasklet
*/
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb)
{
struct oz_urb_link *urbl;
if (unlikely(ep == NULL))
return NULL;
list_for_each_entry(urbl, &ep->urb_list, link) {
if (urbl->urb == urb) {
list_del_init(&urbl->link);
if (usb_pipeisoc(urb->pipe)) {
ep->credit -= urb->number_of_packets;
if (ep->credit < 0)
ep->credit = 0;
}
return urbl;
}
}
return NULL;
}
/*
* Called to dequeue a previously submitted urb for the device.
* Context: any
*/
static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
struct oz_urb_link *urbl;
int rc;
unsigned long irq_state;
oz_dbg(URB, "%s: (%p)\n", __func__, urb);
urbl = oz_alloc_urb_link();
if (unlikely(urbl == NULL))
return -ENOMEM;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
/* The following function checks the urb is still in the queue
* maintained by the core and that the unlinked field is zero.
* If both are true the function sets the unlinked field and returns
* zero. Otherwise it returns an error.
*/
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	/* We have to check that we haven't completed the urb or are about
	 * to complete it. When we do, we set hcpriv to NULL, so if this has
	 * already happened we don't put the urb in the cancel queue.
*/
if ((rc == 0) && urb->hcpriv) {
urbl->urb = urb;
urbl->port = (struct oz_port *)urb->hcpriv;
urbl->ep_num = usb_pipeendpoint(urb->pipe);
if (usb_pipein(urb->pipe))
urbl->ep_num |= USB_DIR_IN;
list_add_tail(&urbl->link, &ozhcd->urb_cancel_list);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
tasklet_schedule(&g_urb_cancel_tasklet);
} else {
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_free_urb_link(urbl);
}
return rc;
}
/*
* Context: unknown
*/
static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
}
/*
* Context: unknown
*/
static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
}
/*
* Context: unknown
*/
static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
{
oz_dbg(ON, "oz_hcd_get_frame_number\n");
return oz_usb_get_frame_number();
}
/*
* Context: softirq
 * This is called as a consequence of us calling usb_hcd_poll_rh_status() and we
* always do that in softirq context.
*/
static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
int i;
buf[0] = 0;
buf[1] = 0;
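	/* Build the hub status-change bitmap: bit 0 of buf[0] is the hub
	 * itself, so port i (0 based) maps to bit (i + 1), with ports 7 and
	 * above spilling into buf[1].
	 */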
spin_lock_bh(&ozhcd->hcd_lock);
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
oz_dbg(HUB, "Port %d changed\n", i);
ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
if (i < 7)
buf[0] |= 1 << (i + 1);
else
buf[1] |= 1 << (i - 7);
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
if (buf[0] != 0 || buf[1] != 0)
return 2;
return 0;
}
/*
* Context: process
*/
static void oz_get_hub_descriptor(struct usb_hcd *hcd,
struct usb_hub_descriptor *desc)
{
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(0x0001);
desc->bNbrPorts = OZ_NB_PORTS;
}
/*
* Context: process
*/
static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
{
struct oz_port *port;
u8 port_id = (u8)windex;
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned set_bits = 0;
unsigned clear_bits = 0;
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
return -EPIPE;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
break;
case USB_PORT_FEAT_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
clear_bits = USB_PORT_STAT_RESET;
ozhcd->ports[port_id-1].bus_addr = 0;
break;
case USB_PORT_FEAT_POWER:
oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
set_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
break;
case USB_PORT_FEAT_C_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
break;
case USB_PORT_FEAT_C_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
break;
case USB_PORT_FEAT_TEST:
oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
break;
default:
oz_dbg(HUB, "Other %d\n", wvalue);
break;
}
if (set_bits || clear_bits) {
spin_lock_bh(&port->port_lock);
port->status &= ~clear_bits;
port->status |= set_bits;
spin_unlock_bh(&port->port_lock);
}
oz_dbg(HUB, "Port[%d] status = 0x%x\n", port_id, port->status);
return 0;
}
/*
* Context: process
*/
static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
{
struct oz_port *port;
u8 port_id = (u8)windex;
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned clear_bits = 0;
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
return -EPIPE;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
clear_bits = USB_PORT_STAT_ENABLE;
break;
case USB_PORT_FEAT_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
break;
case USB_PORT_FEAT_POWER:
oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
clear_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
clear_bits = USB_PORT_STAT_C_CONNECTION << 16;
break;
case USB_PORT_FEAT_C_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
clear_bits = USB_PORT_STAT_C_ENABLE << 16;
break;
case USB_PORT_FEAT_C_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
		clear_bits = USB_PORT_STAT_C_RESET << 16;
break;
case USB_PORT_FEAT_TEST:
oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
break;
default:
oz_dbg(HUB, "Other %d\n", wvalue);
break;
}
if (clear_bits) {
spin_lock_bh(&port->port_lock);
port->status &= ~clear_bits;
spin_unlock_bh(&port->port_lock);
}
oz_dbg(HUB, "Port[%d] status = 0x%x\n",
port_id, ozhcd->ports[port_id-1].status);
return 0;
}
/*
* Context: process
*/
static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
{
struct oz_hcd *ozhcd;
u32 status;
if ((windex < 1) || (windex > OZ_NB_PORTS))
return -EPIPE;
ozhcd = oz_hcd_private(hcd);
oz_dbg(HUB, "GetPortStatus windex = %d\n", windex);
status = ozhcd->ports[windex-1].status;
put_unaligned(cpu_to_le32(status), (__le32 *)buf);
oz_dbg(HUB, "Port[%d] status = %x\n", windex, status);
return 0;
}
/*
* Context: process
*/
static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
u16 windex, char *buf, u16 wlength)
{
int err = 0;
switch (req_type) {
case ClearHubFeature:
oz_dbg(HUB, "ClearHubFeature: %d\n", req_type);
break;
case ClearPortFeature:
err = oz_clear_port_feature(hcd, wvalue, windex);
break;
case GetHubDescriptor:
oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
put_unaligned(cpu_to_le32(0), (__le32 *)buf);
break;
case GetPortStatus:
err = oz_get_port_status(hcd, windex, buf);
break;
case SetHubFeature:
oz_dbg(HUB, "SetHubFeature: %d\n", req_type);
break;
case SetPortFeature:
err = oz_set_port_feature(hcd, wvalue, windex);
break;
default:
oz_dbg(HUB, "Other: %d\n", req_type);
break;
}
return err;
}
/*
* Context: process
*/
static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
hcd->state = HC_STATE_SUSPENDED;
ozhcd->flags |= OZ_HDC_F_SUSPENDED;
spin_unlock_bh(&ozhcd->hcd_lock);
return 0;
}
/*
* Context: process
*/
static int oz_hcd_bus_resume(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
hcd->state = HC_STATE_RUNNING;
spin_unlock_bh(&ozhcd->hcd_lock);
return 0;
}
static void oz_plat_shutdown(struct platform_device *dev)
{
}
/*
* Context: process
*/
static int oz_plat_probe(struct platform_device *dev)
{
int i;
int err;
struct usb_hcd *hcd;
struct oz_hcd *ozhcd;
hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
if (hcd == NULL) {
oz_dbg(ON, "Failed to created hcd object OK\n");
return -ENOMEM;
}
ozhcd = oz_hcd_private(hcd);
memset(ozhcd, 0, sizeof(*ozhcd));
INIT_LIST_HEAD(&ozhcd->urb_pending_list);
INIT_LIST_HEAD(&ozhcd->urb_cancel_list);
INIT_LIST_HEAD(&ozhcd->orphanage);
ozhcd->hcd = hcd;
ozhcd->conn_port = -1;
spin_lock_init(&ozhcd->hcd_lock);
for (i = 0; i < OZ_NB_PORTS; i++) {
struct oz_port *port = &ozhcd->ports[i];
port->ozhcd = ozhcd;
port->flags = 0;
port->status = 0;
port->bus_addr = 0xff;
spin_lock_init(&port->port_lock);
}
err = usb_add_hcd(hcd, 0, 0);
if (err) {
oz_dbg(ON, "Failed to add hcd object OK\n");
usb_put_hcd(hcd);
return -1;
}
device_wakeup_enable(hcd->self.controller);
spin_lock_bh(&g_hcdlock);
g_ozhcd = ozhcd;
spin_unlock_bh(&g_hcdlock);
return 0;
}
/*
* Context: unknown
*/
static int oz_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct oz_hcd *ozhcd;
if (hcd == NULL)
return -1;
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&g_hcdlock);
if (ozhcd == g_ozhcd)
g_ozhcd = NULL;
spin_unlock_bh(&g_hcdlock);
oz_dbg(ON, "Clearing orphanage\n");
oz_hcd_clear_orphanage(ozhcd, -EPIPE);
oz_dbg(ON, "Removing hcd\n");
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
return 0;
}
/*
* Context: unknown
*/
static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
{
return 0;
}
/*
* Context: unknown
*/
static int oz_plat_resume(struct platform_device *dev)
{
return 0;
}
/*
* Context: process
*/
int oz_hcd_init(void)
{
int err;
if (usb_disabled())
return -ENODEV;
oz_urb_link_cache = KMEM_CACHE(oz_urb_link, 0);
if (!oz_urb_link_cache)
return -ENOMEM;
tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
err = platform_driver_register(&g_oz_plat_drv);
oz_dbg(ON, "platform_driver_register() returned %d\n", err);
if (err)
goto error;
g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
if (g_plat_dev == NULL) {
err = -ENOMEM;
goto error1;
}
oz_dbg(ON, "platform_device_alloc() succeeded\n");
err = platform_device_add(g_plat_dev);
if (err)
goto error2;
oz_dbg(ON, "platform_device_add() succeeded\n");
return 0;
error2:
platform_device_put(g_plat_dev);
error1:
platform_driver_unregister(&g_oz_plat_drv);
error:
tasklet_disable(&g_urb_process_tasklet);
tasklet_disable(&g_urb_cancel_tasklet);
oz_dbg(ON, "oz_hcd_init() failed %d\n", err);
return err;
}
/*
* Context: process
*/
void oz_hcd_term(void)
{
msleep(OZ_HUB_DEBOUNCE_TIMEOUT);
tasklet_kill(&g_urb_process_tasklet);
tasklet_kill(&g_urb_cancel_tasklet);
platform_device_unregister(g_plat_dev);
platform_driver_unregister(&g_oz_plat_drv);
oz_dbg(ON, "Pending urbs:%d\n", atomic_read(&g_pending_urbs));
kmem_cache_destroy(oz_urb_link_cache);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_1613_0 |
crossvul-cpp_data_bad_2120_0 | /* bpf_jit_comp.c : BPF JIT compiler
*
* Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
/*
* Conventions :
* EAX : BPF A accumulator
* EBX : BPF X accumulator
* RDI : pointer to skb (first argument given to JIT function)
* RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
* ECX,EDX,ESI : scratch registers
* r9d : skb->len - skb->data_len (headlen)
* r8 : skb->data
* -8(RBP) : saved RBX value
* -16(RBP)..-80(RBP) : BPF_MEMWORDS values
*/
int bpf_jit_enable __read_mostly;
/*
* assembly code in arch/x86/net/bpf_jit.S
*/
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
if (len == 1)
*ptr = bytes;
else if (len == 2)
*(u16 *)ptr = bytes;
else {
*(u32 *)ptr = bytes;
barrier();
}
return ptr + len;
}
#define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
#define EMIT1(b1) EMIT(b1, 1)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
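/*
 * emit_code() stores multi-byte values with a single u16/u32 store, which on
 * x86 (little-endian) lays the bytes out in argument order: for example,
 * EMIT2(0x31, 0xc0) writes the two opcode bytes 0x31 0xc0 (xor %eax,%eax).
 */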
static inline bool is_imm8(int value)
{
return value <= 127 && value >= -128;
}
static inline bool is_near(int offset)
{
return offset <= 127 && offset >= -128;
}
#define EMIT_JMP(offset) \
do { \
if (offset) { \
if (is_near(offset)) \
EMIT2(0xeb, offset); /* jmp .+off8 */ \
else \
EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \
} \
} while (0)
/* list of x86 cond jumps opcodes (. + s8)
* Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
*/
#define X86_JB 0x72
#define X86_JAE 0x73
#define X86_JE 0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA 0x77
#define EMIT_COND_JMP(op, offset) \
do { \
if (is_near(offset)) \
EMIT2(op, offset); /* jxx .+off8 */ \
else { \
EMIT2(0x0f, op + 0x10); \
EMIT(offset, 4); /* jxx .+off32 */ \
} \
} while (0)
#define COND_SEL(CODE, TOP, FOP) \
case CODE: \
t_op = TOP; \
f_op = FOP; \
goto cond_branch
#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG 2 /* ebx is used */
#define SEEN_MEM 4 /* use mem[] for temporary storage */
static inline void bpf_flush_icache(void *start, void *end)
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
smp_wmb();
flush_icache_range((unsigned long)start, (unsigned long)end);
set_fs(old_fs);
}
void bpf_jit_compile(struct sk_filter *fp)
{
u8 temp[64];
u8 *prog;
unsigned int proglen, oldproglen = 0;
int ilen, i;
int t_offset, f_offset;
u8 t_op, f_op, seen = 0, pass;
u8 *image = NULL;
u8 *func;
int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
unsigned int cleanup_addr; /* epilogue code offset */
unsigned int *addrs;
const struct sock_filter *filter = fp->insns;
int flen = fp->len;
if (!bpf_jit_enable)
return;
addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
/* Before first pass, make a rough estimation of addrs[]
* each bpf instruction is translated to less than 64 bytes
*/
for (proglen = 0, i = 0; i < flen; i++) {
proglen += 64;
addrs[i] = proglen;
}
cleanup_addr = proglen; /* epilogue address */
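	/* Each pass re-emits the whole program, using the addrs[] offsets
	 * recorded by the previous pass for jump targets. Instruction sizes
	 * can only shrink from the initial 64-byte estimate, so the offsets
	 * converge after a few passes.
	 */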
for (pass = 0; pass < 10; pass++) {
/* no prologue/epilogue for trivial filters (RET something) */
proglen = 0;
prog = temp;
if (seen) {
EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
/* note : must save %rbx in case bpf_error is hit */
if (seen & (SEEN_XREG | SEEN_DATAREF))
EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
if (seen & SEEN_XREG)
				CLEAR_X(); /* make sure we don't leak kernel memory */
/*
* If this filter needs to access skb data,
* loads r9 and r8 with :
* r9 = skb->len - skb->data_len
* r8 = skb->data
*/
if (seen & SEEN_DATAREF) {
if (offsetof(struct sk_buff, len) <= 127)
/* mov off8(%rdi),%r9d */
EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
else {
/* mov off32(%rdi),%r9d */
EMIT3(0x44, 0x8b, 0x8f);
EMIT(offsetof(struct sk_buff, len), 4);
}
if (is_imm8(offsetof(struct sk_buff, data_len)))
/* sub off8(%rdi),%r9d */
EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
else {
EMIT3(0x44, 0x2b, 0x8f);
EMIT(offsetof(struct sk_buff, data_len), 4);
}
if (is_imm8(offsetof(struct sk_buff, data)))
/* mov off8(%rdi),%r8 */
EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
else {
/* mov off32(%rdi),%r8 */
EMIT3(0x4c, 0x8b, 0x87);
EMIT(offsetof(struct sk_buff, data), 4);
}
}
}
switch (filter[0].code) {
case BPF_S_RET_K:
case BPF_S_LD_W_LEN:
case BPF_S_ANC_PROTOCOL:
case BPF_S_ANC_IFINDEX:
case BPF_S_ANC_MARK:
case BPF_S_ANC_RXHASH:
case BPF_S_ANC_CPU:
case BPF_S_ANC_QUEUE:
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
/* first instruction sets A register (or is RET 'constant') */
break;
default:
			/* make sure we don't leak kernel information to userspace */
CLEAR_A(); /* A = 0 */
}
for (i = 0; i < flen; i++) {
unsigned int K = filter[i].k;
switch (filter[i].code) {
case BPF_S_ALU_ADD_X: /* A += X; */
seen |= SEEN_XREG;
EMIT2(0x01, 0xd8); /* add %ebx,%eax */
break;
case BPF_S_ALU_ADD_K: /* A += K; */
if (!K)
break;
if (is_imm8(K))
EMIT3(0x83, 0xc0, K); /* add imm8,%eax */
else
EMIT1_off32(0x05, K); /* add imm32,%eax */
break;
case BPF_S_ALU_SUB_X: /* A -= X; */
seen |= SEEN_XREG;
EMIT2(0x29, 0xd8); /* sub %ebx,%eax */
break;
case BPF_S_ALU_SUB_K: /* A -= K */
if (!K)
break;
if (is_imm8(K))
EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
else
EMIT1_off32(0x2d, K); /* sub imm32,%eax */
break;
case BPF_S_ALU_MUL_X: /* A *= X; */
seen |= SEEN_XREG;
EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */
break;
case BPF_S_ALU_MUL_K: /* A *= K */
if (is_imm8(K))
EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
else {
EMIT2(0x69, 0xc0); /* imul imm32,%eax */
EMIT(K, 4);
}
break;
case BPF_S_ALU_DIV_X: /* A /= X; */
seen |= SEEN_XREG;
EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
if (pc_ret0 != -1)
EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
else {
EMIT_COND_JMP(X86_JNE, 2 + 5);
CLEAR_A();
EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
}
EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
break;
case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
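/* K holds the pre-computed reciprocal of the divisor, so this
 * computes A = (u32)(((u64)A * K) >> 32).
 */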
EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
EMIT(K, 4);
EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
break;
case BPF_S_ALU_AND_X:
seen |= SEEN_XREG;
EMIT2(0x21, 0xd8); /* and %ebx,%eax */
break;
case BPF_S_ALU_AND_K:
if (K >= 0xFFFFFF00) {
EMIT2(0x24, K & 0xFF); /* and imm8,%al */
} else if (K >= 0xFFFF0000) {
EMIT2(0x66, 0x25); /* and imm16,%ax */
EMIT2(K, 2);
} else {
EMIT1_off32(0x25, K); /* and imm32,%eax */
}
break;
case BPF_S_ALU_OR_X:
seen |= SEEN_XREG;
EMIT2(0x09, 0xd8); /* or %ebx,%eax */
break;
case BPF_S_ALU_OR_K:
if (is_imm8(K))
EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
else
EMIT1_off32(0x0d, K); /* or imm32,%eax */
break;
case BPF_S_ALU_LSH_X: /* A <<= X; */
seen |= SEEN_XREG;
EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
break;
case BPF_S_ALU_LSH_K:
if (K == 0)
break;
else if (K == 1)
EMIT2(0xd1, 0xe0); /* shl %eax */
else
EMIT3(0xc1, 0xe0, K);
break;
case BPF_S_ALU_RSH_X: /* A >>= X; */
seen |= SEEN_XREG;
EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
break;
case BPF_S_ALU_RSH_K: /* A >>= K; */
if (K == 0)
break;
else if (K == 1)
EMIT2(0xd1, 0xe8); /* shr %eax */
else
EMIT3(0xc1, 0xe8, K);
break;
case BPF_S_ALU_NEG:
EMIT2(0xf7, 0xd8); /* neg %eax */
break;
case BPF_S_RET_K:
if (!K) {
if (pc_ret0 == -1)
pc_ret0 = i;
CLEAR_A();
} else {
EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
}
/* fall through */
case BPF_S_RET_A:
if (seen) {
if (i != flen - 1) {
EMIT_JMP(cleanup_addr - addrs[i]);
break;
}
if (seen & SEEN_XREG)
EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
EMIT1(0xc9); /* leaveq */
}
EMIT1(0xc3); /* ret */
break;
case BPF_S_MISC_TAX: /* X = A */
seen |= SEEN_XREG;
EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
break;
case BPF_S_MISC_TXA: /* A = X */
seen |= SEEN_XREG;
EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
break;
case BPF_S_LD_IMM: /* A = K */
if (!K)
CLEAR_A();
else
EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
break;
case BPF_S_LDX_IMM: /* X = K */
seen |= SEEN_XREG;
if (!K)
CLEAR_X();
else
EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
break;
case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
seen |= SEEN_MEM;
EMIT3(0x8b, 0x45, 0xf0 - K*4);
break;
case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
seen |= SEEN_XREG | SEEN_MEM;
EMIT3(0x8b, 0x5d, 0xf0 - K*4);
break;
case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
seen |= SEEN_MEM;
EMIT3(0x89, 0x45, 0xf0 - K*4);
break;
case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
seen |= SEEN_XREG | SEEN_MEM;
EMIT3(0x89, 0x5d, 0xf0 - K*4);
break;
case BPF_S_LD_W_LEN: /* A = skb->len; */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
if (is_imm8(offsetof(struct sk_buff, len)))
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, len), 4);
}
break;
case BPF_S_LDX_W_LEN: /* X = skb->len; */
seen |= SEEN_XREG;
if (is_imm8(offsetof(struct sk_buff, len)))
/* mov off8(%rdi),%ebx */
EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
else {
EMIT2(0x8b, 0x9f);
EMIT(offsetof(struct sk_buff, len), 4);
}
break;
case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
if (is_imm8(offsetof(struct sk_buff, protocol))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
} else {
EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
EMIT(offsetof(struct sk_buff, protocol), 4);
}
EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
break;
case BPF_S_ANC_IFINDEX:
if (is_imm8(offsetof(struct sk_buff, dev))) {
/* movq off8(%rdi),%rax */
EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
} else {
EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
EMIT(offsetof(struct sk_buff, dev), 4);
}
EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
EMIT(offsetof(struct net_device, ifindex), 4);
break;
case BPF_S_ANC_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
if (is_imm8(offsetof(struct sk_buff, mark))) {
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
} else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, mark), 4);
}
break;
case BPF_S_ANC_RXHASH:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
if (is_imm8(offsetof(struct sk_buff, rxhash))) {
/* mov off8(%rdi),%eax */
EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
} else {
EMIT2(0x8b, 0x87);
EMIT(offsetof(struct sk_buff, rxhash), 4);
}
break;
case BPF_S_ANC_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
/* movzwl off8(%rdi),%eax */
EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
} else {
EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
EMIT(offsetof(struct sk_buff, queue_mapping), 4);
}
break;
case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
#else
CLEAR_A();
#endif
break;
case BPF_S_LD_W_ABS:
func = sk_load_word;
common_load: seen |= SEEN_DATAREF;
if ((int)K < 0)
goto out;
t_offset = func - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call */
break;
case BPF_S_LD_H_ABS:
func = sk_load_half;
goto common_load;
case BPF_S_LD_B_ABS:
func = sk_load_byte;
goto common_load;
case BPF_S_LDX_B_MSH:
if ((int)K < 0) {
if (pc_ret0 != -1) {
EMIT_JMP(addrs[pc_ret0] - addrs[i]);
break;
}
CLEAR_A();
EMIT_JMP(cleanup_addr - addrs[i]);
break;
}
seen |= SEEN_DATAREF | SEEN_XREG;
t_offset = sk_load_byte_msh - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
break;
case BPF_S_LD_W_IND:
func = sk_load_word_ind;
common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
t_offset = func - (image + addrs[i]);
EMIT1_off32(0xbe, K); /* mov imm32,%esi */
EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
break;
case BPF_S_LD_H_IND:
func = sk_load_half_ind;
goto common_load_ind;
case BPF_S_LD_B_IND:
func = sk_load_byte_ind;
goto common_load_ind;
case BPF_S_JMP_JA:
t_offset = addrs[i + K] - addrs[i];
EMIT_JMP(t_offset);
break;
COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
t_offset = addrs[i + filter[i].jt] - addrs[i];
/* same targets, can avoid doing the test :) */
if (filter[i].jt == filter[i].jf) {
EMIT_JMP(t_offset);
break;
}
switch (filter[i].code) {
case BPF_S_JMP_JGT_X:
case BPF_S_JMP_JGE_X:
case BPF_S_JMP_JEQ_X:
seen |= SEEN_XREG;
EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
break;
case BPF_S_JMP_JSET_X:
seen |= SEEN_XREG;
EMIT2(0x85, 0xd8); /* test %ebx,%eax */
break;
case BPF_S_JMP_JEQ_K:
if (K == 0) {
EMIT2(0x85, 0xc0); /* test %eax,%eax */
break;
}
case BPF_S_JMP_JGT_K:
case BPF_S_JMP_JGE_K:
if (K <= 127)
EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
else
EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
break;
case BPF_S_JMP_JSET_K:
if (K <= 0xFF)
EMIT2(0xa8, K); /* test imm8,%al */
else if (!(K & 0xFFFF00FF))
EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
else if (K <= 0xFFFF) {
EMIT2(0x66, 0xa9); /* test imm16,%ax */
EMIT(K, 2);
} else {
EMIT1_off32(0xa9, K); /* test imm32,%eax */
}
break;
}
if (filter[i].jt != 0) {
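/* the taken branch must also skip over the unconditional
 * EMIT_JMP(f_offset) emitted right after the conditional jump */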
if (filter[i].jf)
t_offset += is_near(f_offset) ? 2 : 6;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);
break;
}
EMIT_COND_JMP(f_op, f_offset);
break;
default:
/* hmm, too complex filter, give up with jit compiler */
goto out;
}
ilen = prog - temp;
if (image) {
if (unlikely(proglen + ilen > oldproglen)) {
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
module_free(NULL, image);
return;
}
memcpy(image + proglen, temp, ilen);
}
proglen += ilen;
addrs[i] = proglen;
prog = temp;
}
/* The last BPF instruction is always a RET;
 * use it to locate the cleanup (epilogue) instruction(s) address.
 */
cleanup_addr = proglen - 1; /* ret */
if (seen)
cleanup_addr -= 1; /* leaveq */
if (seen & SEEN_XREG)
cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
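/* cleanup_addr now points at the first epilogue byte: the 4-byte
 * "mov -8(%rbp),%rbx" (when X was used), then "leaveq", then "ret".
 */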
if (image) {
WARN_ON(proglen != oldproglen);
break;
}
if (proglen == oldproglen) {
image = module_alloc(max_t(unsigned int,
proglen,
sizeof(struct work_struct)));
if (!image)
goto out;
}
oldproglen = proglen;
}
if (bpf_jit_enable > 1)
pr_err("flen=%d proglen=%u pass=%d image=%p\n",
flen, proglen, pass, image);
if (image) {
if (bpf_jit_enable > 1)
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
16, 1, image, proglen, false);
bpf_flush_icache(image, image + proglen);
fp->bpf_func = (void *)image;
}
out:
kfree(addrs);
return;
}
static void jit_free_defer(struct work_struct *arg)
{
module_free(NULL, arg);
}
/* run from softirq, we must use a work_struct to call
* module_free() from process context
*/
void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter) {
struct work_struct *work = (struct work_struct *)fp->bpf_func;
INIT_WORK(work, jit_free_defer);
schedule_work(work);
}
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2120_0 |
crossvul-cpp_data_bad_5540_0 | /*-
* Copyright (c) 2003-2010 Tim Kientzle
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "archive_platform.h"
__FBSDID("$FreeBSD: head/lib/libarchive/archive_write.c 201099 2009-12-28 03:03:00Z kientzle $");
/*
* This file contains the "essential" portions of the write API, that
* is, stuff that will essentially always be used by any client that
* actually needs to write an archive. Optional pieces have been, as
* far as possible, separated out into separate files to reduce
* needlessly bloating statically-linked clients.
*/
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#include <stdio.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#include <time.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "archive.h"
#include "archive_entry.h"
#include "archive_private.h"
#include "archive_write_private.h"
static struct archive_vtable *archive_write_vtable(void);
static int _archive_filter_code(struct archive *, int);
static const char *_archive_filter_name(struct archive *, int);
static int64_t _archive_filter_bytes(struct archive *, int);
static int _archive_write_filter_count(struct archive *);
static int _archive_write_close(struct archive *);
static int _archive_write_free(struct archive *);
static int _archive_write_header(struct archive *, struct archive_entry *);
static int _archive_write_finish_entry(struct archive *);
static ssize_t _archive_write_data(struct archive *, const void *, size_t);
struct archive_none {
size_t buffer_size;
size_t avail;
char *buffer;
char *next;
};
static struct archive_vtable *
archive_write_vtable(void)
{
static struct archive_vtable av;
static int inited = 0;
if (!inited) {
av.archive_close = _archive_write_close;
av.archive_filter_bytes = _archive_filter_bytes;
av.archive_filter_code = _archive_filter_code;
av.archive_filter_name = _archive_filter_name;
av.archive_filter_count = _archive_write_filter_count;
av.archive_free = _archive_write_free;
av.archive_write_header = _archive_write_header;
av.archive_write_finish_entry = _archive_write_finish_entry;
av.archive_write_data = _archive_write_data;
inited = 1;
}
return (&av);
}
/*
* Allocate, initialize and return an archive object.
*/
struct archive *
archive_write_new(void)
{
struct archive_write *a;
unsigned char *nulls;
a = (struct archive_write *)malloc(sizeof(*a));
if (a == NULL)
return (NULL);
memset(a, 0, sizeof(*a));
a->archive.magic = ARCHIVE_WRITE_MAGIC;
a->archive.state = ARCHIVE_STATE_NEW;
a->archive.vtable = archive_write_vtable();
/*
* The value 10240 here matches the traditional tar default,
* but is otherwise arbitrary.
* TODO: Set the default block size from the format selected.
*/
a->bytes_per_block = 10240;
a->bytes_in_last_block = -1; /* Default */
/* Initialize a block of nulls for padding purposes. */
a->null_length = 1024;
nulls = (unsigned char *)malloc(a->null_length);
if (nulls == NULL) {
free(a);
return (NULL);
}
memset(nulls, 0, a->null_length);
a->nulls = nulls;
return (&a->archive);
}
/*
* Set the block size. Returns 0 if successful.
*/
int
archive_write_set_bytes_per_block(struct archive *_a, int bytes_per_block)
{
struct archive_write *a = (struct archive_write *)_a;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_NEW, "archive_write_set_bytes_per_block");
a->bytes_per_block = bytes_per_block;
return (ARCHIVE_OK);
}
/*
* Get the current block size. -1 if it has never been set.
*/
int
archive_write_get_bytes_per_block(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_ANY, "archive_write_get_bytes_per_block");
return (a->bytes_per_block);
}
/*
* Set the size for the last block.
* Returns 0 if successful.
*/
int
archive_write_set_bytes_in_last_block(struct archive *_a, int bytes)
{
struct archive_write *a = (struct archive_write *)_a;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_ANY, "archive_write_set_bytes_in_last_block");
a->bytes_in_last_block = bytes;
return (ARCHIVE_OK);
}
/*
* Return the value set above. -1 indicates it has not been set.
*/
int
archive_write_get_bytes_in_last_block(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_ANY, "archive_write_get_bytes_in_last_block");
return (a->bytes_in_last_block);
}
/*
* dev/ino of a file to be rejected. Used to prevent adding
* an archive to itself recursively.
*/
int
archive_write_set_skip_file(struct archive *_a, int64_t d, int64_t i)
{
struct archive_write *a = (struct archive_write *)_a;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_ANY, "archive_write_set_skip_file");
a->skip_file_set = 1;
a->skip_file_dev = d;
a->skip_file_ino = i;
return (ARCHIVE_OK);
}
/*
* Allocate and return the next filter structure.
*/
struct archive_write_filter *
__archive_write_allocate_filter(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
struct archive_write_filter *f;
f = calloc(1, sizeof(*f));
f->archive = _a;
if (a->filter_first == NULL)
a->filter_first = f;
else
a->filter_last->next_filter = f;
a->filter_last = f;
return f;
}
/*
* Write data to a particular filter.
*/
int
__archive_write_filter(struct archive_write_filter *f,
const void *buff, size_t length)
{
int r;
if (length == 0)
return(ARCHIVE_OK);
if (f->write == NULL)
/* If unset, a fatal error has already occurred, so this filter
 * didn't open. We cannot write anything. */
return(ARCHIVE_FATAL);
r = (f->write)(f, buff, length);
f->bytes_written += length;
return (r);
}
/*
* Open a filter.
*/
int
__archive_write_open_filter(struct archive_write_filter *f)
{
if (f->open == NULL)
return (ARCHIVE_OK);
return (f->open)(f);
}
/*
* Close a filter.
*/
int
__archive_write_close_filter(struct archive_write_filter *f)
{
if (f->close != NULL)
return (f->close)(f);
if (f->next_filter != NULL)
return (__archive_write_close_filter(f->next_filter));
return (ARCHIVE_OK);
}
int
__archive_write_output(struct archive_write *a, const void *buff, size_t length)
{
return (__archive_write_filter(a->filter_first, buff, length));
}
int
__archive_write_nulls(struct archive_write *a, size_t length)
{
if (length == 0)
return (ARCHIVE_OK);
while (length > 0) {
size_t to_write = length < a->null_length ? length : a->null_length;
int r = __archive_write_output(a, a->nulls, to_write);
if (r < ARCHIVE_OK)
return (r);
length -= to_write;
}
return (ARCHIVE_OK);
}
static int
archive_write_client_open(struct archive_write_filter *f)
{
struct archive_write *a = (struct archive_write *)f->archive;
struct archive_none *state;
void *buffer;
size_t buffer_size;
f->bytes_per_block = archive_write_get_bytes_per_block(f->archive);
f->bytes_in_last_block =
archive_write_get_bytes_in_last_block(f->archive);
buffer_size = f->bytes_per_block;
state = (struct archive_none *)calloc(1, sizeof(*state));
buffer = (char *)malloc(buffer_size);
if (state == NULL || buffer == NULL) {
free(state);
free(buffer);
archive_set_error(f->archive, ENOMEM,
"Can't allocate data for output buffering");
return (ARCHIVE_FATAL);
}
state->buffer_size = buffer_size;
state->buffer = buffer;
state->next = state->buffer;
state->avail = state->buffer_size;
f->data = state;
if (a->client_opener == NULL)
return (ARCHIVE_OK);
return (a->client_opener(f->archive, a->client_data));
}
static int
archive_write_client_write(struct archive_write_filter *f,
const void *_buff, size_t length)
{
struct archive_write *a = (struct archive_write *)f->archive;
struct archive_none *state = (struct archive_none *)f->data;
const char *buff = (const char *)_buff;
ssize_t remaining, to_copy;
ssize_t bytes_written;
remaining = length;
/*
* If there is no buffer for blocking, just pass the data
* straight through to the client write callback. In
* particular, this supports "no write delay" operation for
* special applications. Just set the block size to zero.
*/
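/* e.g. archive_write_set_bytes_per_block(a, 0) selects this path. */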
if (state->buffer_size == 0) {
while (remaining > 0) {
bytes_written = (a->client_writer)(&a->archive,
a->client_data, buff, remaining);
if (bytes_written <= 0)
return (ARCHIVE_FATAL);
remaining -= bytes_written;
buff += bytes_written;
}
return (ARCHIVE_OK);
}
/* If the copy buffer isn't empty, try to fill it. */
if (state->avail < state->buffer_size) {
/* If buffer is not empty... */
/* ... copy data into buffer ... */
to_copy = ((size_t)remaining > state->avail) ?
state->avail : (size_t)remaining;
memcpy(state->next, buff, to_copy);
state->next += to_copy;
state->avail -= to_copy;
buff += to_copy;
remaining -= to_copy;
/* ... if it's full, write it out. */
if (state->avail == 0) {
char *p = state->buffer;
size_t to_write = state->buffer_size;
while (to_write > 0) {
bytes_written = (a->client_writer)(&a->archive,
a->client_data, p, to_write);
if (bytes_written <= 0)
return (ARCHIVE_FATAL);
if ((size_t)bytes_written > to_write) {
archive_set_error(&(a->archive),
-1, "write overrun");
return (ARCHIVE_FATAL);
}
p += bytes_written;
to_write -= bytes_written;
}
state->next = state->buffer;
state->avail = state->buffer_size;
}
}
while ((size_t)remaining >= state->buffer_size) {
/* Write out full blocks directly to client. */
bytes_written = (a->client_writer)(&a->archive,
a->client_data, buff, state->buffer_size);
if (bytes_written <= 0)
return (ARCHIVE_FATAL);
buff += bytes_written;
remaining -= bytes_written;
}
if (remaining > 0) {
/* Copy last bit into copy buffer. */
memcpy(state->next, buff, remaining);
state->next += remaining;
state->avail -= remaining;
}
return (ARCHIVE_OK);
}
static int
archive_write_client_close(struct archive_write_filter *f)
{
struct archive_write *a = (struct archive_write *)f->archive;
struct archive_none *state = (struct archive_none *)f->data;
ssize_t block_length;
ssize_t target_block_length;
ssize_t bytes_written;
int ret = ARCHIVE_OK;
/* If there's pending data, pad and write the last block */
if (state->next != state->buffer) {
block_length = state->buffer_size - state->avail;
/* Tricky calculation to determine size of last block */
if (a->bytes_in_last_block <= 0)
/* Default or Zero: pad to full block */
target_block_length = a->bytes_per_block;
else
/* Round to next multiple of bytes_in_last_block. */
target_block_length = a->bytes_in_last_block *
( (block_length + a->bytes_in_last_block - 1) /
a->bytes_in_last_block);
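/* e.g. block_length = 600 with bytes_in_last_block = 512
 * rounds up to a target_block_length of 1024 */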
if (target_block_length > a->bytes_per_block)
target_block_length = a->bytes_per_block;
if (block_length < target_block_length) {
memset(state->next, 0,
target_block_length - block_length);
block_length = target_block_length;
}
bytes_written = (a->client_writer)(&a->archive,
a->client_data, state->buffer, block_length);
ret = bytes_written <= 0 ? ARCHIVE_FATAL : ARCHIVE_OK;
}
if (a->client_closer)
(*a->client_closer)(&a->archive, a->client_data);
free(state->buffer);
free(state);
/* Clear the close handler so it is not called again. */
f->close = NULL;
a->client_data = NULL;
return (ret);
}
/*
* Open the archive using the current settings.
*/
int
archive_write_open(struct archive *_a, void *client_data,
archive_open_callback *opener, archive_write_callback *writer,
archive_close_callback *closer)
{
struct archive_write *a = (struct archive_write *)_a;
struct archive_write_filter *client_filter;
int ret, r1;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_NEW, "archive_write_open");
archive_clear_error(&a->archive);
a->client_writer = writer;
a->client_opener = opener;
a->client_closer = closer;
a->client_data = client_data;
client_filter = __archive_write_allocate_filter(_a);
client_filter->open = archive_write_client_open;
client_filter->write = archive_write_client_write;
client_filter->close = archive_write_client_close;
ret = __archive_write_open_filter(a->filter_first);
if (ret < ARCHIVE_WARN) {
r1 = __archive_write_close_filter(a->filter_first);
return (r1 < ret ? r1 : ret);
}
a->archive.state = ARCHIVE_STATE_HEADER;
if (a->format_init)
ret = (a->format_init)(a);
return (ret);
}
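#if 0
/*
 * Illustrative sketch only, not part of the original file: a minimal
 * client of the write API above. The my_open/my_write/my_close callbacks,
 * my_client_data and the file contents are hypothetical placeholders.
 */
static void
example_write_usage(void)
{
struct archive *a = archive_write_new();
struct archive_entry *ae = archive_entry_new();
archive_write_set_format_pax_restricted(a);
archive_write_open(a, my_client_data, my_open, my_write, my_close);
archive_entry_set_pathname(ae, "file.txt");
archive_entry_set_filetype(ae, AE_IFREG);
archive_entry_set_size(ae, 5);
archive_write_header(a, ae);
archive_write_data(a, "hello", 5);
archive_entry_free(ae);
archive_write_close(a);
archive_write_free(a);
}
#endif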
/*
* Close out the archive.
*/
static int
_archive_write_close(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
int r = ARCHIVE_OK, r1 = ARCHIVE_OK;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL,
"archive_write_close");
if (a->archive.state == ARCHIVE_STATE_NEW
|| a->archive.state == ARCHIVE_STATE_CLOSED)
return (ARCHIVE_OK); /* Okay to close() when not open. */
archive_clear_error(&a->archive);
/* Finish the last entry if a finish callback is specified */
if (a->archive.state == ARCHIVE_STATE_DATA
&& a->format_finish_entry != NULL)
r = ((a->format_finish_entry)(a));
/* Finish off the archive. */
/* TODO: have format closers invoke compression close. */
if (a->format_close != NULL) {
r1 = (a->format_close)(a);
if (r1 < r)
r = r1;
}
/* Finish the compression and close the stream. */
r1 = __archive_write_close_filter(a->filter_first);
if (r1 < r)
r = r1;
if (a->archive.state != ARCHIVE_STATE_FATAL)
a->archive.state = ARCHIVE_STATE_CLOSED;
return (r);
}
static int
_archive_write_filter_count(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
struct archive_write_filter *p = a->filter_first;
int count = 0;
while(p) {
count++;
p = p->next_filter;
}
return count;
}
void
__archive_write_filters_free(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
int r = ARCHIVE_OK, r1;
while (a->filter_first != NULL) {
struct archive_write_filter *next
= a->filter_first->next_filter;
if (a->filter_first->free != NULL) {
r1 = (*a->filter_first->free)(a->filter_first);
if (r > r1)
r = r1;
}
free(a->filter_first);
a->filter_first = next;
}
a->filter_last = NULL;
}
/*
* Destroy the archive structure.
*
* Be careful: user might just call write_new and then write_free.
* Don't assume we actually wrote anything or performed any non-trivial
* initialization.
*/
static int
_archive_write_free(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
int r = ARCHIVE_OK, r1;
if (_a == NULL)
return (ARCHIVE_OK);
/* It is okay to call free() in state FATAL. */
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_ANY | ARCHIVE_STATE_FATAL, "archive_write_free");
if (a->archive.state != ARCHIVE_STATE_FATAL)
r = archive_write_close(&a->archive);
/* Release format resources. */
if (a->format_free != NULL) {
r1 = (a->format_free)(a);
if (r1 < r)
r = r1;
}
__archive_write_filters_free(_a);
/* Release various dynamic buffers. */
free((void *)(uintptr_t)(const void *)a->nulls);
archive_string_free(&a->archive.error_string);
a->archive.magic = 0;
__archive_clean(&a->archive);
free(a);
return (r);
}
/*
* Write the appropriate header.
*/
static int
_archive_write_header(struct archive *_a, struct archive_entry *entry)
{
struct archive_write *a = (struct archive_write *)_a;
int ret, r2;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_DATA | ARCHIVE_STATE_HEADER, "archive_write_header");
archive_clear_error(&a->archive);
if (a->format_write_header == NULL) {
archive_set_error(&(a->archive), -1,
"Format must be set before you can write to an archive.");
a->archive.state = ARCHIVE_STATE_FATAL;
return (ARCHIVE_FATAL);
}
/* In particular, "retry" and "fatal" get returned immediately. */
ret = archive_write_finish_entry(&a->archive);
if (ret == ARCHIVE_FATAL) {
a->archive.state = ARCHIVE_STATE_FATAL;
return (ARCHIVE_FATAL);
}
if (ret < ARCHIVE_OK && ret != ARCHIVE_WARN)
return (ret);
if (a->skip_file_set &&
archive_entry_dev_is_set(entry) &&
archive_entry_ino_is_set(entry) &&
archive_entry_dev(entry) == (dev_t)a->skip_file_dev &&
archive_entry_ino64(entry) == a->skip_file_ino) {
archive_set_error(&a->archive, 0,
"Can't add archive to itself");
return (ARCHIVE_FAILED);
}
/* Format and write header. */
r2 = ((a->format_write_header)(a, entry));
if (r2 == ARCHIVE_FATAL) {
a->archive.state = ARCHIVE_STATE_FATAL;
return (ARCHIVE_FATAL);
}
if (r2 < ret)
ret = r2;
a->archive.state = ARCHIVE_STATE_DATA;
return (ret);
}
static int
_archive_write_finish_entry(struct archive *_a)
{
struct archive_write *a = (struct archive_write *)_a;
int ret = ARCHIVE_OK;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA,
"archive_write_finish_entry");
if (a->archive.state & ARCHIVE_STATE_DATA
&& a->format_finish_entry != NULL)
ret = (a->format_finish_entry)(a);
a->archive.state = ARCHIVE_STATE_HEADER;
return (ret);
}
/*
* Note that the compressor is responsible for blocking.
*/
static ssize_t
_archive_write_data(struct archive *_a, const void *buff, size_t s)
{
struct archive_write *a = (struct archive_write *)_a;
archive_check_magic(&a->archive, ARCHIVE_WRITE_MAGIC,
ARCHIVE_STATE_DATA, "archive_write_data");
archive_clear_error(&a->archive);
return ((a->format_write_data)(a, buff, s));
}
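/* Return the n-th filter in the write pipeline, the last (client)
 * filter for n == -1, or NULL when n is out of range.
 */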
static struct archive_write_filter *
filter_lookup(struct archive *_a, int n)
{
struct archive_write *a = (struct archive_write *)_a;
struct archive_write_filter *f = a->filter_first;
if (n == -1)
return a->filter_last;
if (n < 0)
return NULL;
while (n > 0 && f != NULL) {
f = f->next_filter;
--n;
}
return f;
}
static int
_archive_filter_code(struct archive *_a, int n)
{
struct archive_write_filter *f = filter_lookup(_a, n);
return f == NULL ? -1 : f->code;
}
static const char *
_archive_filter_name(struct archive *_a, int n)
{
struct archive_write_filter *f = filter_lookup(_a, n);
return f == NULL ? NULL : f->name;
}
static int64_t
_archive_filter_bytes(struct archive *_a, int n)
{
struct archive_write_filter *f = filter_lookup(_a, n);
return f == NULL ? -1 : f->bytes_written;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5540_0 |
crossvul-cpp_data_bad_3689_2 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include "net_driver.h"
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
struct ethtool_string {
char name[ETH_GSTRING_LEN];
};
struct efx_ethtool_stat {
const char *name;
enum {
EFX_ETHTOOL_STAT_SOURCE_mac_stats,
EFX_ETHTOOL_STAT_SOURCE_nic,
EFX_ETHTOOL_STAT_SOURCE_channel,
EFX_ETHTOOL_STAT_SOURCE_tx_queue
} source;
unsigned offset;
u64(*get_stat) (void *field); /* Reader function */
};
/* Initialiser for a struct #efx_ethtool_stat with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
get_stat_function) { \
.name = #stat_name, \
.source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
.offset = ((((field_type *) 0) == \
&((struct efx_##source_name *)0)->field) ? \
offsetof(struct efx_##source_name, field) : \
offsetof(struct efx_##source_name, field)), \
.get_stat = get_stat_function, \
}
static u64 efx_get_uint_stat(void *field)
{
return *(unsigned int *)field;
}
static u64 efx_get_ulong_stat(void *field)
{
return *(unsigned long *)field;
}
static u64 efx_get_u64_stat(void *field)
{
return *(u64 *) field;
}
static u64 efx_get_atomic_stat(void *field)
{
return atomic_read((atomic_t *) field);
}
#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
EFX_ETHTOOL_STAT(field, mac_stats, field, \
unsigned long, efx_get_ulong_stat)
#define EFX_ETHTOOL_U64_MAC_STAT(field) \
EFX_ETHTOOL_STAT(field, mac_stats, field, \
u64, efx_get_u64_stat)
#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
EFX_ETHTOOL_STAT(name, nic, n_##name, \
unsigned int, efx_get_uint_stat)
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
EFX_ETHTOOL_STAT(field, nic, field, \
atomic_t, efx_get_atomic_stat)
#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
EFX_ETHTOOL_STAT(field, channel, n_##field, \
unsigned int, efx_get_uint_stat)
#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
unsigned int, efx_get_uint_stat)
static struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
};
/* Number of ethtool statistics */
#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
/**************************************************************************
*
* Ethtool operations
*
**************************************************************************
*/
/* Identify device by flashing LEDs */
static int efx_ethtool_phys_id(struct net_device *net_dev,
enum ethtool_phys_id_state state)
{
struct efx_nic *efx = netdev_priv(net_dev);
enum efx_led_mode mode = EFX_LED_DEFAULT;
switch (state) {
case ETHTOOL_ID_ON:
mode = EFX_LED_ON;
break;
case ETHTOOL_ID_OFF:
mode = EFX_LED_OFF;
break;
case ETHTOOL_ID_INACTIVE:
mode = EFX_LED_DEFAULT;
break;
case ETHTOOL_ID_ACTIVE:
return 1; /* cycle on/off once per second */
}
efx->type->set_id_led(efx, mode);
return 0;
}
/* This must be called with rtnl_lock held. */
static int efx_ethtool_get_settings(struct net_device *net_dev,
struct ethtool_cmd *ecmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
mutex_lock(&efx->mac_lock);
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
/* GMAC does not support 1000Mbps HD */
ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
if (LOOPBACK_INTERNAL(efx)) {
ethtool_cmd_speed_set(ecmd, link_state->speed);
ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
}
return 0;
}
/* This must be called with rtnl_lock held. */
static int efx_ethtool_set_settings(struct net_device *net_dev,
struct ethtool_cmd *ecmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
/* GMAC does not support 1000Mbps HD */
if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
(ecmd->duplex != DUPLEX_FULL)) {
netif_dbg(efx, drv, efx->net_dev,
"rejecting unsupported 1000Mbps HD setting\n");
return -EINVAL;
}
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->set_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
return rc;
}
static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
struct efx_nic *efx = netdev_priv(net_dev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
return efx_nic_get_regs_len(netdev_priv(net_dev));
}
static void efx_ethtool_get_regs(struct net_device *net_dev,
struct ethtool_regs *regs, void *buf)
{
struct efx_nic *efx = netdev_priv(net_dev);
regs->version = efx->type->revision;
efx_nic_get_regs(efx, buf);
}
static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
return efx->msg_enable;
}
static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
struct efx_nic *efx = netdev_priv(net_dev);
efx->msg_enable = msg_enable;
}
/**
* efx_fill_test - fill in an individual self-test entry
* @test_index: Index of the test
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
* @test: Pointer to test result (used only if data != %NULL)
* @unit_format: Unit name format (e.g. "chan\%d")
* @unit_id: Unit id (e.g. 0 for "chan0")
* @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
* @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
*
* Fill in an individual self-test entry.
*/
static void efx_fill_test(unsigned int test_index,
struct ethtool_string *strings, u64 *data,
int *test, const char *unit_format, int unit_id,
const char *test_format, const char *test_id)
{
struct ethtool_string unit_str, test_str;
/* Fill data value, if applicable */
if (data)
data[test_index] = *test;
/* Fill string, if applicable */
if (strings) {
if (strchr(unit_format, '%'))
snprintf(unit_str.name, sizeof(unit_str.name),
unit_format, unit_id);
else
strcpy(unit_str.name, unit_format);
snprintf(test_str.name, sizeof(test_str.name),
test_format, test_id);
snprintf(strings[test_index].name,
sizeof(strings[test_index].name),
"%-6s %-24s", unit_str.name, test_str.name);
}
}
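/* e.g. efx_fill_test(n, strings, data, t, "chan%d", 0, "eventq.dma", NULL)
 * yields the string "chan0  eventq.dma" (column-padded).
 */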
#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EFX_LOOPBACK_NAME(_mode, _counter) \
"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
/**
* efx_fill_loopback_test - fill in a block of loopback self-test entries
* @efx: Efx NIC
* @lb_tests: Efx loopback self-test results structure
* @mode: Loopback test mode
* @test_index: Starting index of the test
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
*/
static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
enum efx_loopback_mode mode,
unsigned int test_index,
struct ethtool_string *strings, u64 *data)
{
struct efx_channel *channel = efx_get_channel(efx, 0);
struct efx_tx_queue *tx_queue;
efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_fill_test(test_index++, strings, data,
&lb_tests->tx_sent[tx_queue->queue],
EFX_TX_QUEUE_NAME(tx_queue),
EFX_LOOPBACK_NAME(mode, "tx_sent"));
efx_fill_test(test_index++, strings, data,
&lb_tests->tx_done[tx_queue->queue],
EFX_TX_QUEUE_NAME(tx_queue),
EFX_LOOPBACK_NAME(mode, "tx_done"));
}
efx_fill_test(test_index++, strings, data,
&lb_tests->rx_good,
"rx", 0,
EFX_LOOPBACK_NAME(mode, "rx_good"));
efx_fill_test(test_index++, strings, data,
&lb_tests->rx_bad,
"rx", 0,
EFX_LOOPBACK_NAME(mode, "rx_bad"));
return test_index;
}
/**
* efx_ethtool_fill_self_tests - get self-test details
* @efx: Efx NIC
* @tests: Efx self-test results structure, or %NULL
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
struct ethtool_string *strings,
u64 *data)
{
struct efx_channel *channel;
unsigned int n = 0, i;
enum efx_loopback_mode mode;
efx_fill_test(n++, strings, data, &tests->phy_alive,
"phy", 0, "alive", NULL);
efx_fill_test(n++, strings, data, &tests->nvram,
"core", 0, "nvram", NULL);
efx_fill_test(n++, strings, data, &tests->interrupt,
"core", 0, "interrupt", NULL);
/* Event queues */
efx_for_each_channel(channel, efx) {
efx_fill_test(n++, strings, data,
&tests->eventq_dma[channel->channel],
EFX_CHANNEL_NAME(channel),
"eventq.dma", NULL);
efx_fill_test(n++, strings, data,
&tests->eventq_int[channel->channel],
EFX_CHANNEL_NAME(channel),
"eventq.int", NULL);
efx_fill_test(n++, strings, data,
&tests->eventq_poll[channel->channel],
EFX_CHANNEL_NAME(channel),
"eventq.poll", NULL);
}
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
if (efx->phy_op->run_tests != NULL) {
EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
for (i = 0; true; ++i) {
const char *name;
EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
name = efx->phy_op->test_name(efx, i);
if (name == NULL)
break;
efx_fill_test(n++, strings, data, &tests->phy_ext[i],
"phy", 0, name, NULL);
}
}
/* Loopback tests */
for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(efx->loopback_modes & (1 << mode)))
continue;
n = efx_fill_loopback_test(efx,
&tests->loopback[mode], mode, n,
strings, data);
}
return n;
}
static int efx_ethtool_get_sset_count(struct net_device *net_dev,
int string_set)
{
switch (string_set) {
case ETH_SS_STATS:
return EFX_ETHTOOL_NUM_STATS;
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
NULL, NULL, NULL);
default:
return -EINVAL;
}
}
static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct ethtool_string *ethtool_strings =
(struct ethtool_string *)strings;
int i;
switch (string_set) {
case ETH_SS_STATS:
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
strncpy(ethtool_strings[i].name,
efx_ethtool_stats[i].name,
sizeof(ethtool_strings[i].name));
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL,
ethtool_strings, NULL);
break;
default:
/* No other string sets */
break;
}
}
static void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct efx_ethtool_stat *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct rtnl_link_stats64 temp;
int i;
EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
/* Update MAC and NIC statistics */
dev_get_stats(net_dev, &temp);
/* Fill detailed statistics buffer */
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
stat = &efx_ethtool_stats[i];
switch (stat->source) {
case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
data[i] = stat->get_stat((void *)mac_stats +
stat->offset);
break;
case EFX_ETHTOOL_STAT_SOURCE_nic:
data[i] = stat->get_stat((void *)efx + stat->offset);
break;
case EFX_ETHTOOL_STAT_SOURCE_channel:
data[i] = 0;
efx_for_each_channel(channel, efx)
data[i] += stat->get_stat((void *)channel +
stat->offset);
break;
case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
data[i] = 0;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
data[i] +=
stat->get_stat((void *)tx_queue
+ stat->offset);
}
break;
}
}
}
static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
int already_up;
int rc = -ENOMEM;
efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
if (!efx_tests)
goto fail;
ASSERT_RTNL();
if (efx->state != STATE_RUNNING) {
rc = -EIO;
goto fail1;
}
netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
/* We need rx buffers and interrupts. */
already_up = (efx->net_dev->flags & IFF_UP);
if (!already_up) {
rc = dev_open(efx->net_dev);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
goto fail1;
}
}
rc = efx_selftest(efx, efx_tests, test->flags);
if (!already_up)
dev_close(efx->net_dev);
netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
fail1:
/* Fill ethtool results structures */
efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
kfree(efx_tests);
fail:
if (rc)
test->flags |= ETH_TEST_FL_FAILED;
}
/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
return mdio45_nway_restart(&efx->mdio);
}
/*
* Each channel has a single IRQ and moderation timer, started by any
* completion (or other event). Unless the module parameter
* separate_tx_channels is set, IRQs and moderation are therefore
* shared between RX and TX completions. In this case, when RX IRQ
* moderation is explicitly changed then TX IRQ moderation is
* automatically changed too, but otherwise we fail if the two values
* are requested to be different.
*
* The hardware does not support a limit on the number of completions
* before an IRQ, so we do not use the max_frames fields. We should
* report and require that max_frames == (usecs != 0), but this would
* invalidate existing user documentation.
*
* The hardware does not have distinct settings for interrupt
* moderation while the previous IRQ is being handled, so we should
* not use the 'irq' fields. However, an earlier developer
* misunderstood the meaning of the 'irq' fields and the driver did
* not support the standard fields. To avoid invalidating existing
* user documentation, we report and accept changes through either the
* standard or 'irq' fields. If both are changed at the same time, we
* prefer the standard field.
*
* We implement adaptive IRQ moderation, but use a different algorithm
* from that assumed in the definition of struct ethtool_coalesce.
* Therefore we do not use any of the adaptive moderation parameters
* in it.
*/
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce)
{
struct efx_nic *efx = netdev_priv(net_dev);
unsigned int tx_usecs, rx_usecs;
bool rx_adaptive;
efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
coalesce->tx_coalesce_usecs = tx_usecs;
coalesce->tx_coalesce_usecs_irq = tx_usecs;
coalesce->rx_coalesce_usecs = rx_usecs;
coalesce->rx_coalesce_usecs_irq = rx_usecs;
coalesce->use_adaptive_rx_coalesce = rx_adaptive;
return 0;
}
static int efx_ethtool_set_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
unsigned int tx_usecs, rx_usecs;
bool adaptive, rx_may_override_tx;
int rc;
if (coalesce->use_adaptive_tx_coalesce)
return -EINVAL;
efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
if (coalesce->rx_coalesce_usecs != rx_usecs)
rx_usecs = coalesce->rx_coalesce_usecs;
else
rx_usecs = coalesce->rx_coalesce_usecs_irq;
adaptive = coalesce->use_adaptive_rx_coalesce;
/* If channels are shared, TX IRQ moderation can be quietly
* overridden unless it is changed from its old value.
*/
rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
coalesce->tx_coalesce_usecs_irq == tx_usecs);
if (coalesce->tx_coalesce_usecs != tx_usecs)
tx_usecs = coalesce->tx_coalesce_usecs;
else
tx_usecs = coalesce->tx_coalesce_usecs_irq;
rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
rx_may_override_tx);
if (rc != 0)
return rc;
efx_for_each_channel(channel, efx)
efx->type->push_irq_moderation(channel);
return 0;
}
static void efx_ethtool_get_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring)
{
struct efx_nic *efx = netdev_priv(net_dev);
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
ring->rx_pending = efx->rxq_entries;
ring->tx_pending = efx->txq_entries;
}
static int efx_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring)
{
struct efx_nic *efx = netdev_priv(net_dev);
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
ring->tx_pending > EFX_MAX_DMAQ_SIZE)
return -EINVAL;
if (ring->rx_pending < EFX_MIN_RING_SIZE ||
ring->tx_pending < EFX_MIN_RING_SIZE) {
netif_err(efx, drv, efx->net_dev,
"TX and RX queues cannot be smaller than %ld\n",
EFX_MIN_RING_SIZE);
return -EINVAL;
}
return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
}
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
struct efx_nic *efx = netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
bool reset;
int rc = 0;
mutex_lock(&efx->mac_lock);
wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
(pause->tx_pause ? EFX_FC_TX : 0) |
(pause->autoneg ? EFX_FC_AUTO : 0));
if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
netif_dbg(efx, drv, efx->net_dev,
"Flow control unsupported: tx ON rx OFF\n");
rc = -EINVAL;
goto out;
}
if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
netif_dbg(efx, drv, efx->net_dev,
"Autonegotiation is disabled\n");
rc = -EINVAL;
goto out;
}
/* TX flow control may automatically turn itself off if the
* link partner (intermittently) stops responding to pause
* frames. There isn't any indication that this has happened,
* so the best we can do is leave it up to the user to spot this
* and fix it by cycling transmit flow control on this end. */
reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
if (EFX_WORKAROUND_11482(efx) && reset) {
if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
/* Recover by resetting the EM block */
falcon_stop_nic_stats(efx);
falcon_drain_tx_fifo(efx);
efx->mac_op->reconfigure(efx);
falcon_start_nic_stats(efx);
} else {
/* Schedule a reset to recover */
efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}
}
old_adv = efx->link_advertising;
old_fc = efx->wanted_fc;
efx_link_set_wanted_fc(efx, wanted_fc);
if (efx->link_advertising != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx->phy_op->reconfigure(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"Unable to advertise requested flow "
"control setting\n");
goto out;
}
}
/* Reconfigure the MAC. The PHY *may* generate a link state change event
* if the user just changed the advertised capabilities, but there's no
* harm doing this twice */
efx->mac_op->reconfigure(efx);
out:
mutex_unlock(&efx->mac_lock);
return rc;
}
static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
struct efx_nic *efx = netdev_priv(net_dev);
pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
struct efx_nic *efx = netdev_priv(net_dev);
return efx->type->get_wol(efx, wol);
}
static int efx_ethtool_set_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
struct efx_nic *efx = netdev_priv(net_dev);
return efx->type->set_wol(efx, wol->wolopts);
}
static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
rc = efx->type->map_reset_flags(flags);
if (rc < 0)
return rc;
return efx_reset(efx, rc);
}
static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rules __always_unused)
{
struct efx_nic *efx = netdev_priv(net_dev);
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = efx->n_rx_channels;
return 0;
case ETHTOOL_GRXFH: {
unsigned min_revision = 0;
info->data = 0;
switch (info->flow_type) {
case TCP_V4_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fall through */
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case IPV4_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
min_revision = EFX_REV_FALCON_B0;
break;
case TCP_V6_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fall through */
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
min_revision = EFX_REV_SIENA_A0;
break;
default:
break;
}
if (efx_nic_rev(efx) < min_revision)
info->data = 0;
return 0;
}
default:
return -EOPNOTSUPP;
}
}
static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
struct ethtool_rx_ntuple *ntuple)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
struct efx_filter_spec filter;
int rc;
/* Range-check action */
if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
ntuple->fs.action >= (s32)efx->n_rx_channels)
return -EINVAL;
if (~ntuple->fs.data_mask)
return -EINVAL;
efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
(ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
0xfff : ntuple->fs.action);
switch (ntuple->fs.flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
IPPROTO_TCP : IPPROTO_UDP);
/* Must match all of destination, */
if (ip_mask->ip4dst | ip_mask->pdst)
return -EINVAL;
/* all or none of source, */
if ((ip_mask->ip4src | ip_mask->psrc) &&
((__force u32)~ip_mask->ip4src |
(__force u16)~ip_mask->psrc))
return -EINVAL;
/* and nothing else */
if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
return -EINVAL;
if (!ip_mask->ip4src)
rc = efx_filter_set_ipv4_full(&filter, proto,
ip_entry->ip4dst,
ip_entry->pdst,
ip_entry->ip4src,
ip_entry->psrc);
else
rc = efx_filter_set_ipv4_local(&filter, proto,
ip_entry->ip4dst,
ip_entry->pdst);
if (rc)
return rc;
break;
}
case ETHER_FLOW:
/* Must match all of destination, */
if (!is_zero_ether_addr(mac_mask->h_dest))
return -EINVAL;
/* all or none of VID, */
if (ntuple->fs.vlan_tag_mask != 0xf000 &&
ntuple->fs.vlan_tag_mask != 0xffff)
return -EINVAL;
/* and nothing else */
if (!is_broadcast_ether_addr(mac_mask->h_source) ||
mac_mask->h_proto != htons(0xffff))
return -EINVAL;
rc = efx_filter_set_eth_local(
&filter,
(ntuple->fs.vlan_tag_mask == 0xf000) ?
ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
mac_entry->h_dest);
if (rc)
return rc;
break;
default:
return -EINVAL;
}
if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
return efx_filter_remove_filter(efx, &filter);
rc = efx_filter_insert_filter(efx, &filter, true);
return rc < 0 ? rc : 0;
}
static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
struct ethtool_rxfh_indir *indir)
{
struct efx_nic *efx = netdev_priv(net_dev);
size_t copy_size =
min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
return -EOPNOTSUPP;
indir->size = ARRAY_SIZE(efx->rx_indir_table);
memcpy(indir->ring_index, efx->rx_indir_table,
copy_size * sizeof(indir->ring_index[0]));
return 0;
}
static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
const struct ethtool_rxfh_indir *indir)
{
struct efx_nic *efx = netdev_priv(net_dev);
size_t i;
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
return -EOPNOTSUPP;
/* Validate size and indices */
if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
if (indir->ring_index[i] >= efx->n_rx_channels)
return -EINVAL;
memcpy(efx->rx_indir_table, indir->ring_index,
sizeof(efx->rx_indir_table));
efx_nic_push_rx_indir_table(efx);
return 0;
}
const struct ethtool_ops efx_ethtool_ops = {
.get_settings = efx_ethtool_get_settings,
.set_settings = efx_ethtool_set_settings,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_regs_len = efx_ethtool_get_regs_len,
.get_regs = efx_ethtool_get_regs,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
.nway_reset = efx_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
.get_ringparam = efx_ethtool_get_ringparam,
.set_ringparam = efx_ethtool_set_ringparam,
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
.get_sset_count = efx_ethtool_get_sset_count,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.set_phys_id = efx_ethtool_phys_id,
.get_ethtool_stats = efx_ethtool_get_stats,
.get_wol = efx_ethtool_get_wol,
.set_wol = efx_ethtool_set_wol,
.reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rx_ntuple = efx_ethtool_set_rx_ntuple,
.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3689_2 |
crossvul-cpp_data_good_5818_0 | /*
* PNG image format
* Copyright (c) 2008 Loren Merrit <lorenm@u.washington.edu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "png.h"
#include "pngdsp.h"
// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL/255 * 0x7f)
#define pb_80 (~0UL/255 * 0x80)
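/*
 * add_bytes_l2_c() below uses these masks for a SWAR
 * (SIMD-within-a-register) byte-wise add: masking with pb_7f sums the
 * low seven bits of every byte lane so no carry can cross into the
 * neighbouring lane, and XORing with (a ^ b) & pb_80 restores each
 * lane's top bit (the two operands' top bits plus the carry out of
 * bit 6, mod 2).  Worked example for one 8-bit lane with a = 0xff,
 * b = 0x01:
 *   (0x7f + 0x01) = 0x80,  (0xff ^ 0x01) & 0x80 = 0x80,
 *   0x80 ^ 0x80 = 0x00 == (0xff + 0x01) mod 256.
 */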
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
long i;
for (i = 0; i <= w - (int)sizeof(long); i += sizeof(long)) {
long a = *(long *)(src1 + i);
long b = *(long *)(src2 + i);
*(long *)(dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
}
for (; i < w; i++)
dst[i] = src1[i] + src2[i];
}
av_cold void ff_pngdsp_init(PNGDSPContext *dsp)
{
dsp->add_bytes_l2 = add_bytes_l2_c;
dsp->add_paeth_prediction = ff_add_png_paeth_prediction;
if (ARCH_X86) ff_pngdsp_init_x86(dsp);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5818_0 |
crossvul-cpp_data_good_1696_0 | /*
* History:
* Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
* to allow user process control of SCSI devices.
* Development Sponsored by Killy Corp. NY NY
*
* Original driver (sg.c):
* Copyright (C) 1992 Lawrence Foard
* Version 2 and 3 extensions to driver:
* Copyright (C) 1998 - 2014 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
*/
static int sg_version_num = 30536; /* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"
/*
* D. P. Gilbert (dgilbert@interlog.com), notes:
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
* (otherwise the macros compile to empty statements).
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
#include "scsi_logging.h"
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20140603";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0
#define SG_MAX_DEVS 32768
/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30); however, the type
* of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
* than 16 bytes are "variable length" whose length is a multiple of 4
*/
#define SG_MAX_CDB_SIZE 252
/*
* Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
* Then when using 32 bit integers x * m may overflow during the calculation.
 * Replacing muldiv(x,m,d) by ((x % d) * m) / d + int(x / d) * m
* calculates the same, but prevents the overflow when both m and d
* are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
* in 32 bits.
*/
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
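/*
 * Worked example, taking HZ = 1000 and USER_HZ = 100: for x = 5,000,000
 * user ticks, MULDIV(5000000, 1000, 100)
 *	= ((5000000 % 100) * 1000) / 100 + (5000000 / 100) * 1000
 *	= 0 + 50,000,000
 * which fits comfortably in 32 bits, whereas the naive
 * 5000000 * 1000 = 5,000,000,000 would already have overflowed before
 * the division.
 */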
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
of this size (or less if there is not enough memory) will be reserved
for use by this file descriptor. [Deprecated usage: this variable is also
readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1; /* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;
static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;
#define SG_SECTOR_SZ 512
static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);
static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
file descriptor list for device */
static struct class_interface sg_interface = {
.add_dev = sg_add_device,
.remove_dev = sg_remove_device,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
unsigned bufflen; /* Size of (aggregate) data buffer */
struct page **pages;
int page_order;
char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
struct sg_request *nextrp; /* NULL -> tail request (slist) */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
char orphan; /* 1 -> drop on sight, 0 -> normal */
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
/* done protected by rq_list_lock */
char done; /* 0->before bh, 1->before read, 2->read */
struct request *rq;
struct bio *bio;
struct execute_work ew;
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
struct list_head sfd_siblings; /* protected by device's sfd_lock */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
unsigned save_scat_len; /* original length of trunc. scat. element */
Sg_request *headrp; /* head of request slist, NULL->empty */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
struct mutex open_rel_lock; /* held when in open() or release() */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
struct list_head sfds;
rwlock_t sfd_lock; /* protect access to sfd list */
atomic_t detaching; /* 0->device usable, 1->device detaching */
bool exclude; /* 1->open(O_EXCL) succeeded and is active */
int open_cnt; /* count of opens (perhaps < num(sfds) ) */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
struct kref d_ref;
} Sg_device;
/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, int uptodate);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
const char __user *buf, size_t count, int blocking,
int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
#define sg_printk(prefix, sdp, fmt, a...) \
sdev_prefix_printk(prefix, (sdp)->device, \
(sdp)->disk->disk_name, fmt, ##a)
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
static int
open_wait(Sg_device *sdp, int flags)
{
int retval = 0;
if (flags & O_EXCL) {
while (sdp->open_cnt > 0) {
mutex_unlock(&sdp->open_rel_lock);
retval = wait_event_interruptible(sdp->open_wait,
(atomic_read(&sdp->detaching) ||
!sdp->open_cnt));
mutex_lock(&sdp->open_rel_lock);
if (retval) /* -ERESTARTSYS */
return retval;
if (atomic_read(&sdp->detaching))
return -ENODEV;
}
} else {
while (sdp->exclude) {
mutex_unlock(&sdp->open_rel_lock);
retval = wait_event_interruptible(sdp->open_wait,
(atomic_read(&sdp->detaching) ||
!sdp->exclude));
mutex_lock(&sdp->open_rel_lock);
if (retval) /* -ERESTARTSYS */
return retval;
if (atomic_read(&sdp->detaching))
return -ENODEV;
}
}
return retval;
}
/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
int dev = iminor(inode);
int flags = filp->f_flags;
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
int retval;
nonseekable_open(inode, filp);
if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
return -EPERM; /* Can't lock it with read only access */
sdp = sg_get_dev(dev);
if (IS_ERR(sdp))
return PTR_ERR(sdp);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_open: flags=0x%x\n", flags));
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
retval = scsi_device_get(sdp->device);
if (retval)
goto sg_put;
retval = scsi_autopm_get_device(sdp->device);
if (retval)
goto sdp_put;
/* scsi_block_when_processing_errors() may block so bypass
* check if O_NONBLOCK. Permits SCSI commands to be issued
* during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device))) {
retval = -ENXIO;
/* we are in error recovery for this device */
goto error_out;
}
mutex_lock(&sdp->open_rel_lock);
if (flags & O_NONBLOCK) {
if (flags & O_EXCL) {
if (sdp->open_cnt > 0) {
retval = -EBUSY;
goto error_mutex_locked;
}
} else {
if (sdp->exclude) {
retval = -EBUSY;
goto error_mutex_locked;
}
}
} else {
retval = open_wait(sdp, flags);
if (retval) /* -ERESTARTSYS or -ENODEV */
goto error_mutex_locked;
}
/* N.B. at this point we are holding the open_rel_lock */
if (flags & O_EXCL)
sdp->exclude = true;
if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
sfp = sg_add_sfp(sdp);
if (IS_ERR(sfp)) {
retval = PTR_ERR(sfp);
goto out_undo;
}
filp->private_data = sfp;
sdp->open_cnt++;
mutex_unlock(&sdp->open_rel_lock);
retval = 0;
sg_put:
kref_put(&sdp->d_ref, sg_device_destroy);
return retval;
out_undo:
if (flags & O_EXCL) {
sdp->exclude = false; /* undo if error */
wake_up_interruptible(&sdp->open_wait);
}
error_mutex_locked:
mutex_unlock(&sdp->open_rel_lock);
error_out:
scsi_autopm_put_device(sdp->device);
sdp_put:
scsi_device_put(sdp->device);
goto sg_put;
}
/* Release resources associated with a successful sg_open()
* Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
Sg_device *sdp;
Sg_fd *sfp;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
mutex_lock(&sdp->open_rel_lock);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
sdp->open_cnt--;
	/* possibly many open()s waiting on exclude clearing, start many;
* only open(O_EXCL)s wait on 0==open_cnt so only start one */
if (sdp->exclude) {
sdp->exclude = false;
wake_up_interruptible_all(&sdp->open_wait);
} else if (0 == sdp->open_cnt) {
wake_up_interruptible(&sdp->open_wait);
}
mutex_unlock(&sdp->open_rel_lock);
return 0;
}
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
sg_io_hdr_t *hp;
struct sg_header *old_hdr = NULL;
int retval = 0;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
if (!old_hdr)
return -ENOMEM;
if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
if (old_hdr->reply_len < 0) {
if (count >= SZ_SG_IO_HDR) {
sg_io_hdr_t *new_hdr;
new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
if (!new_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
retval =__copy_from_user
(new_hdr, buf, SZ_SG_IO_HDR);
req_pack_id = new_hdr->pack_id;
kfree(new_hdr);
if (retval) {
retval = -EFAULT;
goto free_old_hdr;
}
}
} else
req_pack_id = old_hdr->pack_id;
}
srp = sg_get_rq_mark(sfp, req_pack_id);
if (!srp) { /* now wait on packet to arrive */
if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
if (filp->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto free_old_hdr;
}
retval = wait_event_interruptible(sfp->read_wait,
(atomic_read(&sdp->detaching) ||
(srp = sg_get_rq_mark(sfp, req_pack_id))));
if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
if (retval) {
/* -ERESTARTSYS as signal hit process */
goto free_old_hdr;
}
}
if (srp->header.interface_id != '\0') {
retval = sg_new_read(sfp, buf, count, srp);
goto free_old_hdr;
}
hp = &srp->header;
if (old_hdr == NULL) {
old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
if (! old_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
}
memset(old_hdr, 0, SZ_SG_HEADER);
old_hdr->reply_len = (int) hp->timeout;
old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
old_hdr->pack_id = hp->pack_id;
old_hdr->twelve_byte =
((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
old_hdr->target_status = hp->masked_status;
old_hdr->host_status = hp->host_status;
old_hdr->driver_status = hp->driver_status;
if ((CHECK_CONDITION & hp->masked_status) ||
(DRIVER_SENSE & hp->driver_status))
memcpy(old_hdr->sense_buffer, srp->sense_b,
sizeof (old_hdr->sense_buffer));
switch (hp->host_status) {
/* This setup of 'result' is for backward compatibility and is best
ignored by the user who should use target, host + driver status */
case DID_OK:
case DID_PASSTHROUGH:
case DID_SOFT_ERROR:
old_hdr->result = 0;
break;
case DID_NO_CONNECT:
case DID_BUS_BUSY:
case DID_TIME_OUT:
old_hdr->result = EBUSY;
break;
case DID_BAD_TARGET:
case DID_ABORT:
case DID_PARITY:
case DID_RESET:
case DID_BAD_INTR:
old_hdr->result = EIO;
break;
case DID_ERROR:
old_hdr->result = (srp->sense_b[0] == 0 &&
hp->masked_status == GOOD) ? 0 : EIO;
break;
default:
old_hdr->result = EIO;
break;
}
/* Now copy the result back to the user buffer. */
if (count >= SZ_SG_HEADER) {
if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
buf += SZ_SG_HEADER;
if (count > old_hdr->reply_len)
count = old_hdr->reply_len;
if (count > SZ_SG_HEADER) {
if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
}
} else
count = (old_hdr->result == 0) ? 0 : -EIO;
sg_finish_rem_req(srp);
retval = count;
free_old_hdr:
kfree(old_hdr);
return retval;
}
static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
sg_io_hdr_t *hp = &srp->header;
int err = 0, err2;
int len;
if (count < SZ_SG_IO_HDR) {
err = -EINVAL;
goto err_out;
}
hp->sb_len_wr = 0;
if ((hp->mx_sb_len > 0) && hp->sbp) {
if ((CHECK_CONDITION & hp->masked_status) ||
(DRIVER_SENSE & hp->driver_status)) {
int sb_len = SCSI_SENSE_BUFFERSIZE;
sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
len = (len > sb_len) ? sb_len : len;
if (copy_to_user(hp->sbp, srp->sense_b, len)) {
err = -EFAULT;
goto err_out;
}
hp->sb_len_wr = len;
}
}
if (hp->masked_status || hp->host_status || hp->driver_status)
hp->info |= SG_INFO_CHECK;
if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
err = -EFAULT;
goto err_out;
}
err_out:
err2 = sg_finish_rem_req(srp);
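	/* GNU a ?: b shorthand: return the first nonzero of err and err2,
	 * otherwise the byte count */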
return err ? : err2 ? : count;
}
static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
int mxsize, cmd_size, k;
int input_size, blocking;
unsigned char opcode;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_write: count=%d\n", (int) count));
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
return sg_new_write(sfp, filp, buf, count,
blocking, 0, 0, NULL);
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0; /* reset so only this write() is affected */
} else {
cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
mxsize -= SZ_SG_HEADER;
input_size -= SZ_SG_HEADER;
if (input_size < 0) {
sg_remove_request(sfp, srp);
return -EIO; /* User did not pass enough bytes for this command. */
}
hp = &srp->header;
hp->interface_id = '\0'; /* indicator of old interface tunnelled */
hp->cmd_len = (unsigned char) cmd_size;
hp->iovec_count = 0;
hp->mx_sb_len = 0;
if (input_size > 0)
hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
if (hp->dxfer_direction == SG_DXFER_TO_DEV)
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
hp->sbp = NULL;
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
if (__copy_from_user(cmnd, buf, cmd_size))
return -EFAULT;
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
static char cmd[TASK_COMM_LEN];
if (strcmp(current->comm, cmd)) {
printk_ratelimited(KERN_WARNING
"sg_write: data in/out %d/%d bytes "
"for SCSI command 0x%x-- guessing "
"data in;\n program %s not setting "
"count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
strcpy(cmd, current->comm);
}
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
}
static ssize_t
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
size_t count, int blocking, int read_only, int sg_io_owned,
Sg_request **o_srp)
{
int k;
Sg_request *srp;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
int timeout;
unsigned long ul_timeout;
if (count < SZ_SG_IO_HDR)
return -EINVAL;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_new_write: queue full\n"));
return -EDOM;
}
srp->sg_io_owned = sg_io_owned;
hp = &srp->header;
if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (hp->interface_id != 'S') {
sg_remove_request(sfp, srp);
return -ENOSYS;
}
if (hp->flags & SG_FLAG_MMAP_IO) {
if (hp->dxfer_len > sfp->reserve.bufflen) {
sg_remove_request(sfp, srp);
return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
}
if (hp->flags & SG_FLAG_DIRECT_IO) {
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
if (sg_res_in_use(sfp)) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
}
ul_timeout = msecs_to_jiffies(srp->header.timeout);
timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
sg_remove_request(sfp, srp);
return -EMSGSIZE;
}
if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
}
if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (read_only && sg_allow_access(file, cmnd)) {
sg_remove_request(sfp, srp);
return -EPERM;
}
k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
if (k < 0)
return k;
if (o_srp)
*o_srp = srp;
return count;
}
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
{
int k, at_head;
Sg_device *sdp = sfp->parentdp;
sg_io_hdr_t *hp = &srp->header;
srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
hp->status = 0;
hp->masked_status = 0;
hp->msg_status = 0;
hp->info = 0;
hp->host_status = 0;
hp->driver_status = 0;
hp->resid = 0;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio)
blk_end_request_all(srp->rq, -EIO);
sg_finish_rem_req(srp);
return -ENODEV;
}
hp->duration = jiffies_to_msecs(jiffies);
if (hp->interface_id != '\0' && /* v3 (or later) interface */
(SG_FLAG_Q_AT_TAIL & hp->flags))
at_head = 0;
else
at_head = 1;
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
srp->rq, at_head, sg_rq_end_io);
return 0;
}
static int srp_done(Sg_fd *sfp, Sg_request *srp)
{
unsigned long flags;
int ret;
read_lock_irqsave(&sfp->rq_list_lock, flags);
ret = srp->done;
read_unlock_irqrestore(&sfp->rq_list_lock, flags);
return ret;
}
static int max_sectors_bytes(struct request_queue *q)
{
unsigned int max_sectors = queue_max_sectors(q);
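	/* clamp so that the 512-byte-sector to byte conversion below
	 * cannot overflow the (int) return value */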
max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
return max_sectors << 9;
}
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
void __user *p = (void __user *)arg;
int __user *ip = p;
int result, val, read_only;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
unsigned long iflags;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
switch (cmd_in) {
case SG_IO:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
(srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
if (atomic_read(&sdp->detaching))
return -ENODEV;
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
write_unlock_irq(&sfp->rq_list_lock);
result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
return (result < 0) ? result : 0;
}
srp->orphan = 1;
write_unlock_irq(&sfp->rq_list_lock);
return result; /* -ERESTARTSYS because signal hit process */
case SG_SET_TIMEOUT:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EIO;
if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
val = MULDIV (INT_MAX, USER_HZ, HZ);
sfp->timeout_user = val;
sfp->timeout = MULDIV (val, HZ, USER_HZ);
return 0;
case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
/* strange ..., for backward compatibility */
return sfp->timeout_user;
case SG_SET_FORCE_LOW_DMA:
result = get_user(val, ip);
if (result)
return result;
if (val) {
sfp->low_dma = 1;
if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
val = (int) sfp->reserve.bufflen;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
} else {
if (atomic_read(&sdp->detaching))
return -ENODEV;
sfp->low_dma = sdp->device->host->unchecked_isa_dma;
}
return 0;
case SG_GET_LOW_DMA:
return put_user((int) sfp->low_dma, ip);
case SG_GET_SCSI_ID:
if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
return -EFAULT;
else {
sg_scsi_id_t __user *sg_idp = p;
if (atomic_read(&sdp->detaching))
return -ENODEV;
__put_user((int) sdp->device->host->host_no,
&sg_idp->host_no);
__put_user((int) sdp->device->channel,
&sg_idp->channel);
__put_user((int) sdp->device->id, &sg_idp->scsi_id);
__put_user((int) sdp->device->lun, &sg_idp->lun);
__put_user((int) sdp->device->type, &sg_idp->scsi_type);
__put_user((short) sdp->device->host->cmd_per_lun,
&sg_idp->h_cmd_per_lun);
__put_user((short) sdp->device->queue_depth,
&sg_idp->d_queue_depth);
__put_user(0, &sg_idp->unused[0]);
__put_user(0, &sg_idp->unused[1]);
return 0;
}
case SG_SET_FORCE_PACK_ID:
result = get_user(val, ip);
if (result)
return result;
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
__put_user(srp->header.pack_id, ip);
return 0;
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
__put_user(-1, ip);
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return put_user(val, ip);
case SG_GET_SG_TABLESIZE:
return put_user(sdp->sg_tablesize, ip);
case SG_SET_RESERVED_SIZE:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
max_sectors_bytes(sdp->device->request_queue));
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
if (result)
return result;
sfp->cmd_q = val ? 1 : 0;
return 0;
case SG_GET_COMMAND_Q:
return put_user((int) sfp->cmd_q, ip);
case SG_SET_KEEP_ORPHAN:
result = get_user(val, ip);
if (result)
return result;
sfp->keep_orphan = val;
return 0;
case SG_GET_KEEP_ORPHAN:
return put_user((int) sfp->keep_orphan, ip);
case SG_NEXT_CMD_LEN:
result = get_user(val, ip);
if (result)
return result;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
return put_user(sg_version_num, ip);
case SG_GET_ACCESS_COUNT:
/* faked - we don't have a real access count anymore */
val = (sdp->device ? 1 : 0);
return put_user(val, ip);
case SG_GET_REQUEST_TABLE:
if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
return -EFAULT;
else {
sg_req_info_t *rinfo;
unsigned int ms;
rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
++val, srp = srp ? srp->nextrp : srp) {
memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
if (srp) {
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
srp->header.masked_status &
srp->header.host_status &
srp->header.driver_status;
if (srp->done)
rinfo[val].duration =
srp->header.duration;
else {
ms = jiffies_to_msecs(jiffies);
rinfo[val].duration =
(ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
}
rinfo[val].orphan = srp->orphan;
rinfo[val].sg_io_owned =
srp->sg_io_owned;
rinfo[val].pack_id =
srp->header.pack_id;
rinfo[val].usr_ptr =
srp->header.usr_ptr;
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
return result;
}
case SG_EMULATED_HOST:
if (atomic_read(&sdp->detaching))
return -ENODEV;
return put_user(sdp->device->host->hostt->emulated, ip);
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (read_only) {
unsigned char opcode = WRITE_6;
Scsi_Ioctl_Command __user *siocp = p;
if (copy_from_user(&opcode, siocp->data, 1))
return -EFAULT;
if (sg_allow_access(filp, &opcode))
return -EPERM;
}
return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
return result;
sdp->sgdebug = (char) val;
return 0;
case BLKSECTGET:
return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
sdp->disk->disk_name,
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
NULL,
(char *)arg);
case BLKTRACESTART:
return blk_trace_startstop(sdp->device->request_queue, 1);
case BLKTRACESTOP:
return blk_trace_startstop(sdp->device->request_queue, 0);
case BLKTRACETEARDOWN:
return blk_trace_remove(sdp->device->request_queue);
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_TRANSFORM:
case SG_SCSI_RESET:
if (atomic_read(&sdp->detaching))
return -ENODEV;
break;
default:
if (read_only)
return -EPERM; /* don't know so take safe approach */
break;
}
result = scsi_ioctl_block_when_processing_errors(sdp->device,
cmd_in, filp->f_flags & O_NDELAY);
if (result)
return result;
return scsi_ioctl(sdp->device, cmd_in, p);
}
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
Sg_device *sdp;
Sg_fd *sfp;
struct scsi_device *sdev;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
sdev = sdp->device;
if (sdev->host->hostt->compat_ioctl) {
int ret;
ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
return ret;
}
return -ENOIOCTLCMD;
}
#endif
static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
unsigned int res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
int count = 0;
unsigned long iflags;
sfp = filp->private_data;
if (!sfp)
return POLLERR;
sdp = sfp->parentdp;
if (!sdp)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
++count;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (atomic_read(&sdp->detaching))
res |= POLLHUP;
else if (!sfp->cmd_q) {
if (0 == count)
res |= POLLOUT | POLLWRNORM;
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_poll: res=0x%x\n", (int) res));
return res;
}
static int
sg_fasync(int fd, struct file *filp, int mode)
{
Sg_device *sdp;
Sg_fd *sfp;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_fasync: mode=%d\n", mode));
return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
static int
sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
Sg_fd *sfp;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
return VM_FAULT_SIGBUS;
rsv_schp = &sfp->reserve;
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= rsv_schp->bufflen)
return VM_FAULT_SIGBUS;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
"sg_vma_fault: offset=%lu, scatg=%d\n",
offset, rsv_schp->k_use_sg));
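	/* Walk the reserve buffer's scatter segments (each one
	 * 2^page_order pages long) until the faulting offset falls inside
	 * a segment, then hand back the page at that offset within it. */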
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
len = (len < length) ? len : length;
if (offset < len) {
struct page *page = nth_page(rsv_schp->pages[k],
offset >> PAGE_SHIFT);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
}
sa += len;
offset -= len;
}
return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct sg_mmap_vm_ops = {
.fault = sg_vma_fault,
};
static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
Sg_fd *sfp;
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
req_sz = vma->vm_end - vma->vm_start;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
"sg_mmap starting, vm_start=%p, len=%d\n",
(void *) vma->vm_start, (int) req_sz));
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
if (req_sz > rsv_schp->bufflen)
return -ENOMEM; /* cannot map more than reserved buffer */
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
len = (len < length) ? len : length;
sa += len;
}
sfp->mmap_called = 1;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
return 0;
}
static void
sg_rq_end_io_usercontext(struct work_struct *work)
{
struct sg_request *srp = container_of(work, struct sg_request, ew.work);
struct sg_fd *sfp = srp->parentfp;
sg_finish_rem_req(srp);
kref_put(&sfp->f_ref, sg_remove_sfp);
}
/*
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
static void
sg_rq_end_io(struct request *rq, int uptodate)
{
struct sg_request *srp = rq->end_io_data;
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
unsigned int ms;
char *sense;
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
return;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
return;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
sense = rq->sense;
result = rq->errors;
resid = rq->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
srp->header.pack_id, result));
srp->header.resid = resid;
ms = jiffies_to_msecs(jiffies);
srp->header.duration = (ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
if (0 != result) {
struct scsi_sense_hdr sshdr;
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
srp->header.msg_status = msg_byte(result);
srp->header.host_status = host_byte(result);
srp->header.driver_status = driver_byte(result);
if ((sdp->sgdebug > 0) &&
((CHECK_CONDITION == srp->header.masked_status) ||
(COMMAND_TERMINATED == srp->header.masked_status)))
__scsi_print_sense(sdp->device, __func__, sense,
SCSI_SENSE_BUFFERSIZE);
/* Following if statement is a patch supplied by Eric Youngdale */
if (driver_byte(result) != 0
&& scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
&& !scsi_sense_is_deferred(&sshdr)
&& sshdr.sense_key == UNIT_ATTENTION
&& sdp->device->removable) {
/* Detected possible disc change. Set the bit - this */
/* may be used if there are filesystems using this device */
sdp->device->changed = 1;
}
}
/* Rely on write phase to clean out srp status values, so no "else" */
/*
* Free the request as soon as it is complete so that its resources
* can be reused without waiting for userspace to read() the
* result. But keep the associated bio (if any) around until
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
__blk_put_request(rq->q, rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
if (sfp->keep_orphan)
srp->sg_io_owned = 0;
else
done = 0;
}
srp->done = done;
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (likely(done)) {
/* Now wake up any sg_read() that is waiting for this
* packet.
*/
wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
kref_put(&sfp->f_ref, sg_remove_sfp);
} else {
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
}
static const struct file_operations sg_fops = {
.owner = THIS_MODULE,
.read = sg_read,
.write = sg_write,
.poll = sg_poll,
.unlocked_ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sg_compat_ioctl,
#endif
.open = sg_open,
.mmap = sg_mmap,
.release = sg_release,
.fasync = sg_fasync,
.llseek = no_llseek,
};
static struct class *sg_sysfs_class;
static int sg_sysfs_valid = 0;
static Sg_device *
sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
unsigned long iflags;
int error;
u32 k;
sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
if (!sdp) {
sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
"failure\n", __func__);
return ERR_PTR(-ENOMEM);
}
idr_preload(GFP_KERNEL);
write_lock_irqsave(&sg_index_lock, iflags);
error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
if (error < 0) {
if (error == -ENOSPC) {
sdev_printk(KERN_WARNING, scsidp,
"Unable to attach sg device type=%d, minor number exceeds %d\n",
scsidp->type, SG_MAX_DEVS - 1);
error = -ENODEV;
} else {
sdev_printk(KERN_WARNING, scsidp, "%s: idr "
"allocation Sg_device failure: %d\n",
__func__, error);
}
goto out_unlock;
}
k = error;
SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
"sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
mutex_init(&sdp->open_rel_lock);
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->open_wait);
atomic_set(&sdp->detaching, 0);
rwlock_init(&sdp->sfd_lock);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
error = 0;
out_unlock:
write_unlock_irqrestore(&sg_index_lock, iflags);
idr_preload_end();
if (error) {
kfree(sdp);
return ERR_PTR(error);
}
return sdp;
}
static int
sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
struct gendisk *disk;
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error;
unsigned long iflags;
disk = alloc_disk(1);
if (!disk) {
pr_warn("%s: alloc_disk failed\n", __func__);
return -ENOMEM;
}
disk->major = SCSI_GENERIC_MAJOR;
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
pr_warn("%s: cdev_alloc failed\n", __func__);
goto out;
}
cdev->owner = THIS_MODULE;
cdev->ops = &sg_fops;
sdp = sg_alloc(disk, scsidp);
if (IS_ERR(sdp)) {
pr_warn("%s: sg_alloc failed\n", __func__);
error = PTR_ERR(sdp);
goto out;
}
error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
if (error)
goto cdev_add_err;
sdp->cdev = cdev;
if (sg_sysfs_valid) {
struct device *sg_class_member;
sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
sdp, "%s", disk->disk_name);
if (IS_ERR(sg_class_member)) {
pr_err("%s: device_create failed\n", __func__);
error = PTR_ERR(sg_class_member);
goto cdev_add_err;
}
error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
&sg_class_member->kobj, "generic");
if (error)
pr_err("%s: unable to make symlink 'generic' back "
"to sg%d\n", __func__, sdp->index);
} else
pr_warn("%s: sg_sys Invalid\n", __func__);
sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
"type %d\n", sdp->index, scsidp->type);
dev_set_drvdata(cl_dev, sdp);
return 0;
cdev_add_err:
write_lock_irqsave(&sg_index_lock, iflags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, iflags);
kfree(sdp);
out:
put_disk(disk);
if (cdev)
cdev_del(cdev);
return error;
}
static void
sg_device_destroy(struct kref *kref)
{
struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
unsigned long flags;
/* CAUTION! Note that the device can still be found via idr_find()
* even though the refcount is 0. Therefore, do idr_remove() BEFORE
* any other cleanup.
*/
write_lock_irqsave(&sg_index_lock, flags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, flags);
SCSI_LOG_TIMEOUT(3,
sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
put_disk(sdp->disk);
kfree(sdp);
}
static void
sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = dev_get_drvdata(cl_dev);
unsigned long iflags;
Sg_fd *sfp;
int val;
if (!sdp)
return;
/* want sdp->detaching non-zero as soon as possible */
val = atomic_inc_return(&sdp->detaching);
if (val > 1)
return; /* only want to do following once per device */
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"%s\n", __func__));
read_lock_irqsave(&sdp->sfd_lock, iflags);
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible_all(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
wake_up_interruptible_all(&sdp->open_wait);
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
kref_put(&sdp->d_ref, sg_device_destroy);
}
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
"size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
static int __init
init_sg(void)
{
int rc;
if (scatter_elem_sz < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = scatter_elem_sz;
}
if (def_reserved_size >= 0)
sg_big_buff = def_reserved_size;
else
def_reserved_size = sg_big_buff;
rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS, "sg");
if (rc)
return rc;
sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
if ( IS_ERR(sg_sysfs_class) ) {
rc = PTR_ERR(sg_sysfs_class);
goto err_out;
}
sg_sysfs_valid = 1;
rc = scsi_register_interface(&sg_interface);
if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
sg_proc_init();
#endif /* CONFIG_SCSI_PROC_FS */
return 0;
}
class_destroy(sg_sysfs_class);
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
return rc;
}
static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
sg_proc_cleanup();
#endif /* CONFIG_SCSI_PROC_FS */
scsi_unregister_interface(&sg_interface);
class_destroy(sg_sysfs_class);
sg_sysfs_valid = 0;
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS);
idr_destroy(&sg_index_idr);
}
static int
sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
unsigned char *long_cmdp = NULL;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
if (hp->cmd_len > BLK_MAX_CDB) {
long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
if (!long_cmdp)
return -ENOMEM;
}
/*
* NOTE
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
* preallocated requests are already in use, then using GFP_ATOMIC with
* blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
* will cause blk_get_request() to sleep until an active command
* completes, freeing up a request. Neither option is ideal, but
* GFP_KERNEL is the better choice to prevent userspace from getting an
* unexpected EWOULDBLOCK.
*
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
blk_rq_set_block_pc(rq);
if (hp->cmd_len > BLK_MAX_CDB)
rq->cmd = long_cmdp;
memcpy(rq->cmd, cmd, hp->cmd_len);
rq->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
rq->sense = srp->sense_b;
rq->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
!sfp->parentdp->device->host->unchecked_isa_dma &&
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
md = NULL;
else
md = &map_data;
if (md) {
if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
sg_link_reserve(sfp, srp, dxfer_len);
else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
if (res)
return res;
}
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
md->nr_entries = req_schp->k_use_sg;
md->offset = 0;
md->null_mapped = hp->dxferp ? 0 : 1;
if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
md->from_user = 1;
else
md->from_user = 0;
}
if (unlikely(iov_count > MAX_UIOVEC))
return -EINVAL;
if (iov_count) {
int size = sizeof(struct iovec) * iov_count;
struct iovec *iov;
struct iov_iter i;
iov = memdup_user(hp->dxferp, size);
if (IS_ERR(iov))
return PTR_ERR(iov);
iov_iter_init(&i, rw, iov, iov_count,
min_t(size_t, hp->dxfer_len,
iov_length(iov, iov_count)));
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
hp->dxfer_len, GFP_ATOMIC);
if (!res) {
srp->bio = rq->bio;
if (!md) {
req_schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
}
}
return res;
}
static int
sg_finish_rem_req(Sg_request *srp)
{
int ret = 0;
Sg_fd *sfp = srp->parentfp;
Sg_scatter_hold *req_schp = &srp->data;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_finish_rem_req: res_used=%d\n",
(int) srp->res_used));
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
if (srp->rq) {
if (srp->rq->cmd != srp->rq->__cmd)
kfree(srp->rq->cmd);
blk_put_request(srp->rq);
}
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
else
sg_remove_scat(sfp, req_schp);
sg_remove_request(sfp, srp);
return ret;
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
int sg_bufflen = tablesize * sizeof(struct page *);
gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
schp->pages = kzalloc(sg_bufflen, gfp_flags);
if (!schp->pages)
return -ENOMEM;
schp->sglist_len = sg_bufflen;
return tablesize; /* number of scat_gath elements allocated */
}
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
if (blk_size < 0)
return -EFAULT;
if (0 == blk_size)
++blk_size; /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: buff_size=%d, blk_size=%d\n",
buff_size, blk_size));
/* N.B. ret_sz carried into this block ... */
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
if (mx_sc_elems < 0)
return mx_sc_elems; /* most likely -ENOMEM */
num = scatter_elem_sz;
if (unlikely(num != scatter_elem_sz_prev)) {
if (num < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = PAGE_SIZE;
} else
scatter_elem_sz_prev = num;
}
if (sfp->low_dma)
gfp_mask |= GFP_DMA;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
gfp_mask |= __GFP_ZERO;
order = get_order(num);
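	/*
	 * Try to build the buffer out of 2^order-page chunks; if
	 * alloc_pages() fails part way through, the out: path below frees
	 * what was obtained, halves the chunk size and retries, down to
	 * order 0 (single pages).
	 */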
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
k++, rem_sz -= ret_sz) {
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
schp->pages[k] = alloc_pages(gfp_mask, order);
if (!schp->pages[k])
goto out;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
scatter_elem_sz = ret_sz;
scatter_elem_sz_prev = ret_sz;
}
}
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
k, num, ret_sz));
} /* end of for loop */
schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
k, rem_sz));
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
return 0;
out:
for (i = 0; i < k; i++)
__free_pages(schp->pages[i], order);
if (--order >= 0)
goto retry;
return -ENOMEM;
}
static void
sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
{
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
if (schp->pages && schp->sglist_len > 0) {
if (!schp->dio_in_use) {
int k;
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
SCSI_LOG_TIMEOUT(5,
sg_printk(KERN_INFO, sfp->parentdp,
"sg_remove_scat: k=%d, pg=0x%p\n",
k, schp->pages[k]));
__free_pages(schp->pages[k], schp->page_order);
}
kfree(schp->pages);
}
}
memset(schp, 0, sizeof (*schp));
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
Sg_scatter_hold *schp = &srp->data;
int k, num;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
"sg_read_oxfer: num_read_xfer=%d\n",
num_read_xfer));
if ((!outp) || (num_read_xfer <= 0))
return 0;
num = 1 << (PAGE_SHIFT + schp->page_order);
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
if (__copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
if (__copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
if (num_read_xfer <= 0)
break;
outp += num;
}
}
return 0;
}
static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
Sg_scatter_hold *schp = &sfp->reserve;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_reserve: req_size=%d\n", req_size));
do {
if (req_size < PAGE_SIZE)
req_size = PAGE_SIZE;
if (0 == sg_build_indirect(schp, sfp, req_size))
return;
else
sg_remove_scat(sfp, schp);
req_size >>= 1; /* divide by 2 */
} while (req_size > (PAGE_SIZE / 2));
}
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
int k, num, rem;
srp->res_used = 1;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_link_reserve: size=%d\n", size));
rem = size;
num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg; k++) {
if (rem <= num) {
req_schp->k_use_sg = k + 1;
req_schp->sglist_len = rsv_schp->sglist_len;
req_schp->pages = rsv_schp->pages;
req_schp->bufflen = size;
req_schp->page_order = rsv_schp->page_order;
break;
} else
rem -= num;
}
if (k >= rsv_schp->k_use_sg)
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_link_reserve: BAD size\n"));
}
static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
"sg_unlink_reserve: req->k_use_sg=%d\n",
(int) req_schp->k_use_sg));
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
}
static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
Sg_request *resp;
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
for (resp = sfp->headrp; resp; resp = resp->nextrp) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
resp->done = 2; /* guard against other readers */
break;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
}
/* always adds to end of list */
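/* Requests come from the fixed per-fd pool sfp->req_arr[SG_MAX_QUEUE];
 * a slot whose parentfp is NULL is free.  Returns NULL when command
 * queuing is off and a request is already outstanding, or when the
 * pool is exhausted. */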
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
resp = sfp->headrp;
if (!resp) {
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
resp = rp;
sfp->headrp = resp;
} else {
if (0 == sfp->cmd_q)
resp = NULL; /* command queuing disallowed */
else {
for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
if (!rp->parentfp)
break;
}
if (k < SG_MAX_QUEUE) {
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
while (resp->nextrp)
resp = resp->nextrp;
resp->nextrp = rp;
resp = rp;
} else
resp = NULL;
}
}
if (resp) {
resp->nextrp = NULL;
resp->header.duration = jiffies_to_msecs(jiffies);
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
}
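/*
 * Design note (annotation): requests live in the fixed
 * req_arr[SG_MAX_QUEUE] array and are threaded onto a singly linked list
 * headed at sfp->headrp. A slot counts as free when its rp->parentfp is
 * NULL, so "allocation" is a bounded linear scan under rq_list_lock
 * rather than a kmalloc() in the request path.
 */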
/* Returns 1 if the request was found and unlinked; 0 if not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
Sg_request *prev_rp;
Sg_request *rp;
unsigned long iflags;
int res = 0;
if ((!sfp) || (!srp) || (!sfp->headrp))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
prev_rp = sfp->headrp;
if (srp == prev_rp) {
sfp->headrp = prev_rp->nextrp;
prev_rp->parentfp = NULL;
res = 1;
} else {
while ((rp = prev_rp->nextrp)) {
if (srp == rp) {
prev_rp->nextrp = rp->nextrp;
rp->parentfp = NULL;
res = 1;
break;
}
prev_rp = rp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
}
static Sg_fd *
sg_add_sfp(Sg_device * sdp)
{
Sg_fd *sfp;
unsigned long iflags;
int bufflen;
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
kref_init(&sfp->f_ref);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
sdp->device->host->unchecked_isa_dma : 1;
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
write_lock_irqsave(&sdp->sfd_lock, iflags);
if (atomic_read(&sdp->detaching)) {
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
return ERR_PTR(-ENODEV);
}
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
max_sectors_bytes(sdp->device->request_queue));
sg_build_reserve(sfp, bufflen);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
sfp->reserve.bufflen,
sfp->reserve.k_use_sg));
kref_get(&sdp->d_ref);
__module_get(THIS_MODULE);
return sfp;
}
static void
sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
/* Cleanup any responses which were never read(). */
while (sfp->headrp)
sg_finish_rem_req(sfp->headrp);
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
"sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen,
(int) sfp->reserve.k_use_sg));
sg_remove_scat(sfp, &sfp->reserve);
}
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
"sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
scsi_device_put(sdp->device);
kref_put(&sdp->d_ref, sg_device_destroy);
module_put(THIS_MODULE);
}
static void
sg_remove_sfp(struct kref *kref)
{
struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
write_lock_irqsave(&sdp->sfd_lock, iflags);
list_del(&sfp->sfd_siblings);
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
}
static int
sg_res_in_use(Sg_fd * sfp)
{
const Sg_request *srp;
unsigned long iflags;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp)
if (srp->res_used)
break;
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return srp ? 1 : 0;
}
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
int *k = data;
if (*k < id)
*k = id;
return 0;
}
static int
sg_last_dev(void)
{
int k = -1;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
read_unlock_irqrestore(&sg_index_lock, iflags);
return k + 1; /* origin 1 */
}
#endif
/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
return idr_find(&sg_index_idr, dev);
}
static Sg_device *
sg_get_dev(int dev)
{
struct sg_device *sdp;
unsigned long flags;
read_lock_irqsave(&sg_index_lock, flags);
sdp = sg_lookup_dev(dev);
if (!sdp)
sdp = ERR_PTR(-ENXIO);
else if (atomic_read(&sdp->detaching)) {
/* If sdp->detaching, then the refcount may already be 0, in
* which case it would be a bug to do kref_get().
*/
sdp = ERR_PTR(-ENODEV);
} else
kref_get(&sdp->d_ref);
read_unlock_irqrestore(&sg_index_lock, flags);
return sdp;
}
#ifdef CONFIG_SCSI_PROC_FS
static struct proc_dir_entry *sg_proc_sgp = NULL;
static char sg_proc_sg_dirname[] = "scsi/sg";
static int sg_proc_seq_show_int(struct seq_file *s, void *v);
static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off);
static const struct file_operations adio_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_adio,
.read = seq_read,
.llseek = seq_lseek,
.write = sg_proc_write_adio,
.release = single_release,
};
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_dressz,
.read = seq_read,
.llseek = seq_lseek,
.write = sg_proc_write_dressz,
.release = single_release,
};
static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_version,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_devhdr,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_dev,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_dev,
};
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_devstrs,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_devstrs,
};
static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_debug,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_debug,
};
struct sg_proc_leaf {
const char * name;
const struct file_operations * fops;
};
static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
{"allow_dio", &adio_fops},
{"debug", &debug_fops},
{"def_reserved_size", &dressz_fops},
{"device_hdr", &devhdr_fops},
{"devices", &dev_fops},
{"device_strs", &devstrs_fops},
{"version", &version_fops}
};
static int
sg_proc_init(void)
{
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
int k;
sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
}
return 0;
}
static void
sg_proc_cleanup(void)
{
int k;
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
if (!sg_proc_sgp)
return;
for (k = 0; k < num_leaves; ++k)
remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
remove_proc_entry(sg_proc_sg_dirname, NULL);
}
static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
seq_printf(s, "%d\n", *((int *)s->private));
return 0;
}
static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}
static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
int err;
unsigned long num;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
err = kstrtoul_from_user(buffer, count, 0, &num);
if (err)
return err;
sg_allow_dio = num ? 1 : 0;
return count;
}
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}
static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
int err;
unsigned long k = ULONG_MAX;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
err = kstrtoul_from_user(buffer, count, 0, &k);
if (err)
return err;
if (k <= 1048576) { /* limit "big buff" to 1 MB */
sg_big_buff = k;
return count;
}
return -ERANGE;
}
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
sg_version_date);
return 0;
}
static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_version, NULL);
}
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
return 0;
}
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_devhdr, NULL);
}
struct sg_proc_deviter {
loff_t index;
size_t max;
};
static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
s->private = it;
if (! it)
return NULL;
it->index = *pos;
it->max = sg_last_dev();
if (it->index >= it->max)
return NULL;
return it;
}
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct sg_proc_deviter * it = s->private;
*pos = ++it->index;
return (it->index < it->max) ? it : NULL;
}
static void dev_seq_stop(struct seq_file *s, void *v)
{
kfree(s->private);
}
static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
return seq_open(file, &dev_seq_ops);
}
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if ((NULL == sdp) || (NULL == sdp->device) ||
(atomic_read(&sdp->detaching)))
seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
else {
scsidp = sdp->device;
seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
(int) atomic_read(&scsidp->device_busy),
(int) scsi_device_online(scsidp));
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
return seq_open(file, &devstrs_seq_ops);
}
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
scsidp = sdp ? sdp->device : NULL;
if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
scsidp->vendor, scsidp->model, scsidp->rev);
else
seq_puts(s, "<no active device>\n");
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, m, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
const char * cp;
unsigned int ms;
k = 0;
list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
k++;
read_lock(&fp->rq_list_lock); /* irqs already disabled */
seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
"(res)sgat=%d low_dma=%d\n", k,
jiffies_to_msecs(fp->timeout),
fp->reserve.bufflen,
(int) fp->reserve.k_use_sg,
(int) fp->low_dma);
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
for (m = 0, srp = fp->headrp;
srp != NULL;
++m, srp = srp->nextrp) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
cp = " rb>> ";
} else {
if (SG_INFO_DIRECT_IO_MASK & hp->info)
cp = " dio>> ";
else
cp = " ";
}
seq_puts(s, cp);
blen = srp->data.bufflen;
usg = srp->data.k_use_sg;
seq_puts(s, srp->done ?
((1 == srp->done) ? "rcv:" : "fin:")
: "act:");
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
seq_printf(s, " dur=%d", hp->duration);
else {
ms = jiffies_to_msecs(jiffies);
seq_printf(s, " t_o/elap=%d/%d",
(new_interface ? hp->timeout :
jiffies_to_msecs(fp->timeout)),
(ms > hp->duration ? ms - hp->duration : 0));
}
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
if (0 == m)
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
}
static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
return seq_open(file, &debug_seq_ops);
}
static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
unsigned long iflags;
if (it && (0 == it->index))
seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
(int)it->max, sg_big_buff);
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if (NULL == sdp)
goto skip;
read_lock(&sdp->sfd_lock);
if (!list_empty(&sdp->sfds)) {
seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
if (atomic_read(&sdp->detaching))
seq_puts(s, "detaching pending close ");
else if (sdp->device) {
struct scsi_device *scsidp = sdp->device;
seq_printf(s, "%d:%d:%d:%llu em=%d",
scsidp->host->host_no,
scsidp->channel, scsidp->id,
scsidp->lun,
scsidp->host->hostt->emulated);
}
seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
sg_proc_debug_helper(s, sdp);
}
read_unlock(&sdp->sfd_lock);
skip:
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
#endif /* CONFIG_SCSI_PROC_FS */
module_init(init_sg);
module_exit(exit_sg);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_1696_0 |
crossvul-cpp_data_good_2155_1 | /*-
* Copyright (c) 2008 Christos Zoulas
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Parse Composite Document Files, the format used in Microsoft Office
* document files before they switched to zipped XML.
* Info from: http://sc.openoffice.org/compdocfileformat.pdf
*
* N.B. This is the "Composite Document File" format, and not the
* "Compound Document Format", nor the "Channel Definition Format".
*/
#include "file.h"
#ifndef lint
FILE_RCSID("@(#)$File: cdf.c,v 1.53 2013/02/26 16:20:42 christos Exp $")
#endif
#include <assert.h>
#ifdef CDF_DEBUG
#include <err.h>
#endif
#include <stdlib.h>
#ifdef PHP_WIN32
#include "win32/unistd.h"
#else
#include <unistd.h>
#endif
#ifndef UINT32_MAX
# define UINT32_MAX (0xffffffff)
#endif
#include <string.h>
#include <time.h>
#include <ctype.h>
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef EFTYPE
#define EFTYPE EINVAL
#endif
#include "cdf.h"
#ifdef CDF_DEBUG
#define DPRINTF(a) printf a, fflush(stdout)
#else
#define DPRINTF(a)
#endif
static union {
char s[4];
uint32_t u;
} cdf_bo;
#define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304)
#define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x)))
#define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x)))
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
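/*
 * Endianness note (annotation): NEED_SWAP detects a big-endian host at
 * run time. cdf_read_header() stores the bytes 01 02 03 04 into cdf_bo.s,
 * and NEED_SWAP checks whether they read back as the integer 0x01020304.
 * A minimal standalone sketch of the same trick:
 *
 *	union { char s[4]; uint32_t u; } bo;
 *	memcpy(bo.s, "\01\02\03\04", 4);
 *	int big_endian = (bo.u == 0x01020304);
 */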
/*
* swap a short
*/
static uint16_t
_cdf_tole2(uint16_t sv)
{
uint16_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[1];
d[1] = s[0];
return rv;
}
/*
* swap an int
*/
static uint32_t
_cdf_tole4(uint32_t sv)
{
uint32_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[3];
d[1] = s[2];
d[2] = s[1];
d[3] = s[0];
return rv;
}
/*
* swap a quad
*/
static uint64_t
_cdf_tole8(uint64_t sv)
{
uint64_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[7];
d[1] = s[6];
d[2] = s[5];
d[3] = s[4];
d[4] = s[3];
d[5] = s[2];
d[6] = s[1];
d[7] = s[0];
return rv;
}
/*
* grab a uint32_t from a possibly unaligned address, and return it in
* the native host order.
*/
static uint32_t
cdf_getuint32(const uint8_t *p, size_t offs)
{
uint32_t rv;
(void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv));
return CDF_TOLE4(rv);
}
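/*
 * The memcpy() above is the portable way to load from a possibly
 * unaligned address: casting p + offs * sizeof(uint32_t) to
 * (const uint32_t *) and dereferencing it would be undefined behavior on
 * strict-alignment targets. Compilers normally lower a fixed-size memcpy
 * to a single load, so this costs nothing on common architectures.
 */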
#define CDF_UNPACK(a) \
(void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a)
#define CDF_UNPACKA(a) \
(void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a)
uint16_t
cdf_tole2(uint16_t sv)
{
return CDF_TOLE2(sv);
}
uint32_t
cdf_tole4(uint32_t sv)
{
return CDF_TOLE4(sv);
}
uint64_t
cdf_tole8(uint64_t sv)
{
return CDF_TOLE8(sv);
}
void
cdf_swap_header(cdf_header_t *h)
{
size_t i;
h->h_magic = CDF_TOLE8(h->h_magic);
h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]);
h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]);
h->h_revision = CDF_TOLE2(h->h_revision);
h->h_version = CDF_TOLE2(h->h_version);
h->h_byte_order = CDF_TOLE2(h->h_byte_order);
h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2);
h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2);
h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat);
h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory);
h->h_min_size_standard_stream =
CDF_TOLE4(h->h_min_size_standard_stream);
h->h_secid_first_sector_in_short_sat =
CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat);
h->h_num_sectors_in_short_sat =
CDF_TOLE4(h->h_num_sectors_in_short_sat);
h->h_secid_first_sector_in_master_sat =
CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat);
h->h_num_sectors_in_master_sat =
CDF_TOLE4(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]);
}
void
cdf_unpack_header(cdf_header_t *h, char *buf)
{
size_t i;
size_t len = 0;
CDF_UNPACK(h->h_magic);
CDF_UNPACKA(h->h_uuid);
CDF_UNPACK(h->h_revision);
CDF_UNPACK(h->h_version);
CDF_UNPACK(h->h_byte_order);
CDF_UNPACK(h->h_sec_size_p2);
CDF_UNPACK(h->h_short_sec_size_p2);
CDF_UNPACKA(h->h_unused0);
CDF_UNPACK(h->h_num_sectors_in_sat);
CDF_UNPACK(h->h_secid_first_directory);
CDF_UNPACKA(h->h_unused1);
CDF_UNPACK(h->h_min_size_standard_stream);
CDF_UNPACK(h->h_secid_first_sector_in_short_sat);
CDF_UNPACK(h->h_num_sectors_in_short_sat);
CDF_UNPACK(h->h_secid_first_sector_in_master_sat);
CDF_UNPACK(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
CDF_UNPACK(h->h_master_sat[i]);
}
void
cdf_swap_dir(cdf_directory_t *d)
{
d->d_namelen = CDF_TOLE2(d->d_namelen);
d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child);
d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child);
d->d_storage = CDF_TOLE4((uint32_t)d->d_storage);
d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]);
d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]);
d->d_flags = CDF_TOLE4(d->d_flags);
d->d_created = CDF_TOLE8((uint64_t)d->d_created);
d->d_modified = CDF_TOLE8((uint64_t)d->d_modified);
d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector);
d->d_size = CDF_TOLE4(d->d_size);
}
void
cdf_swap_class(cdf_classid_t *d)
{
d->cl_dword = CDF_TOLE4(d->cl_dword);
d->cl_word[0] = CDF_TOLE2(d->cl_word[0]);
d->cl_word[1] = CDF_TOLE2(d->cl_word[1]);
}
void
cdf_unpack_dir(cdf_directory_t *d, char *buf)
{
size_t len = 0;
CDF_UNPACKA(d->d_name);
CDF_UNPACK(d->d_namelen);
CDF_UNPACK(d->d_type);
CDF_UNPACK(d->d_color);
CDF_UNPACK(d->d_left_child);
CDF_UNPACK(d->d_right_child);
CDF_UNPACK(d->d_storage);
CDF_UNPACKA(d->d_storage_uuid);
CDF_UNPACK(d->d_flags);
CDF_UNPACK(d->d_created);
CDF_UNPACK(d->d_modified);
CDF_UNPACK(d->d_stream_first_sector);
CDF_UNPACK(d->d_size);
CDF_UNPACK(d->d_unused0);
}
static int
cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
const void *p, size_t tail, int line)
{
const char *b = (const char *)sst->sst_tab;
const char *e = ((const char *)p) + tail;
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
(void)&line;
if (e >= b && (size_t)(e - b) <= ss * sst->sst_len)
return 0;
DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
" > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
ss * sst->sst_len, ss, sst->sst_len));
errno = EFTYPE;
return -1;
}
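/*
 * Bounds-check note (annotation): pointers derived from sst->sst_tab are
 * expected to pass through this check before being dereferenced. The span
 * [p, p + tail) must lie inside the stream, whose byte length is
 * sector_size * sst_len; the sector size depends on whether this is a
 * short stream (sst_dirlen below h_min_size_standard_stream) or a
 * regular one.
 */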
static ssize_t
cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len)
{
size_t siz = (size_t)off + len;
if ((off_t)(off + len) != (off_t)siz) {
errno = EINVAL;
return -1;
}
if (info->i_buf != NULL && info->i_len >= siz) {
(void)memcpy(buf, &info->i_buf[off], len);
return (ssize_t)len;
}
if (info->i_fd == -1)
return -1;
if (FINFO_LSEEK_FUNC(info->i_fd, off, SEEK_SET) == (off_t)-1)
return -1;
if (FINFO_READ_FUNC(info->i_fd, buf, len) != (ssize_t)len)
return -1;
return (ssize_t)len;
}
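/*
 * The "(off_t)(off + len) != (off_t)siz" test above is an overflow guard:
 * siz holds the sum computed in size_t, and it is compared against the
 * sum computed in off_t. If off + len wraps or truncates in either type
 * (e.g. a 64-bit off_t sum that does not fit in a 32-bit size_t), the two
 * values disagree and the read is rejected with EINVAL before any buffer
 * is touched.
 */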
int
cdf_read_header(const cdf_info_t *info, cdf_header_t *h)
{
char buf[512];
(void)memcpy(cdf_bo.s, "\01\02\03\04", 4);
if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1)
return -1;
cdf_unpack_header(h, buf);
cdf_swap_header(h);
if (h->h_magic != CDF_MAGIC) {
DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%"
INT64_T_FORMAT "x\n",
(unsigned long long)h->h_magic,
(unsigned long long)CDF_MAGIC));
goto out;
}
if (h->h_sec_size_p2 > 20) {
DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2));
goto out;
}
if (h->h_short_sec_size_p2 > 20) {
DPRINTF(("Bad short sector size 0x%u\n",
h->h_short_sec_size_p2));
goto out;
}
return 0;
out:
errno = EFTYPE;
return -1;
}
ssize_t
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SEC_SIZE(h);
size_t pos = CDF_SEC_POS(h, id);
assert(ss == len);
return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);
}
ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
pos + len, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
}
/*
* Read the sector allocation table.
*/
int
cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat)
{
size_t i, j, k;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t *msa, mid, sec;
size_t nsatpersec = (ss / sizeof(mid)) - 1;
for (i = 0; i < __arraycount(h->h_master_sat); i++)
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
#define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss))
if ((nsatpersec > 0 &&
h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) ||
i > CDF_SEC_LIMIT) {
DPRINTF(("Number of sectors in master SAT too big %u %"
SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i));
errno = EFTYPE;
return -1;
}
sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i;
DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n",
sat->sat_len, ss));
if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss)))
== NULL)
return -1;
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] < 0)
break;
if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
h->h_master_sat[i]) != (ssize_t)ss) {
DPRINTF(("Reading sector %d", h->h_master_sat[i]));
goto out1;
}
}
if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL)
goto out1;
mid = h->h_secid_first_sector_in_master_sat;
for (j = 0; j < h->h_num_sectors_in_master_sat; j++) {
if (mid < 0)
goto out;
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Reading master sector loop limit"));
errno = EFTYPE;
goto out2;
}
if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) {
DPRINTF(("Reading master sector %d", mid));
goto out2;
}
for (k = 0; k < nsatpersec; k++, i++) {
sec = CDF_TOLE4((uint32_t)msa[k]);
if (sec < 0)
goto out;
if (i >= sat->sat_len) {
DPRINTF(("Out of bounds reading MSA %" SIZE_T_FORMAT
"u >= %" SIZE_T_FORMAT "u", i, sat->sat_len));
errno = EFTYPE;
goto out2;
}
if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
sec) != (ssize_t)ss) {
DPRINTF(("Reading sector %d",
CDF_TOLE4(msa[k])));
goto out2;
}
}
mid = CDF_TOLE4((uint32_t)msa[nsatpersec]);
}
out:
sat->sat_len = i;
free(msa);
return 0;
out2:
free(msa);
out1:
free(sat->sat_tab);
return -1;
}
size_t
cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
{
size_t i, j;
cdf_secid_t maxsector = (cdf_secid_t)((sat->sat_len * size)
/ sizeof(maxsector));
DPRINTF(("Chain:"));
for (j = i = 0; sid >= 0; i++, j++) {
DPRINTF((" %d", sid));
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Counting chain loop limit"));
errno = EFTYPE;
return (size_t)-1;
}
if (sid >= maxsector) {
DPRINTF(("Sector %d >= %d\n", sid, maxsector));
errno = EFTYPE;
return (size_t)-1;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
DPRINTF(("\n"));
return i;
}
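/*
 * Two independent defenses in the chain walk above, both needed for
 * untrusted input: CDF_LOOP_LIMIT bounds the iteration count so a cyclic
 * SAT chain cannot loop forever, and the maxsector comparison rejects a
 * sector id that would index past the end of sat_tab before it is used
 * as the subscript for the next hop.
 */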
int
cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
size_t ss = CDF_SEC_SIZE(h), i, j;
ssize_t nr;
scn->sst_len = cdf_count_chain(sat, sid, ss);
scn->sst_dirlen = len;
if (scn->sst_len == (size_t)-1)
return -1;
scn->sst_tab = calloc(scn->sst_len, ss);
if (scn->sst_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read long sector chain loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= scn->sst_len) {
DPRINTF(("Out of bounds reading long sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
scn->sst_len));
errno = EFTYPE;
goto out;
}
if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h,
sid)) != (ssize_t)ss) {
if (i == scn->sst_len - 1 && nr > 0) {
/* Last sector might be truncated */
return 0;
}
DPRINTF(("Reading long sector chain %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(scn->sst_tab);
return -1;
}
int
cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1)
return -1;
scn->sst_tab = calloc(scn->sst_len, ss);
if (scn->sst_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sector chain loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= scn->sst_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n",
i, scn->sst_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h,
sid) != (ssize_t)ss) {
DPRINTF(("Reading short sector chain %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]);
}
return 0;
out:
free(scn->sst_tab);
return -1;
}
int
cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL)
return cdf_read_short_sector_chain(h, ssat, sst, sid, len,
scn);
else
return cdf_read_long_sector_chain(info, h, sat, sid, len, scn);
}
int
cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_dir_t *dir)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h), ns, nd;
char *buf;
cdf_secid_t sid = h->h_secid_first_directory;
ns = cdf_count_chain(sat, sid, ss);
if (ns == (size_t)-1)
return -1;
nd = ss / CDF_DIRECTORY_SIZE;
dir->dir_len = ns * nd;
dir->dir_tab = CAST(cdf_directory_t *,
calloc(dir->dir_len, sizeof(dir->dir_tab[0])));
if (dir->dir_tab == NULL)
return -1;
if ((buf = CAST(char *, malloc(ss))) == NULL) {
free(dir->dir_tab);
return -1;
}
for (j = i = 0; i < ns; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read dir loop limit"));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) {
DPRINTF(("Reading directory sector %d", sid));
goto out;
}
for (j = 0; j < nd; j++) {
cdf_unpack_dir(&dir->dir_tab[i * nd + j],
&buf[j * CDF_DIRECTORY_SIZE]);
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (NEED_SWAP)
for (i = 0; i < dir->dir_len; i++)
cdf_swap_dir(&dir->dir_tab[i]);
free(buf);
return 0;
out:
free(dir->dir_tab);
free(buf);
return -1;
}
int
cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_sat_t *ssat)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t sid = h->h_secid_first_sector_in_short_sat;
ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h));
if (ssat->sat_len == (size_t)-1)
return -1;
ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss));
if (ssat->sat_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sat sector loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= ssat->sat_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
ssat->sat_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) !=
(ssize_t)ss) {
DPRINTF(("Reading short sat sector %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(ssat->sat_tab);
return -1;
}
int
cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn)
{
size_t i;
const cdf_directory_t *d;
for (i = 0; i < dir->dir_len; i++)
if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE)
break;
/* If it is not there, just fake it; some docs don't have it */
if (i == dir->dir_len)
goto out;
d = &dir->dir_tab[i];
/* If it is not there, just fake it; some docs don't have it */
if (d->d_stream_first_sector < 0)
goto out;
return cdf_read_long_sector_chain(info, h, sat,
d->d_stream_first_sector, d->d_size, scn);
out:
scn->sst_tab = NULL;
scn->sst_len = 0;
scn->sst_dirlen = 0;
return 0;
}
static int
cdf_namecmp(const char *d, const uint16_t *s, size_t l)
{
for (; l--; d++, s++)
if (*d != CDF_TOLE2(*s))
return (unsigned char)*d - CDF_TOLE2(*s);
return 0;
}
int
cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, cdf_stream_t *scn)
{
size_t i;
const cdf_directory_t *d;
static const char name[] = "\05SummaryInformation";
for (i = dir->dir_len; i > 0; i--)
if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM &&
cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name))
== 0)
break;
if (i == 0) {
DPRINTF(("Cannot find summary information section\n"));
errno = ESRCH;
return -1;
}
d = &dir->dir_tab[i - 1];
return cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, scn);
}
int
cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t ofs, tail = (i << 1) + 1;
if (cdf_check_stream_offset(sst, h, p, tail * sizeof(uint32_t),
__LINE__) == -1)
goto out;
ofs = CDF_GETUINT32(p, tail);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q < p || q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
int
cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE4(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info,
count, &maxcount) == -1)
return -1;
return 0;
}
int
cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id)
{
return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-"
"%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0],
id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0],
id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4],
id->cl_six[5]);
}
static const struct {
uint32_t v;
const char *n;
} vn[] = {
{ CDF_PROPERTY_CODE_PAGE, "Code page" },
{ CDF_PROPERTY_TITLE, "Title" },
{ CDF_PROPERTY_SUBJECT, "Subject" },
{ CDF_PROPERTY_AUTHOR, "Author" },
{ CDF_PROPERTY_KEYWORDS, "Keywords" },
{ CDF_PROPERTY_COMMENTS, "Comments" },
{ CDF_PROPERTY_TEMPLATE, "Template" },
{ CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" },
{ CDF_PROPERTY_REVISION_NUMBER, "Revision Number" },
{ CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" },
{ CDF_PROPERTY_LAST_PRINTED, "Last Printed" },
{ CDF_PROPERTY_CREATE_TIME, "Create Time/Date" },
{ CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" },
{ CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" },
{ CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" },
{ CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" },
{ CDF_PROPERTY_THUMBNAIL, "Thumbnail" },
{ CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" },
{ CDF_PROPERTY_SECURITY, "Security" },
{ CDF_PROPERTY_LOCALE_ID, "Locale ID" },
};
int
cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p)
{
size_t i;
for (i = 0; i < __arraycount(vn); i++)
if (vn[i].v == p)
return snprintf(buf, bufsiz, "%s", vn[i].n);
return snprintf(buf, bufsiz, "0x%x", p);
}
int
cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts)
{
int len = 0;
int days, hours, mins, secs;
ts /= CDF_TIME_PREC;
secs = (int)(ts % 60);
ts /= 60;
mins = (int)(ts % 60);
ts /= 60;
hours = (int)(ts % 24);
ts /= 24;
days = (int)ts;
if (days) {
len += snprintf(buf + len, bufsiz - len, "%dd+", days);
if ((size_t)len >= bufsiz)
return len;
}
if (days || hours) {
len += snprintf(buf + len, bufsiz - len, "%.2d:", hours);
if ((size_t)len >= bufsiz)
return len;
}
len += snprintf(buf + len, bufsiz - len, "%.2d:", mins);
if ((size_t)len >= bufsiz)
return len;
len += snprintf(buf + len, bufsiz - len, "%.2d", secs);
return len;
}
#ifdef CDF_DEBUG
void
cdf_dump_header(const cdf_header_t *h)
{
size_t i;
#define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b)
#define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \
h->h_ ## b, 1 << h->h_ ## b)
DUMP("%d", revision);
DUMP("%d", version);
DUMP("0x%x", byte_order);
DUMP2("%d", sec_size_p2);
DUMP2("%d", short_sec_size_p2);
DUMP("%d", num_sectors_in_sat);
DUMP("%d", secid_first_directory);
DUMP("%d", min_size_standard_stream);
DUMP("%d", secid_first_sector_in_short_sat);
DUMP("%d", num_sectors_in_short_sat);
DUMP("%d", secid_first_sector_in_master_sat);
DUMP("%d", num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
(void)fprintf(stderr, "%35.35s[%.3zu] = %d\n",
"master_sat", i, h->h_master_sat[i]);
}
}
void
cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size)
{
size_t i, j, s = size / sizeof(cdf_secid_t);
for (i = 0; i < sat->sat_len; i++) {
(void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6"
SIZE_T_FORMAT "u: ", prefix, i, i * s);
for (j = 0; j < s; j++) {
(void)fprintf(stderr, "%5d, ",
CDF_TOLE4(sat->sat_tab[s * i + j]));
if ((j + 1) % 10 == 0)
(void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT
"u: ", i * s + j + 1);
}
(void)fprintf(stderr, "\n");
}
}
void
cdf_dump(void *v, size_t len)
{
size_t i, j;
unsigned char *p = v;
char abuf[16];
(void)fprintf(stderr, "%.4x: ", 0);
for (i = 0, j = 0; i < len; i++, p++) {
(void)fprintf(stderr, "%.2x ", *p);
abuf[j++] = isprint(*p) ? *p : '.';
if (j == 16) {
j = 0;
abuf[15] = '\0';
(void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ",
abuf, i + 1);
}
}
(void)fprintf(stderr, "\n");
}
void
cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst)
{
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
cdf_dump(sst->sst_tab, ss * sst->sst_len);
}
void
cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir)
{
size_t i, j;
cdf_directory_t *d;
char name[__arraycount(d->d_name)];
cdf_stream_t scn;
struct timeval ts;
static const char *types[] = { "empty", "user storage",
"user stream", "lockbytes", "property", "root storage" };
for (i = 0; i < dir->dir_len; i++) {
d = &dir->dir_tab[i];
for (j = 0; j < sizeof(name); j++)
name[j] = (char)CDF_TOLE2(d->d_name[j]);
(void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n",
i, name);
if (d->d_type < __arraycount(types))
(void)fprintf(stderr, "Type: %s\n", types[d->d_type]);
else
(void)fprintf(stderr, "Type: %d\n", d->d_type);
(void)fprintf(stderr, "Color: %s\n",
d->d_color ? "black" : "red");
(void)fprintf(stderr, "Left child: %d\n", d->d_left_child);
(void)fprintf(stderr, "Right child: %d\n", d->d_right_child);
(void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags);
cdf_timestamp_to_timespec(&ts, d->d_created);
(void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec));
cdf_timestamp_to_timespec(&ts, d->d_modified);
(void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec));
(void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector);
(void)fprintf(stderr, "Size %d\n", d->d_size);
switch (d->d_type) {
case CDF_DIR_TYPE_USER_STORAGE:
(void)fprintf(stderr, "Storage: %d\n", d->d_storage);
break;
case CDF_DIR_TYPE_USER_STREAM:
if (sst == NULL)
break;
if (cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, &scn) == -1) {
warn("Can't read stream for %s at %d len %d",
name, d->d_stream_first_sector, d->d_size);
break;
}
cdf_dump_stream(h, &scn);
free(scn.sst_tab);
break;
default:
break;
}
}
}
void
cdf_dump_property_info(const cdf_property_info_t *info, size_t count)
{
cdf_timestamp_t tp;
struct timeval ts;
char buf[64];
size_t i, j;
for (i = 0; i < count; i++) {
cdf_print_property_name(buf, sizeof(buf), info[i].pi_id);
(void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf);
switch (info[i].pi_type) {
case CDF_NULL:
break;
case CDF_SIGNED16:
(void)fprintf(stderr, "signed 16 [%hd]\n",
info[i].pi_s16);
break;
case CDF_SIGNED32:
(void)fprintf(stderr, "signed 32 [%d]\n",
info[i].pi_s32);
break;
case CDF_UNSIGNED32:
(void)fprintf(stderr, "unsigned 32 [%u]\n",
info[i].pi_u32);
break;
case CDF_FLOAT:
(void)fprintf(stderr, "float [%g]\n",
info[i].pi_f);
break;
case CDF_DOUBLE:
(void)fprintf(stderr, "double [%g]\n",
info[i].pi_d);
break;
case CDF_LENGTH32_STRING:
(void)fprintf(stderr, "string %u [%.*s]\n",
info[i].pi_str.s_len,
info[i].pi_str.s_len, info[i].pi_str.s_buf);
break;
case CDF_LENGTH32_WSTRING:
(void)fprintf(stderr, "string %u [",
info[i].pi_str.s_len);
for (j = 0; j + 1 < info[i].pi_str.s_len; j++)
(void)fputc(info[i].pi_str.s_buf[j << 1], stderr);
(void)fprintf(stderr, "]\n");
break;
case CDF_FILETIME:
tp = info[i].pi_tp;
#if defined(PHP_WIN32) && _MSC_VER <= 1500
if (tp < 1000000000000000i64) {
#else
if (tp < 1000000000000000LL) {
#endif
cdf_print_elapsed_time(buf, sizeof(buf), tp);
(void)fprintf(stderr, "timestamp %s\n", buf);
} else {
cdf_timestamp_to_timespec(&ts, tp);
(void)fprintf(stderr, "timestamp %s",
cdf_ctime(&ts.tv_sec));
}
break;
case CDF_CLIPBOARD:
(void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32);
break;
default:
DPRINTF(("Don't know how to deal with %x\n",
info[i].pi_type));
break;
}
}
}
void
cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst)
{
char buf[128];
cdf_summary_info_header_t ssi;
cdf_property_info_t *info;
size_t count;
(void)&h;
if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1)
return;
(void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order);
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff,
ssi.si_os_version >> 8);
(void)fprintf(stderr, "Os %d\n", ssi.si_os);
cdf_print_classid(buf, sizeof(buf), &ssi.si_class);
(void)fprintf(stderr, "Class %s\n", buf);
(void)fprintf(stderr, "Count %d\n", ssi.si_count);
cdf_dump_property_info(info, count);
free(info);
}
#endif
#ifdef TEST
int
main(int argc, char *argv[])
{
int i;
cdf_header_t h;
cdf_sat_t sat, ssat;
cdf_stream_t sst, scn;
cdf_dir_t dir;
cdf_info_t info;
if (argc < 2) {
(void)fprintf(stderr, "Usage: %s <filename>\n", getprogname());
return -1;
}
info.i_buf = NULL;
info.i_len = 0;
for (i = 1; i < argc; i++) {
if ((info.i_fd = open(argv[1], O_RDONLY)) == -1)
err(1, "Cannot open `%s'", argv[1]);
if (cdf_read_header(&info, &h) == -1)
err(1, "Cannot read header");
#ifdef CDF_DEBUG
cdf_dump_header(&h);
#endif
if (cdf_read_sat(&info, &h, &sat) == -1)
err(1, "Cannot read sat");
#ifdef CDF_DEBUG
cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h));
#endif
if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1)
err(1, "Cannot read ssat");
#ifdef CDF_DEBUG
cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h));
#endif
if (cdf_read_dir(&info, &h, &sat, &dir) == -1)
err(1, "Cannot read dir");
if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1)
err(1, "Cannot read short stream");
#ifdef CDF_DEBUG
cdf_dump_stream(&h, &sst);
#endif
#ifdef CDF_DEBUG
cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir);
#endif
if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir,
&scn) == -1)
err(1, "Cannot read summary info");
#ifdef CDF_DEBUG
cdf_dump_summary_info(&h, &scn);
#endif
(void)close(info.i_fd);
}
return 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2155_1 |
crossvul-cpp_data_good_4987_3 | /* Just try to read the invalid gd2 image & not crash. */
#include "gd.h"
#include <stdio.h>
#include <stdlib.h>
#include "gdtest.h"
int main()
{
gdImagePtr im;
FILE *fp;
char path[1024];
/* Read the corrupt image. */
snprintf(path, sizeof(path), "%s/gd2/invalid_neg_size.gd2", GDTEST_TOP_DIR);
fp = fopen(path, "rb");
if (!fp) {
printf("failed, cannot open file\n");
return 1;
}
im = gdImageCreateFromGd2(fp);
fclose(fp);
/* Should have failed & rejected it. */
return im == NULL ? 0 : 1;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_4987_3 |
crossvul-cpp_data_good_3667_0 | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <unwind.h>
#include <arpa/inet.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/system_properties.h>
#include <sys/types.h>
#include <sys/un.h>
#include "dlmalloc.h"
#include "logd.h"
#include "malloc_debug_common.h"
// This file should be included into the build only when
// MALLOC_LEAK_CHECK, or MALLOC_QEMU_INSTRUMENT, or both
// macros are defined.
#ifndef MALLOC_LEAK_CHECK
#error MALLOC_LEAK_CHECK is not defined.
#endif // !MALLOC_LEAK_CHECK
// Global variables defined in malloc_debug_common.c
extern int gMallocLeakZygoteChild;
extern pthread_mutex_t gAllocationsMutex;
extern HashTable gHashTable;
extern const MallocDebug __libc_malloc_default_dispatch;
extern const MallocDebug* __libc_malloc_dispatch;
// =============================================================================
// log functions
// =============================================================================
#define debug_log(format, ...) \
__libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak_check", (format), ##__VA_ARGS__ )
#define error_log(format, ...) \
__libc_android_log_print(ANDROID_LOG_ERROR, "malloc_leak_check", (format), ##__VA_ARGS__ )
#define info_log(format, ...) \
__libc_android_log_print(ANDROID_LOG_INFO, "malloc_leak_check", (format), ##__VA_ARGS__ )
static int gTrapOnError = 1;
#define MALLOC_ALIGNMENT 8
#define GUARD 0x48151642
#define DEBUG 0
// =============================================================================
// Structures
// =============================================================================
typedef struct AllocationEntry AllocationEntry;
struct AllocationEntry {
HashEntry* entry;
uint32_t guard;
};
// =============================================================================
// Hash Table functions
// =============================================================================
static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
{
if (backtrace == NULL) return 0;
int hash = 0;
size_t i;
for (i = 0 ; i < numEntries ; i++) {
hash = (hash * 33) + (backtrace[i] >> 2);
}
return hash;
}
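// get_hash() is a variant of the classic "times 33" multiplicative hash
// applied to the backtrace words; the ">> 2" drops the low address bits,
// which are nearly constant for aligned code addresses. One step, assuming
// a hypothetical 32-bit return address (illustrative only):
//
//     hash = hash * 33 + (0x40001234 >> 2);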
static HashEntry* find_entry(HashTable* table, int slot,
intptr_t* backtrace, size_t numEntries, size_t size)
{
HashEntry* entry = table->slots[slot];
while (entry != NULL) {
//debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
// backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
/*
* See if the entry matches exactly. We compare the "size" field,
* including the flag bits.
*/
if (entry->size == size && entry->numEntries == numEntries &&
!memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) {
return entry;
}
entry = entry->next;
}
return NULL;
}
static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size)
{
size_t hash = get_hash(backtrace, numEntries);
size_t slot = hash % HASHTABLE_SIZE;
if (size & SIZE_FLAG_MASK) {
debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
abort();
}
if (gMallocLeakZygoteChild)
size |= SIZE_FLAG_ZYGOTE_CHILD;
HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);
if (entry != NULL) {
entry->allocations++;
} else {
// create a new entry
entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
if (!entry)
return NULL;
entry->allocations = 1;
entry->slot = slot;
entry->prev = NULL;
entry->next = gHashTable.slots[slot];
entry->numEntries = numEntries;
entry->size = size;
memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t));
gHashTable.slots[slot] = entry;
if (entry->next != NULL) {
entry->next->prev = entry;
}
// we just added an entry, increase the size of the hashtable
gHashTable.count++;
}
return entry;
}
static int is_valid_entry(HashEntry* entry)
{
if (entry != NULL) {
int i;
for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
HashEntry* e1 = gHashTable.slots[i];
while (e1 != NULL) {
if (e1 == entry) {
return 1;
}
e1 = e1->next;
}
}
}
return 0;
}
static void remove_entry(HashEntry* entry)
{
HashEntry* prev = entry->prev;
HashEntry* next = entry->next;
if (prev != NULL) entry->prev->next = next;
if (next != NULL) entry->next->prev = prev;
if (prev == NULL) {
// we are the head of the list. set the head to be next
gHashTable.slots[entry->slot] = entry->next;
}
// we just removed an entry, decrease the size of the hashtable
gHashTable.count--;
}
// =============================================================================
// stack trace functions
// =============================================================================
typedef struct
{
size_t count;
intptr_t* addrs;
} stack_crawl_state_t;
/* depends how the system includes define this */
#ifdef HAVE_UNWIND_CONTEXT_STRUCT
typedef struct _Unwind_Context __unwind_context;
#else
typedef _Unwind_Context __unwind_context;
#endif
static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
{
stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
if (state->count) {
intptr_t ip = (intptr_t)_Unwind_GetIP(context);
if (ip) {
state->addrs[0] = ip;
state->addrs++;
state->count--;
return _URC_NO_REASON;
}
}
/*
* If we run out of space to record the address or 0 has been seen, stop
* unwinding the stack.
*/
return _URC_END_OF_STACK;
}
static inline
int get_backtrace(intptr_t* addrs, size_t max_entries)
{
stack_crawl_state_t state;
state.count = max_entries;
state.addrs = (intptr_t*)addrs;
_Unwind_Backtrace(trace_function, (void*)&state);
return max_entries - state.count;
}
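// get_backtrace() drives libgcc's _Unwind_Backtrace(): trace_function()
// runs once per frame, recording _Unwind_GetIP() until either the addrs
// array is full or a zero IP is seen, at which point it returns
// _URC_END_OF_STACK to stop the unwind. The number of frames captured is
// recovered from how much of state.count was consumed.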
// =============================================================================
// malloc check functions
// =============================================================================
#define CHK_FILL_FREE 0xef
#define CHK_SENTINEL_VALUE (char)0xeb
#define CHK_SENTINEL_HEAD_SIZE 16
#define CHK_SENTINEL_TAIL_SIZE 16
#define CHK_OVERHEAD_SIZE ( CHK_SENTINEL_HEAD_SIZE + \
CHK_SENTINEL_TAIL_SIZE + \
sizeof(size_t) )
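/*
 * Guarded block layout as produced by chk_malloc() below (tail padding
 * depends on dlmalloc_usable_size()):
 *
 *   buffer                                        buffer + usable_size
 *   | 16-byte head sentinel | user bytes | sentinel fill | size_t: bytes |
 *                           ^ pointer returned to the caller
 *
 * chk_mem_check() verifies the 16 bytes before the user pointer and the
 * 16 bytes starting at (user pointer + bytes), recovering "bytes" from
 * the last sizeof(size_t) bytes of the block.
 */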
static void dump_stack_trace()
{
intptr_t addrs[20];
int c = get_backtrace(addrs, 20);
char buf[16];
char tmp[16*20];
int i;
tmp[0] = 0; // Need to initialize tmp[0] for the first strcat
for (i=0 ; i<c; i++) {
snprintf(buf, sizeof buf, "%2d: %08x\n", i, addrs[i]);
strlcat(tmp, buf, sizeof tmp);
}
__libc_android_log_print(ANDROID_LOG_ERROR, "libc", "call stack:\n%s", tmp);
}
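/*
 * Buffer sizing note: each formatted line is at most 13 characters
 * ("%2d: %08x\n" -> 2 + 2 + 8 + 1) plus the terminator, so buf[16] holds
 * one line and tmp[16*20] holds all 20, with snprintf/strlcat as a
 * backstop if the fields ever widen.
 */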
static int is_valid_malloc_pointer(void* addr)
{
return 1;
}
static void assert_log_message(const char* format, ...)
{
va_list args;
pthread_mutex_lock(&gAllocationsMutex);
{
const MallocDebug* current_dispatch = __libc_malloc_dispatch;
__libc_malloc_dispatch = &__libc_malloc_default_dispatch;
va_start(args, format);
__libc_android_log_vprint(ANDROID_LOG_ERROR, "libc",
format, args);
va_end(args);
dump_stack_trace();
if (gTrapOnError) {
__builtin_trap();
}
__libc_malloc_dispatch = current_dispatch;
}
pthread_mutex_unlock(&gAllocationsMutex);
}
static void assert_valid_malloc_pointer(void* mem)
{
if (mem && !is_valid_malloc_pointer(mem)) {
assert_log_message(
"*** MALLOC CHECK: buffer %p, is not a valid "
"malloc pointer (are you mixing up new/delete "
"and malloc/free?)", mem);
}
}
/* Check that a given address corresponds to a guarded block,
* and returns its original allocation size in '*allocated'.
* 'func' is the capitalized name of the caller function.
* Returns 0 on success, or -1 on failure.
* NOTE: Does not return if gTrapOnError is set.
*/
static int chk_mem_check(void* mem,
size_t* allocated,
const char* func)
{
char* buffer;
size_t offset, bytes;
int i;
char* buf;
/* first check the bytes in the sentinel header */
buf = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
if (buf[i] != CHK_SENTINEL_VALUE) {
assert_log_message(
"*** %s CHECK: buffer %p "
"corrupted %d bytes before allocation",
func, mem, CHK_SENTINEL_HEAD_SIZE-i);
return -1;
}
}
/* then the ones in the sentinel trailer */
buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
bytes = *(size_t *)(buffer + offset);
buf = (char*)mem + bytes;
for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
if (buf[i] != CHK_SENTINEL_VALUE) {
assert_log_message(
"*** %s CHECK: buffer %p, size=%lu, "
"corrupted %d bytes after allocation",
func, buffer, bytes, i+1);
return -1;
}
}
*allocated = bytes;
return 0;
}
void* chk_malloc(size_t bytes)
{
size_t size = bytes + CHK_OVERHEAD_SIZE;
if (size < bytes) { // Overflow.
return NULL;
}
uint8_t* buffer = (uint8_t*) dlmalloc(size);
if (buffer) {
memset(buffer, CHK_SENTINEL_VALUE, bytes + CHK_OVERHEAD_SIZE);
size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
*(size_t *)(buffer + offset) = bytes;
buffer += CHK_SENTINEL_HEAD_SIZE;
}
return buffer;
}
void chk_free(void* mem)
{
assert_valid_malloc_pointer(mem);
if (mem) {
size_t size;
char* buffer;
if (chk_mem_check(mem, &size, "FREE") == 0) {
buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
memset(buffer, CHK_FILL_FREE, size + CHK_OVERHEAD_SIZE);
dlfree(buffer);
}
}
}
void* chk_calloc(size_t n_elements, size_t elem_size)
{
size_t size;
void* ptr;
/* Fail on overflow - just to be safe even though this code runs only
* within the debugging C library, not the production one */
if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
return NULL;
}
size = n_elements * elem_size;
ptr = chk_malloc(size);
if (ptr != NULL) {
memset(ptr, 0, size);
}
return ptr;
}
void* chk_realloc(void* mem, size_t bytes)
{
char* buffer;
int ret;
size_t old_bytes = 0;
assert_valid_malloc_pointer(mem);
if (mem != NULL && chk_mem_check(mem, &old_bytes, "REALLOC") < 0)
return NULL;
char* new_buffer = chk_malloc(bytes);
if (mem == NULL) {
return new_buffer;
}
if (new_buffer) {
if (bytes > old_bytes)
bytes = old_bytes;
memcpy(new_buffer, mem, bytes);
chk_free(mem);
}
return new_buffer;
}
void* chk_memalign(size_t alignment, size_t bytes)
{
// XXX: it's better to fall back on plain malloc than to get the alignment wrong
return chk_malloc(bytes);
}
// =============================================================================
// malloc fill functions
// =============================================================================
void* fill_malloc(size_t bytes)
{
void* buffer = dlmalloc(bytes);
if (buffer) {
memset(buffer, CHK_SENTINEL_VALUE, bytes);
}
return buffer;
}
void fill_free(void* mem)
{
size_t bytes = dlmalloc_usable_size(mem);
memset(mem, CHK_FILL_FREE, bytes);
dlfree(mem);
}
void* fill_realloc(void* mem, size_t bytes)
{
void* buffer = fill_malloc(bytes);
if (mem == NULL) {
return buffer;
}
if (buffer) {
size_t old_size = dlmalloc_usable_size(mem);
size_t size = (bytes < old_size)?(bytes):(old_size);
memcpy(buffer, mem, size);
fill_free(mem);
}
return buffer;
}
void* fill_memalign(size_t alignment, size_t bytes)
{
void* buffer = dlmemalign(alignment, bytes);
if (buffer) {
memset(buffer, CHK_SENTINEL_VALUE, bytes);
}
return buffer;
}
// =============================================================================
// malloc leak functions
// =============================================================================
#define MEMALIGN_GUARD ((void*)0xA1A41520)
void* leak_malloc(size_t bytes)
{
// allocate enough space in front of the allocation to store the pointer to
// the alloc structure. This makes freeing the structure really fast!
// 1. allocate enough memory and include our header
// 2. set the base pointer to be right after our header
size_t size = bytes + sizeof(AllocationEntry);
if (size < bytes) { // Overflow.
return NULL;
}
void* base = dlmalloc(size);
if (base != NULL) {
pthread_mutex_lock(&gAllocationsMutex);
intptr_t backtrace[BACKTRACE_SIZE];
size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
AllocationEntry* header = (AllocationEntry*)base;
header->entry = record_backtrace(backtrace, numEntries, bytes);
header->guard = GUARD;
// now increment base to point to after our header.
// this should just work since our header is 8 bytes.
base = (AllocationEntry*)base + 1;
pthread_mutex_unlock(&gAllocationsMutex);
}
return base;
}
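/*
 * Resulting block layout (one dlmalloc allocation, roughly):
 *
 *   base: [ AllocationEntry { entry, guard } | user data (bytes) ]
 *                                            ^ pointer returned to caller
 *
 * leak_free() steps the returned pointer back by one AllocationEntry to
 * find this header again.
 */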
void leak_free(void* mem)
{
if (mem != NULL) {
pthread_mutex_lock(&gAllocationsMutex);
// check the guard to make sure it is valid
AllocationEntry* header = (AllocationEntry*)mem - 1;
if (header->guard != GUARD) {
// could be a memaligned block
if (((void**)mem)[-1] == MEMALIGN_GUARD) {
mem = ((void**)mem)[-2];
header = (AllocationEntry*)mem - 1;
}
}
if (header->guard == GUARD || is_valid_entry(header->entry)) {
// decrement the allocations
HashEntry* entry = header->entry;
entry->allocations--;
if (entry->allocations <= 0) {
remove_entry(entry);
dlfree(entry);
}
// now free the memory!
dlfree(header);
} else {
debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
header->guard, header->entry);
}
pthread_mutex_unlock(&gAllocationsMutex);
}
}
void* leak_calloc(size_t n_elements, size_t elem_size)
{
size_t size;
void* ptr;
/* Fail on overflow - just to be safe even though this code runs only
* within the debugging C library, not the production one */
if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
return NULL;
}
size = n_elements * elem_size;
ptr = leak_malloc(size);
if (ptr != NULL) {
memset(ptr, 0, size);
}
return ptr;
}
void* leak_realloc(void* oldMem, size_t bytes)
{
if (oldMem == NULL) {
return leak_malloc(bytes);
}
void* newMem = NULL;
AllocationEntry* header = (AllocationEntry*)oldMem - 1;
if (header && header->guard == GUARD) {
size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
newMem = leak_malloc(bytes);
if (newMem != NULL) {
size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
memcpy(newMem, oldMem, copySize);
leak_free(oldMem);
}
} else {
newMem = dlrealloc(oldMem, bytes);
}
return newMem;
}
void* leak_memalign(size_t alignment, size_t bytes)
{
// we can just use malloc
if (alignment <= MALLOC_ALIGNMENT)
return leak_malloc(bytes);
// need to make sure it's a power of two
if (alignment & (alignment-1))
alignment = 1L << (31 - __builtin_clz(alignment));
// here, aligment is at least MALLOC_ALIGNMENT<<1 bytes
// we will align by at least MALLOC_ALIGNMENT bytes
// and at most alignment-MALLOC_ALIGNMENT bytes
size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
if (size < bytes) { // Overflow.
return NULL;
}
void* base = leak_malloc(size);
if (base != NULL) {
intptr_t ptr = (intptr_t)base;
if ((ptr % alignment) == 0)
return base;
// align the pointer
ptr += ((-ptr) % alignment);
// there is always enough space for the base pointer and the guard
((void**)ptr)[-1] = MEMALIGN_GUARD;
((void**)ptr)[-2] = base;
return (void*)ptr;
}
return base;
}
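/*
 * Sketch of the fix-up above when the dlmalloc'd base is not already
 * aligned:
 *
 *   base                                    ptr (returned)
 *   |-- at most alignment-MALLOC_ALIGNMENT bytes --|
 *                   ptr[-2] = base           (so leak_free can recover it)
 *                   ptr[-1] = MEMALIGN_GUARD (tags the block as memaligned)
 *
 * leak_free() checks for MEMALIGN_GUARD first and, if present, walks back
 * to the original base before validating the AllocationEntry header.
 */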
/* Initializes malloc debugging framework.
* See comments on MallocDebugInit in malloc_debug_common.h
*/
int malloc_debug_initialize(void)
{
// We don't really have anything that requires initialization here.
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3667_0 |
crossvul-cpp_data_good_3541_1 | /*
* Performance event support - powerpc architecture code
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int n_percpu;
int disabled;
int n_added;
int n_limited;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int flags[MAX_HWEVENTS];
unsigned long mmcr[3];
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned int group_flag;
int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
/*
* Normally, to ignore kernel events we set the FCS (freeze counters
* in supervisor mode) bit in MMCR0, but if the kernel runs with the
* hypervisor bit set in the MSR, or if we are running on a processor
* where the hypervisor bit is forced to 1 (as on Apple G5 processors),
* then we need to use the FCHV bit to ignore kernel events.
*/
static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
* 32-bit doesn't have MMCRA but does have an MMCR2,
* and a few other names are different.
*/
#ifdef CONFIG_PPC32
#define MMCR0_FCHV 0
#define MMCR0_PMCjCE MMCR0_PMCnCE
#define SPRN_MMCRA SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE 0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
return 0;
}
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_PPC32 */
/*
* Things that are specific to 64-bit implementations.
*/
#ifdef CONFIG_PPC64
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
if (slot > 1)
return 4 * (slot - 1);
}
return 0;
}
/*
* The user wants a data address recorded.
* If we're not doing instruction sampling, give them the SDAR
* (sampled data address). If we are doing instruction sampling, then
* only give them the SDAR if it corresponds to the instruction
* pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
* bit in MMCRA.
*/
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
*addrp = mfspr(SPRN_SDAR);
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
unsigned long sihv = MMCRA_SIHV;
unsigned long sipr = MMCRA_SIPR;
if (TRAP(regs) != 0xf00)
return 0; /* not a PMU interrupt */
if (ppmu->flags & PPMU_ALT_SIPR) {
sihv = POWER6_MMCRA_SIHV;
sipr = POWER6_MMCRA_SIPR;
}
/* PR has priority over HV, so order below is important */
if (mmcra & sipr)
return PERF_RECORD_MISC_USER;
if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
return PERF_RECORD_MISC_KERNEL;
}
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
*/
static inline void perf_read_regs(struct pt_regs *regs)
{
regs->dsisr = mfspr(SPRN_MMCRA);
}
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return !regs->softe;
}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
void perf_event_print_debug(void)
{
}
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 1:
val = mfspr(SPRN_PMC1);
break;
case 2:
val = mfspr(SPRN_PMC2);
break;
case 3:
val = mfspr(SPRN_PMC3);
break;
case 4:
val = mfspr(SPRN_PMC4);
break;
case 5:
val = mfspr(SPRN_PMC5);
break;
case 6:
val = mfspr(SPRN_PMC6);
break;
#ifdef CONFIG_PPC64
case 7:
val = mfspr(SPRN_PMC7);
break;
case 8:
val = mfspr(SPRN_PMC8);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 1:
mtspr(SPRN_PMC1, val);
break;
case 2:
mtspr(SPRN_PMC2, val);
break;
case 3:
mtspr(SPRN_PMC3, val);
break;
case 4:
mtspr(SPRN_PMC4, val);
break;
case 5:
mtspr(SPRN_PMC5, val);
break;
case 6:
mtspr(SPRN_PMC6, val);
break;
#ifdef CONFIG_PPC64
case 7:
mtspr(SPRN_PMC7, val);
break;
case 8:
mtspr(SPRN_PMC8, val);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
}
/*
* Check if a set of events can all go on the PMU at once.
* If they can't, this will look at alternative codes for the events
* and see if any combination of alternative codes is feasible.
* The feasible set is returned in event_id[].
*/
static int power_check_constraints(struct cpu_hw_events *cpuhw,
u64 event_id[], unsigned int cflags[],
int n_ev)
{
unsigned long mask, value, nv;
unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
int i, j;
unsigned long addf = ppmu->add_fields;
unsigned long tadd = ppmu->test_adder;
if (n_ev > ppmu->n_counter)
return -1;
/* First see if the events will go on as-is */
for (i = 0; i < n_ev; ++i) {
if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
&& !ppmu->limited_pmc_event(event_id[i])) {
ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
event_id[i] = cpuhw->alternatives[i][0];
}
if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
&cpuhw->avalues[i][0]))
return -1;
}
value = mask = 0;
for (i = 0; i < n_ev; ++i) {
nv = (value | cpuhw->avalues[i][0]) +
(value & cpuhw->avalues[i][0] & addf);
if ((((nv + tadd) ^ value) & mask) != 0 ||
(((nv + tadd) ^ cpuhw->avalues[i][0]) &
cpuhw->amasks[i][0]) != 0)
break;
value = nv;
mask |= cpuhw->amasks[i][0];
}
if (i == n_ev)
return 0; /* all OK */
/* doesn't work, gather alternatives... */
if (!ppmu->get_alternatives)
return -1;
for (i = 0; i < n_ev; ++i) {
choice[i] = 0;
n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
&cpuhw->amasks[i][j],
&cpuhw->avalues[i][j]);
}
/* enumerate all possibilities and see if any will work */
i = 0;
j = -1;
value = mask = nv = 0;
while (i < n_ev) {
if (j >= 0) {
/* we're backtracking, restore context */
value = svalues[i];
mask = smasks[i];
j = choice[i];
}
/*
* See if any alternative k for event_id i,
* where k > j, will satisfy the constraints.
*/
while (++j < n_alt[i]) {
nv = (value | cpuhw->avalues[i][j]) +
(value & cpuhw->avalues[i][j] & addf);
if ((((nv + tadd) ^ value) & mask) == 0 &&
(((nv + tadd) ^ cpuhw->avalues[i][j])
& cpuhw->amasks[i][j]) == 0)
break;
}
if (j >= n_alt[i]) {
/*
* No feasible alternative, backtrack
* to event_id i-1 and continue enumerating its
* alternatives from where we got up to.
*/
if (--i < 0)
return -1;
} else {
/*
* Found a feasible alternative for event_id i,
* remember where we got up to with this event_id,
* go on to the next event_id, and start with
* the first alternative for it.
*/
choice[i] = j;
svalues[i] = value;
smasks[i] = mask;
value = nv;
mask |= cpuhw->amasks[i][j];
++i;
j = -1;
}
}
/* OK, we have a feasible combination, tell the caller the solution */
for (i = 0; i < n_ev; ++i)
event_id[i] = cpuhw->alternatives[i][choice[i]];
return 0;
}
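/*
 * The loop above is a depth-first backtracking search: for each event i it
 * tries alternative codes j = choice[i]..n_alt[i]-1 against the accumulated
 * (mask, value) constraint state, saving that state in svalues[i]/smasks[i]
 * before descending, so it can be restored when event i+1 runs out of
 * feasible alternatives and the search steps back to event i.
 */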
/*
* Check if newly-added events have consistent settings for
* exclude_{user,kernel,hv} with each other and any previously
* added events.
*/
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
int i, n, first;
struct perf_event *event;
n = n_prev + n_new;
if (n <= 1)
return 0;
first = 1;
for (i = 0; i < n; ++i) {
if (cflags[i] & PPMU_LIMITED_PMC_OK) {
cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
continue;
}
event = ctrs[i];
if (first) {
eu = event->attr.exclude_user;
ek = event->attr.exclude_kernel;
eh = event->attr.exclude_hv;
first = 0;
} else if (event->attr.exclude_user != eu ||
event->attr.exclude_kernel != ek ||
event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
if (eu || ek || eh)
for (i = 0; i < n; ++i)
if (cflags[i] & PPMU_LIMITED_PMC_OK)
cflags[i] |= PPMU_LIMITED_PMC_REQD;
return 0;
}
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
if (!event->hw.idx)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
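/*
 * The "& 0xfffffffful" above keeps the delta correct across a 32-bit
 * counter wrap: e.g. prev = 0xfffffff0 and a fresh read of
 * val = 0x00000010 yield (val - prev) & 0xfffffffful = 0x20, i.e. the 32
 * events that actually occurred rather than a huge negative delta.
 */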
/*
* On some machines, PMC5 and PMC6 can't be written, don't respect
* the freeze conditions, and don't generate interrupts. This tells
* us if `event' is using such a PMC.
*/
static int is_limited_pmc(int pmcnum)
{
return (ppmu->flags & PPMU_LIMITED_PMC5_6)
&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val, prev, delta;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
if (!event->hw.idx)
continue;
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
/*
* Since limited events don't respect the freeze conditions, we
* have to read them immediately after freezing or unfreezing the
* other events. We try to keep the values from the limited
* events as consistent as possible by keeping the delay (in
* cycles and instructions) between freezing/unfreezing and reading
* the limited events as small and consistent as possible.
* Therefore, if any limited events are in use, we read them
* both, and always in the same order, to minimize variability,
* and do it inside the same asm that writes MMCR0.
*/
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
unsigned long pmc5, pmc6;
if (!cpuhw->n_limited) {
mtspr(SPRN_MMCR0, mmcr0);
return;
}
/*
* Write MMCR0, then read PMC5 and PMC6 immediately.
* To ensure we don't get a performance monitor interrupt
* between writing MMCR0 and freezing/thawing the limited
* events, we first write MMCR0 with the event overflow
* interrupt enable bits turned off.
*/
asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
: "=&r" (pmc5), "=&r" (pmc6)
: "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
"i" (SPRN_MMCR0),
"i" (SPRN_PMC5), "i" (SPRN_PMC6));
if (mmcr0 & MMCR0_FC)
freeze_limited_counters(cpuhw, pmc5, pmc6);
else
thaw_limited_counters(cpuhw, pmc5, pmc6);
/*
* Write the full MMCR0 including the event overflow interrupt
* enable bits, if necessary.
*/
if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
mtspr(SPRN_MMCR0, mmcr0);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
cpuhw->n_added = 0;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
/*
* Disable instruction sampling if it was enabled
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
mtspr(SPRN_MMCRA,
cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mb();
}
/*
* Set the 'freeze counters' bit.
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the events
* before we return.
*/
write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
mb();
}
local_irq_restore(flags);
}
/*
* Re-enable all events if disable == 0.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void power_pmu_enable(struct pmu *pmu)
{
struct perf_event *event;
struct cpu_hw_events *cpuhw;
unsigned long flags;
long i;
unsigned long val;
s64 left;
unsigned int hwc_index[MAX_HWEVENTS];
int n_lim;
int idx;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
local_irq_restore(flags);
return;
}
cpuhw->disabled = 0;
/*
* If we didn't change anything, or only removed events,
* no need to recalculate MMCR* settings and reset the PMCs.
* Just reenable the PMU with the current MMCR* settings
* (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
if (cpuhw->n_events == 0)
ppc_set_pmu_inuse(0);
goto out_enable;
}
/*
* Compute MMCR* values for the new set of events
*/
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
cpuhw->mmcr)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
}
/*
* Add in MMCR0 freeze bits corresponding to the
* attr.exclude_* bits for the first event.
* We have already checked that all events have the
* same values for these bits as the first event.
*/
event = cpuhw->event[0];
if (event->attr.exclude_user)
cpuhw->mmcr[0] |= MMCR0_FCP;
if (event->attr.exclude_kernel)
cpuhw->mmcr[0] |= freeze_events_kernel;
if (event->attr.exclude_hv)
cpuhw->mmcr[0] |= MMCR0_FCHV;
/*
* Write the new configuration to MMCR* with the freeze
* bit set and set the hardware events to their initial values.
* Then unfreeze the events.
*/
ppc_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
/*
* Read off any pre-existing events that need to move
* to another PMC.
*/
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
power_pmu_read(event);
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
}
/*
* Initialize the PMCs for all the new and moved events.
*/
cpuhw->n_limited = n_lim = 0;
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx)
continue;
idx = hwc_index[i] + 1;
if (is_limited_pmc(idx)) {
cpuhw->limited_counter[n_lim] = event;
cpuhw->limited_hwidx[n_lim] = idx;
++n_lim;
continue;
}
val = 0;
if (event->hw.sample_period) {
left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
event->hw.idx = idx;
if (event->hw.state & PERF_HES_STOPPED)
val = 0;
write_pmc(idx, val);
perf_event_update_userpage(event);
}
cpuhw->n_limited = n_lim;
cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
out_enable:
mb();
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
/*
* Enable instruction sampling if necessary
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
mb();
mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
flags[n] = group->hw.event_base;
events[n++] = group->hw.config;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
flags[n] = event->hw.event_base;
events[n++] = event->hw.config;
}
}
return n;
}
/*
* Add an event to the PMU.
* If all events are not already frozen, then we disable and
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
*/
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
int n0;
int ret = -EAGAIN;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
/*
* Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
cpuhw = &__get_cpu_var(cpu_hw_events);
n0 = cpuhw->n_events;
if (n0 >= ppmu->n_counter)
goto out;
cpuhw->event[n0] = event;
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
if (!(ef_flags & PERF_EF_START))
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
/*
* If a group events scheduling transaction was started,
* skip the schedulability test here; it will be performed
* at commit time (->commit_txn) as a whole.
*/
if (cpuhw->group_flag & PERF_EVENT_TXN)
goto nocheck;
if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
event->hw.config = cpuhw->events[n0];
nocheck:
++cpuhw->n_events;
++cpuhw->n_added;
ret = 0;
out:
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
/*
* Remove an event from the PMU.
*/
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
cpuhw = &__get_cpu_var(cpu_hw_events);
for (i = 0; i < cpuhw->n_events; ++i) {
if (event == cpuhw->event[i]) {
while (++i < cpuhw->n_events) {
cpuhw->event[i-1] = cpuhw->event[i];
cpuhw->events[i-1] = cpuhw->events[i];
cpuhw->flags[i-1] = cpuhw->flags[i];
}
--cpuhw->n_events;
ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
if (event->hw.idx) {
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
perf_event_update_userpage(event);
break;
}
}
for (i = 0; i < cpuhw->n_limited; ++i)
if (event == cpuhw->limited_counter[i])
break;
if (i < cpuhw->n_limited) {
while (++i < cpuhw->n_limited) {
cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
}
--cpuhw->n_limited;
}
if (cpuhw->n_events == 0) {
/* disable exceptions if no events are running */
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* POWER-PMU does not support disabling individual counters, hence
* program their cycle counter to their max value and ignore the interrupts.
*/
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
cpuhw->n_txn_start = cpuhw->n_events;
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
if (!ppmu)
return -EAGAIN;
cpuhw = &__get_cpu_var(cpu_hw_events);
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
if (i < 0)
return -EAGAIN;
for (i = cpuhw->n_txn_start; i < n; ++i)
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
/*
* Return 1 if we might be able to put the event on a limited PMC,
* or 0 if not.
* An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
unsigned int flags)
{
int n;
u64 alt[MAX_EVENT_ALTERNATIVES];
if (event->attr.exclude_user
|| event->attr.exclude_kernel
|| event->attr.exclude_hv
|| event->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
return 1;
/*
* The requested event_id isn't on a limited PMC already;
* see if any alternative code goes on a limited PMC.
*/
if (!ppmu->get_alternatives)
return 0;
flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
n = ppmu->get_alternatives(ev, flags, alt);
return n > 0;
}
/*
* Find an alternative event_id that goes on a normal PMC, if possible,
* and return the event_id code, or 0 if there is no such alternative.
* (Note: event_id code 0 is "don't count" on all machines.)
*/
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
u64 alt[MAX_EVENT_ALTERNATIVES];
int n;
flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
n = ppmu->get_alternatives(ev, flags, alt);
if (!n)
return 0;
return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
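/*
 * The generic cache config is packed as (result << 16) | (op << 8) | type.
 * For example, an L1-data read-miss event would be encoded as
 *
 *   (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 *   (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *   PERF_COUNT_HW_CACHE_L1D
 *
 * which the unpacking above splits back into the type/op/result indices
 * used to look up ppmu->cache_events.
 */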
static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
struct perf_event *ctrs[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int cflags[MAX_HWEVENTS];
int n;
int err;
struct cpu_hw_events *cpuhw;
if (!ppmu)
return -ENOENT;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config_base = ev;
event->hw.idx = 0;
/*
* If we are not running on a hypervisor, force the
* exclude_hv bit to 0 so that we don't care what
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
event->attr.exclude_hv = 0;
/*
* If this is a per-task event, then we can use
* PM_RUN_* events interchangeably with their non RUN_*
* equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
* XXX we should check if the task is an idle task.
*/
flags = 0;
if (event->attach_state & PERF_ATTACH_TASK)
flags |= PPMU_ONLY_COUNT_RUN;
/*
* If this machine has limited events, check whether this
* event_id could go on a limited event.
*/
if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
if (can_go_on_limited_pmc(event, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
/*
* The requested event_id is on a limited PMC,
* but we can't use a limited PMC; see if any
* alternative goes on a normal PMC.
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
return -EINVAL;
}
}
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
return -EINVAL;
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return -EINVAL;
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
put_cpu_var(cpu_hw_events);
if (err)
return -EINVAL;
event->hw.config = events[n];
event->hw.event_base = cflags[n];
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
event->destroy = hw_perf_event_destroy;
return err;
}
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
.event_init = power_pmu_event_init,
.add = power_pmu_add,
.del = power_pmu_del,
.start = power_pmu_start,
.stop = power_pmu_stop,
.read = power_pmu_read,
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, ~0ULL);
data.period = event->hw.last_period;
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
if (perf_event_overflow(event, nmi, &data, regs))
power_pmu_stop(event, 0);
}
}
/*
* Called from generic code to get the misc flags (i.e. processor mode)
* for an event_id.
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
u32 flags = perf_get_misc_flags(regs);
if (flags)
return flags;
return user_mode(regs) ? PERF_RECORD_MISC_USER :
PERF_RECORD_MISC_KERNEL;
}
/*
* Called from generic code to get the instruction pointer
* for an event_id.
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
if (TRAP(regs) != 0xf00)
return regs->nip; /* not a PMU interrupt */
ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
return ip;
}
static bool pmc_overflow(unsigned long val)
{
if ((int)val < 0)
return true;
/*
* Events on POWER7 can roll back if a speculative event doesn't
* eventually complete. Unfortunately in some rare cases they will
* raise a performance monitor exception. We need to catch this to
* ensure we reset the PMC. In all cases the PMC will be 256 or less
* cycles from overflow.
*
* We only do this if the first pass fails to find any overflowing
* PMCs because a user might set a period of less than 256 and we
* don't want to mistakenly reset them.
*/
if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
return true;
return false;
}
/*
* Performance monitor interrupt stuff
*/
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
if ((int)val < 0) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
}
}
/*
* In case we didn't find and reset the event that caused
* the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
if (!found) {
for (i = 0; i < ppmu->n_counter; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
if (pmc_overflow(val))
write_pmc(i + 1, 0);
}
}
/*
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
if (nmi)
nmi_exit();
else
irq_exit();
}
static void power_pmu_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
if (!ppmu)
return;
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr[0] = MMCR0_FC;
}
static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
power_pmu_setup(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
int register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(power_pmu_notifier);
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3541_1 |
crossvul-cpp_data_bad_1616_0 | /* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
*
* This file implements the protocol specific parts of the USB service for a PD.
* -----------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbif.h"
#include "ozhcd.h"
#include "ozusbsvc.h"
#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
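/*
 * Interpretation (not stated in the original source): 253 appears to be
 * the largest element body the protocol's one-byte element length can
 * carry here, so MAX_ISOC_FIXED_DATA is that ceiling minus the fixed
 * isoc header; the header's trailing data byte is accounted for
 * separately via hdr_size in oz_usb_send_isoc() below.
 */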
/*
* Context: softirq
*/
static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc)
{
int ret;
struct oz_elt *elt = (struct oz_elt *)ei->data;
struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
elt->type = OZ_ELT_APP_DATA;
ei->app_id = OZ_APPID_USB;
ei->length = elt->length + sizeof(struct oz_elt);
app_hdr->app_id = OZ_APPID_USB;
spin_lock_bh(&eb->lock);
if (isoc == 0) {
app_hdr->elt_seq_num = usb_ctx->tx_seq_num++;
if (usb_ctx->tx_seq_num == 0)
usb_ctx->tx_seq_num = 1;
}
ret = oz_queue_elt_info(eb, isoc, strid, ei);
if (ret)
oz_elt_info_free(eb, ei);
spin_unlock_bh(&eb->lock);
return ret;
}
/*
* Context: softirq
*/
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
u8 index, __le16 windex, int offset, int len)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_get_desc_req *body;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
oz_dbg(ON, " req_type = 0x%x\n", req_type);
oz_dbg(ON, " desc_type = 0x%x\n", desc_type);
oz_dbg(ON, " index = 0x%x\n", index);
oz_dbg(ON, " windex = 0x%x\n", windex);
oz_dbg(ON, " offset = 0x%x\n", offset);
oz_dbg(ON, " len = 0x%x\n", len);
if (len > 200)
len = 200;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_get_desc_req);
body = (struct oz_get_desc_req *)(elt+1);
body->type = OZ_GET_DESC_REQ;
body->req_id = req_id;
put_unaligned(cpu_to_le16(offset), &body->offset);
put_unaligned(cpu_to_le16(len), &body->size);
body->req_type = req_type;
body->desc_type = desc_type;
body->w_index = windex;
body->index = index;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_config_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_set_config_req);
body = (struct oz_set_config_req *)(elt+1);
body->type = OZ_SET_CONFIG_REQ;
body->req_id = req_id;
body->index = index;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_interface_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_set_interface_req);
body = (struct oz_set_interface_req *)(elt+1);
body->type = OZ_SET_INTERFACE_REQ;
body->req_id = req_id;
body->index = index;
body->alternative = alt;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
u8 recipient, u8 index, __le16 feature)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_feature_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_feature_req);
body = (struct oz_feature_req *)(elt+1);
body->type = type;
body->req_id = req_id;
body->recipient = recipient;
body->index = index;
put_unaligned(feature, &body->feature);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
u8 request, __le16 value, __le16 index, const u8 *data, int data_len)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt *elt;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_vendor_class_req *body;
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
body = (struct oz_vendor_class_req *)(elt+1);
body->type = OZ_VENDOR_CLASS_REQ;
body->req_id = req_id;
body->req_type = req_type;
body->request = request;
put_unaligned(value, &body->value);
put_unaligned(index, &body->index);
if (data_len)
memcpy(body->data, data, data_len);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*
* Context: tasklet
*/
int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
const u8 *data, int data_len)
{
unsigned wvalue = le16_to_cpu(setup->wValue);
unsigned windex = le16_to_cpu(setup->wIndex);
unsigned wlength = le16_to_cpu(setup->wLength);
int rc = 0;
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
rc = oz_usb_get_desc_req(hpd, req_id,
setup->bRequestType, (u8)(wvalue>>8),
(u8)wvalue, setup->wIndex, 0, wlength);
break;
case USB_REQ_SET_CONFIGURATION:
rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue);
break;
case USB_REQ_SET_INTERFACE: {
u8 if_num = (u8)windex;
u8 alt = (u8)wvalue;
rc = oz_usb_set_interface_req(hpd, req_id,
if_num, alt);
}
break;
case USB_REQ_SET_FEATURE:
rc = oz_usb_set_clear_feature_req(hpd, req_id,
OZ_SET_FEATURE_REQ,
setup->bRequestType & 0xf, (u8)windex,
setup->wValue);
break;
case USB_REQ_CLEAR_FEATURE:
rc = oz_usb_set_clear_feature_req(hpd, req_id,
OZ_CLEAR_FEATURE_REQ,
setup->bRequestType & 0xf,
(u8)windex, setup->wValue);
break;
}
} else {
rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType,
setup->bRequest, setup->wValue, setup->wIndex,
data, data_len);
}
return rc;
}
/*
* Context: softirq
*/
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
{
struct oz_usb_ctx *usb_ctx = hpd;
struct oz_pd *pd = usb_ctx->pd;
struct oz_elt_buf *eb;
int i;
int hdr_size;
u8 *data;
struct usb_iso_packet_descriptor *desc;
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
for (i = 0; i < urb->number_of_packets; i++) {
desc = &urb->iso_frame_desc[i];
data = ((u8 *)urb->transfer_buffer)+desc->offset;
oz_send_isoc_unit(pd, ep_num, data, desc->length);
}
return 0;
}
hdr_size = sizeof(struct oz_isoc_fixed) - 1;
eb = &pd->elt_buff;
i = 0;
while (i < urb->number_of_packets) {
struct oz_elt_info *ei = oz_elt_info_alloc(eb);
struct oz_elt *elt;
struct oz_isoc_fixed *body;
int unit_count;
int unit_size;
int rem;
if (ei == NULL)
return -1;
rem = MAX_ISOC_FIXED_DATA;
elt = (struct oz_elt *)ei->data;
body = (struct oz_isoc_fixed *)(elt + 1);
body->type = OZ_USB_ENDPOINT_DATA;
body->endpoint = ep_num;
body->format = OZ_DATA_F_ISOC_FIXED;
unit_size = urb->iso_frame_desc[i].length;
body->unit_size = (u8)unit_size;
data = ((u8 *)(elt+1)) + hdr_size;
unit_count = 0;
while (i < urb->number_of_packets) {
desc = &urb->iso_frame_desc[i];
if ((unit_size == desc->length) &&
(desc->length <= rem)) {
memcpy(data, ((u8 *)urb->transfer_buffer) +
desc->offset, unit_size);
data += unit_size;
rem -= unit_size;
unit_count++;
desc->status = 0;
desc->actual_length = desc->length;
i++;
} else {
break;
}
}
elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem;
/* Store the number of units in body->frame_number for the
* moment. This field will be correctly determined before
* the element is sent. */
body->frame_number = (u8)unit_count;
oz_usb_submit_elt(eb, ei, usb_ctx, ep_num,
pd->mode & OZ_F_ISOC_ANYTIME);
}
return 0;
}
/*
* Context: softirq-serialized
*/
static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_usb_hdr *usb_hdr, int len)
{
struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
switch (data_hdr->format) {
case OZ_DATA_F_MULTIPLE_FIXED: {
struct oz_multiple_fixed *body =
(struct oz_multiple_fixed *)data_hdr;
u8 *data = body->data;
int n = (len - sizeof(struct oz_multiple_fixed)+1)
/ body->unit_size;
while (n--) {
oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
data, body->unit_size);
data += body->unit_size;
}
}
break;
case OZ_DATA_F_ISOC_FIXED: {
struct oz_isoc_fixed *body =
(struct oz_isoc_fixed *)data_hdr;
int data_len = len-sizeof(struct oz_isoc_fixed)+1;
int unit_size = body->unit_size;
u8 *data = body->data;
int count;
int i;
if (!unit_size)
break;
count = data_len/unit_size;
for (i = 0; i < count; i++) {
oz_hcd_data_ind(usb_ctx->hport,
body->endpoint, data, unit_size);
data += unit_size;
}
}
break;
}
}
/*
* This is called when the PD has received a USB element. The type of element
* is determined and is then passed to an appropriate handler function.
* Context: softirq-serialized
*/
void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
{
struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1);
struct oz_usb_ctx *usb_ctx;
spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (usb_ctx->stopped)
goto done;
/* If sequence number is non-zero then check it is not a duplicate.
* Zero sequence numbers are always accepted.
*/
if (usb_hdr->elt_seq_num != 0) {
if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
/* Reject duplicate element. */
goto done;
}
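/*
 * The test above treats the 8-bit sequence space as a signed window:
 * bit 7 clear in (rx_seq_num - elt_seq_num) means the element is at or
 * behind the last accepted one. E.g. with rx_seq_num = 5, an incoming 5
 * gives (5 - 5) & 0x80 == 0 and is dropped as a duplicate, while an
 * incoming 6 gives (5 - 6) & 0x80 == 0x80 and is accepted.
 */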
usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
switch (usb_hdr->type) {
case OZ_GET_DESC_RSP: {
struct oz_get_desc_rsp *body =
(struct oz_get_desc_rsp *)usb_hdr;
u16 offs, total_size;
u8 data_len;
if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
break;
data_len = elt->length -
(sizeof(struct oz_get_desc_rsp) - 1);
offs = le16_to_cpu(get_unaligned(&body->offset));
total_size =
le16_to_cpu(get_unaligned(&body->total_size));
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data,
data_len, offs, total_size);
}
break;
case OZ_SET_CONFIG_RSP: {
struct oz_set_config_rsp *body =
(struct oz_set_config_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
body->rcode, NULL, 0);
}
break;
case OZ_SET_INTERFACE_RSP: {
struct oz_set_interface_rsp *body =
(struct oz_set_interface_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport,
body->req_id, body->rcode, NULL, 0);
}
break;
case OZ_VENDOR_CLASS_RSP: {
struct oz_vendor_class_rsp *body =
(struct oz_vendor_class_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data, elt->length-
sizeof(struct oz_vendor_class_rsp)+1);
}
break;
case OZ_USB_ENDPOINT_DATA:
oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length);
break;
}
done:
oz_usb_put(usb_ctx);
}
/*
* Context: softirq, process
*/
void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
{
struct oz_usb_ctx *usb_ctx;
spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (!usb_ctx->stopped) {
oz_dbg(ON, "Farewell indicated ep = 0x%x\n", ep_num);
oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
}
oz_usb_put(usb_ctx);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_1616_0 |
crossvul-cpp_data_good_3484_0 | /*
* pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
*
* Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
* PMC-Sierra Inc
*
* Copyright (C) 2008, 2009 PMC Sierra Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
* USA
*
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hdreg.h>
#include <linux/version.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <linux/libata.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsicam.h>
#include "pmcraid.h"
/*
* Module configuration parameters
*/
static unsigned int pmcraid_debug_log;
static unsigned int pmcraid_disable_aen;
static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
static unsigned int pmcraid_enable_msix;
/*
* Data structures to support multiple adapters by the LLD.
* pmcraid_adapter_count - count of configured adapters
*/
static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
/*
* Supporting user-level control interface through IOCTL commands.
* pmcraid_major - major number to use
* pmcraid_minor - minor number(s) to use
*/
static unsigned int pmcraid_major;
static struct class *pmcraid_class;
DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
/*
* Module parameters
*/
MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(PMCRAID_DRIVER_VERSION);
module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(log_level,
"Enables firmware error code logging, default :1 high-severity"
" errors, 2: all errors including high-severity errors,"
" 0: disables logging");
module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(debug,
"Enable driver verbose message logging. Set 1 to enable."
"(default: 0)");
module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(disable_aen,
"Disable driver aen notifications to apps. Set 1 to disable."
"(default: 0)");
/* chip specific constants for PMC MaxRAID controllers (same for
* 0x5220 and 0x8010
*/
static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
{
.ioastatus = 0x0,
.ioarrin = 0x00040,
.mailbox = 0x7FC30,
.global_intr_mask = 0x00034,
.ioa_host_intr = 0x0009C,
.ioa_host_intr_clr = 0x000A0,
.ioa_host_msix_intr = 0x7FC40,
.ioa_host_mask = 0x7FC28,
.ioa_host_mask_clr = 0x7FC28,
.host_ioa_intr = 0x00020,
.host_ioa_intr_clr = 0x00020,
.transop_timeout = 300
}
};
/*
* PCI device ids supported by pmcraid driver
*/
static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
},
{}
};
MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
/**
* pmcraid_slave_alloc - Prepare for commands to a device
* @scsi_dev: scsi device struct
*
* This function is called by mid-layer prior to sending any command to the new
* device. Stores resource entry details of the device in scsi_device struct.
* Queuecommand uses the resource handle and other details to fill up IOARCB
* while sending commands to the device.
*
* Return value:
* 0 on success / -ENXIO if device does not exist
*/
static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *temp, *res = NULL;
struct pmcraid_instance *pinstance;
u8 target, bus, lun;
unsigned long lock_flags;
int rc = -ENXIO;
u16 fw_version;
pinstance = shost_priv(scsi_dev->host);
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
/* Driver exposes VSET and GSCSI resources only; all other device types
* are not exposed. Resource list is synchronized using resource lock
* so any traversal or modifications to the list should be done inside
* this lock
*/
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry(temp, &pinstance->used_res_q, queue) {
/* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
if (RES_IS_VSET(temp->cfg_entry)) {
if (fw_version <= PMCRAID_FW_VERSION_1)
target = temp->cfg_entry.unique_flags1;
else
target = temp->cfg_entry.array_id & 0xFF;
if (target > PMCRAID_MAX_VSET_TARGETS)
continue;
bus = PMCRAID_VSET_BUS_ID;
lun = 0;
} else if (RES_IS_GSCSI(temp->cfg_entry)) {
target = RES_TARGET(temp->cfg_entry.resource_address);
bus = PMCRAID_PHYS_BUS_ID;
lun = RES_LUN(temp->cfg_entry.resource_address);
} else {
continue;
}
if (bus == scsi_dev->channel &&
target == scsi_dev->id &&
lun == scsi_dev->lun) {
res = temp;
break;
}
}
if (res) {
res->scsi_dev = scsi_dev;
scsi_dev->hostdata = res;
res->change_detected = 0;
atomic_set(&res->read_failures, 0);
atomic_set(&res->write_failures, 0);
rc = 0;
}
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
return rc;
}
/**
* pmcraid_slave_configure - Configures a SCSI device
* @scsi_dev: scsi device struct
*
* This function is executed by SCSI mid layer just after a device is first
* scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
* timeout value (default 30s) will be over-written to a higher value (60s)
* and max_sectors value will be over-written to 512. It also sets queue depth
* to host->cmd_per_lun value
*
* Return value:
* 0 on success
*/
static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *res = scsi_dev->hostdata;
if (!res)
return 0;
/* LLD exposes VSETs and Enclosure devices only */
if (RES_IS_GSCSI(res->cfg_entry) &&
scsi_dev->type != TYPE_ENCLOSURE)
return -ENXIO;
pmcraid_info("configuring %x:%x:%x:%x\n",
scsi_dev->host->unique_id,
scsi_dev->channel,
scsi_dev->id,
scsi_dev->lun);
if (RES_IS_GSCSI(res->cfg_entry)) {
scsi_dev->allow_restart = 1;
} else if (RES_IS_VSET(res->cfg_entry)) {
scsi_dev->allow_restart = 1;
blk_queue_rq_timeout(scsi_dev->request_queue,
PMCRAID_VSET_IO_TIMEOUT);
blk_queue_max_hw_sectors(scsi_dev->request_queue,
PMCRAID_VSET_MAX_SECTORS);
}
if (scsi_dev->tagged_supported &&
(RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG,
scsi_dev->host->cmd_per_lun);
} else {
scsi_adjust_queue_depth(scsi_dev, 0,
scsi_dev->host->cmd_per_lun);
}
return 0;
}
/**
* pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
*
* @scsi_dev: scsi device struct
*
* This is called by mid-layer before removing a device. Pointer assignments
* done in pmcraid_slave_alloc will be reset to NULL here.
*
* Return value
* none
*/
static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *res;
res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
if (res)
res->scsi_dev = NULL;
scsi_dev->hostdata = NULL;
}
/**
* pmcraid_change_queue_depth - Change the device's queue depth
* @scsi_dev: scsi device struct
* @depth: depth to set
* @reason: calling context
*
* Return value
* actual depth set
*/
static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth,
int reason)
{
if (reason != SCSI_QDEPTH_DEFAULT)
return -EOPNOTSUPP;
if (depth > PMCRAID_MAX_CMD_PER_LUN)
depth = PMCRAID_MAX_CMD_PER_LUN;
scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth);
return scsi_dev->queue_depth;
}
/**
* pmcraid_change_queue_type - Change the device's queue type
* @scsi_dev: scsi device struct
* @tag: type of tags to use
*
* Return value:
* actual queue type set
*/
static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
{
struct pmcraid_resource_entry *res;
res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
if ((res) && scsi_dev->tagged_supported &&
(RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
scsi_set_tag_type(scsi_dev, tag);
if (tag)
scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
else
scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth);
} else
tag = 0;
return tag;
}
/**
* pmcraid_init_cmdblk - initializes a command block
*
* @cmd: pointer to struct pmcraid_cmd to be initialized
* @index: if >=0 first time initialization; otherwise reinitialization
*
* Return Value
* None
*/
void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
{
struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
if (index >= 0) {
/* first time initialization (called from probe) */
u32 ioasa_offset =
offsetof(struct pmcraid_control_block, ioasa);
cmd->index = index;
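/* The low two bits of each HRRQ response entry appear to be used by
* the response path for status/toggle flags (note the '>> 2' when the
* handle is decoded, e.g. in pmcraid_fail_outstanding_cmds), so the
* command index is stored pre-shifted by two.
*/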
ioarcb->response_handle = cpu_to_le32(index << 2);
ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
} else {
/* re-initialization of various lengths, called once command is
* processed by IOA
*/
memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->hrrq_id = 0;
ioarcb->request_flags0 = 0;
ioarcb->request_flags1 = 0;
ioarcb->cmd_timeout = 0;
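/* the low 5 bits of the IOARCB bus address carry additional request
* length information (see the "length bits" note in
* pmcraid_prepare_cancel_cmd); clear them for a plain IOARCB
*/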
ioarcb->ioarcb_bus_addr &= (~0x1FULL);
ioarcb->ioadl_bus_addr = 0;
ioarcb->ioadl_length = 0;
ioarcb->data_transfer_length = 0;
ioarcb->add_cmd_param_length = 0;
ioarcb->add_cmd_param_offset = 0;
cmd->ioa_cb->ioasa.ioasc = 0;
cmd->ioa_cb->ioasa.residual_data_length = 0;
cmd->time_left = 0;
}
cmd->cmd_done = NULL;
cmd->scsi_cmd = NULL;
cmd->release = 0;
cmd->completion_req = 0;
cmd->sense_buffer = 0;
cmd->sense_buffer_dma = 0;
cmd->dma_handle = 0;
init_timer(&cmd->timer);
}
/**
* pmcraid_reinit_cmdblk - reinitialize a command block
*
* @cmd: pointer to struct pmcraid_cmd to be reinitialized
*
* Return Value
* None
*/
static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
{
pmcraid_init_cmdblk(cmd, -1);
}
/**
* pmcraid_get_free_cmd - get a free cmd block from command block pool
* @pinstance: adapter instance structure
*
* Return Value:
* returns pointer to cmd block or NULL if no blocks are available
*/
static struct pmcraid_cmd *pmcraid_get_free_cmd(
struct pmcraid_instance *pinstance
)
{
struct pmcraid_cmd *cmd = NULL;
unsigned long lock_flags;
/* free cmd block list is protected by free_pool_lock */
spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
if (!list_empty(&pinstance->free_cmd_pool)) {
cmd = list_entry(pinstance->free_cmd_pool.next,
struct pmcraid_cmd, free_list);
list_del(&cmd->free_list);
}
spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
/* Initialize the command block before giving it to the caller */
if (cmd != NULL)
pmcraid_reinit_cmdblk(cmd);
return cmd;
}
/**
* pmcraid_return_cmd - return a completed command block back into free pool
* @cmd: pointer to the command block
*
* Return Value:
* nothing
*/
void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
}
/**
* pmcraid_read_interrupts - reads IOA interrupts
*
* @pinstance: pointer to adapter instance structure
*
* Return value
* interrupts read from IOA
*/
static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
{
return (pinstance->interrupt_mode) ?
ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) :
ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
}
/**
* pmcraid_disable_interrupts - Masks and clears all specified interrupts
*
* @pinstance: pointer to per adapter instance structure
* @intrs: interrupts to disable
*
* Return Value
* None
*/
static void pmcraid_disable_interrupts(
struct pmcraid_instance *pinstance,
u32 intrs
)
{
u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
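/* read the register back so the posted MMIO write reaches the
* adapter before the per-interrupt mask is updated below
*/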
ioread32(pinstance->int_regs.global_interrupt_mask_reg);
if (!pinstance->interrupt_mode) {
iowrite32(intrs,
pinstance->int_regs.ioa_host_interrupt_mask_reg);
ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
}
}
/**
* pmcraid_enable_interrupts - Enables specified interrupts
*
* @pinstance: pointer to per adapter instance structure
* @intrs: interrupts to enable
*
* Return Value
* None
*/
static void pmcraid_enable_interrupts(
struct pmcraid_instance *pinstance,
u32 intrs
)
{
u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
if (!pinstance->interrupt_mode) {
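/* a set bit in the mask register disables the corresponding
* interrupt (the disable path writes 'intrs' directly), so
* enabling is done by writing the complement
*/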
iowrite32(~intrs,
pinstance->int_regs.ioa_host_interrupt_mask_reg);
ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
}
pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
ioread32(pinstance->int_regs.global_interrupt_mask_reg),
ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
}
/**
* pmcraid_clr_trans_op - clear trans to op interrupt
*
* @pinstance: pointer to per adapter instance structure
*
* Return Value
* None
*/
static void pmcraid_clr_trans_op(
struct pmcraid_instance *pinstance
)
{
unsigned long lock_flags;
if (!pinstance->interrupt_mode) {
iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
pinstance->int_regs.ioa_host_interrupt_mask_reg);
ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
pinstance->int_regs.ioa_host_interrupt_clr_reg);
ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg);
}
if (pinstance->reset_cmd != NULL) {
del_timer(&pinstance->reset_cmd->timer);
spin_lock_irqsave(
pinstance->host->host_lock, lock_flags);
pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
spin_unlock_irqrestore(
pinstance->host->host_lock, lock_flags);
}
}
/**
* pmcraid_reset_type - Determine the required reset type
* @pinstance: pointer to adapter instance structure
*
* IOA requires hard reset if any of the following conditions is true.
* 1. If HRRQ valid interrupt is not masked
* 2. IOA reset alert doorbell is set
* 3. If there are any error interrupts
*/
static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
{
u32 mask;
u32 intrs;
u32 alerts;
mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
if ((mask & INTRS_HRRQ_VALID) == 0 ||
(alerts & DOORBELL_IOA_RESET_ALERT) ||
(intrs & PMCRAID_ERROR_INTERRUPTS)) {
pmcraid_info("IOA requires hard reset\n");
pinstance->ioa_hard_reset = 1;
}
/* If unit check is active, trigger the dump */
if (intrs & INTRS_IOA_UNIT_CHECK)
pinstance->ioa_unit_check = 1;
}
/**
* pmcraid_bist_done - completion function for PCI BIST
* @cmd: pointer to reset command
* Return Value
* none
*/
static void pmcraid_ioa_reset(struct pmcraid_cmd *);
static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
int rc;
u16 pci_reg;
rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
/* If PCI config space can't be accessed, wait another two seconds */
if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
cmd->time_left > 0) {
pmcraid_info("BIST not complete, waiting another 2 secs\n");
cmd->timer.expires = jiffies + cmd->time_left;
cmd->time_left = 0;
cmd->timer.data = (unsigned long)cmd;
cmd->timer.function =
(void (*)(unsigned long))pmcraid_bist_done;
add_timer(&cmd->timer);
} else {
cmd->time_left = 0;
pmcraid_info("BIST is complete, proceeding with reset\n");
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_ioa_reset(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
}
}
/**
* pmcraid_start_bist - starts BIST
* @cmd: pointer to reset cmd
* Return Value
* none
*/
static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 doorbells, intrs;
/* proceed with bist and wait for 2 seconds */
iowrite32(DOORBELL_IOA_START_BIST,
pinstance->int_regs.host_ioa_interrupt_reg);
doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
pmcraid_info("doorbells after start bist: %x intrs: %x\n",
doorbells, intrs);
cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
add_timer(&cmd->timer);
}
/**
* pmcraid_reset_alert_done - completion routine for reset_alert
* @cmd: pointer to command block used in reset sequence
* Return value
* None
*/
static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 status = ioread32(pinstance->ioa_status);
unsigned long lock_flags;
/* if the critical operation in progress bit is cleared or the wait
* times out, invoke reset engine to proceed with hard reset. If there
* is some more time to wait, restart the timer
*/
if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
cmd->time_left <= 0) {
pmcraid_info("critical op is reset proceeding with reset\n");
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_ioa_reset(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
} else {
pmcraid_info("critical op is not yet reset waiting again\n");
/* restart timer if some more time is available to wait */
cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.function =
(void (*)(unsigned long))pmcraid_reset_alert_done;
add_timer(&cmd->timer);
}
}
/**
* pmcraid_reset_alert - alerts IOA for a possible reset
* @cmd : command block to be used for reset sequence.
*
* Return Value
* None. If PCI config space is accessible, a reset-alert doorbell is
* written to the IOA; otherwise the hard-reset (BIST) path is taken.
*/
static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 doorbells;
int rc;
u16 pci_reg;
/* If we are able to access IOA PCI config space, alert IOA that we are
* going to reset it soon. This enables the IOA to preserve persistent error
* data if any. In case memory space is not accessible, proceed with
* BIST or slot_reset
*/
rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
/* wait for IOA permission i.e until CRITICAL_OPERATION bit is
* reset IOA doesn't generate any interrupts when CRITICAL
* OPERATION bit is reset. A timer is started to wait for this
* bit to be reset.
*/
cmd->time_left = PMCRAID_RESET_TIMEOUT;
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.function =
(void (*)(unsigned long))pmcraid_reset_alert_done;
add_timer(&cmd->timer);
iowrite32(DOORBELL_IOA_RESET_ALERT,
pinstance->int_regs.host_ioa_interrupt_reg);
doorbells =
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
pmcraid_info("doorbells after reset alert: %x\n", doorbells);
} else {
pmcraid_info("PCI config is not accessible starting BIST\n");
pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
pmcraid_start_bist(cmd);
}
}
/**
* pmcraid_timeout_handler - Timeout handler for internally generated ops
*
* @cmd : pointer to the command structure that timed out
*
* This function blocks host requests and initiates an adapter reset.
*
* Return value:
* None
*/
static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
dev_info(&pinstance->pdev->dev,
"Adapter being reset due to cmd(CDB[0] = %x) timeout\n",
cmd->ioa_cb->ioarcb.cdb[0]);
/* Command timeouts result in hard reset sequence. The command that got
* timed out may be the one used as part of reset sequence. In this
* case restart reset sequence using the same command block even if
* reset is in progress. Otherwise fail this command and get a free
* command block to restart the reset sequence.
*/
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (!pinstance->ioa_reset_in_progress) {
pinstance->ioa_reset_attempts = 0;
cmd = pmcraid_get_free_cmd(pinstance);
/* If we are out of command blocks, just return; some other
* command's timeout handler can do the reset job
*/
if (cmd == NULL) {
spin_unlock_irqrestore(pinstance->host->host_lock,
lock_flags);
pmcraid_err("no free cmnd block for timeout handler\n");
return;
}
pinstance->reset_cmd = cmd;
pinstance->ioa_reset_in_progress = 1;
} else {
pmcraid_info("reset is already in progress\n");
if (pinstance->reset_cmd != cmd) {
/* This command must already have been given to the IOA; it
* will be completed by fail_outstanding_cmds anyway
*/
pmcraid_err("cmd is pending but reset in progress\n");
}
/* If this command was being used as part of the reset
* sequence, set cmd_done pointer to pmcraid_ioa_reset. This
* causes fail_outstanding_commands not to return the command
* block back to free pool
*/
if (cmd == pinstance->reset_cmd)
cmd->cmd_done = pmcraid_ioa_reset;
}
/* Notify apps of important IOA bringup/bringdown sequences */
if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START &&
pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START)
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_RESET_START);
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
scsi_block_requests(pinstance->host);
pmcraid_reset_alert(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
}
/**
* pmcraid_internal_done - completion routine for internally generated cmds
*
* @cmd: command that got response from IOA
*
* Return Value:
* none
*/
static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
{
pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
/* Some of the internal commands are sent with callers blocking for the
* response, which is indicated by the cmd->completion_req field. The
* response path needs to wake up any waiters waiting for command
* completion if this flag is set.
*/
if (cmd->completion_req) {
cmd->completion_req = 0;
complete(&cmd->wait_for_completion);
}
/* most of the internal commands are completed by caller itself, so
* no need to return the command block back to free pool until we are
* required to do so (e.g once done with initialization).
*/
if (cmd->release) {
cmd->release = 0;
pmcraid_return_cmd(cmd);
}
}
/**
* pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
*
* @cmd: command that got response from IOA
*
* This routine is called after driver re-reads configuration table due to a
* lost CCN. It returns the command block back to free pool and schedules
* worker thread to add/delete devices into the system.
*
* Return Value:
* none
*/
static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
{
pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
if (cmd->release) {
cmd->release = 0;
pmcraid_return_cmd(cmd);
}
pmcraid_info("scheduling worker for config table reinitialization\n");
schedule_work(&cmd->drv_inst->worker_q);
}
/**
* pmcraid_erp_done - Process completion of SCSI error response from device
* @cmd: pmcraid_command
*
* This function copies the sense buffer into the scsi_cmd struct and completes
* scsi_cmd by calling scsi_done function.
*
* Return value:
* none
*/
static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
scsi_cmd->result |= (DID_ERROR << 16);
scmd_printk(KERN_INFO, scsi_cmd,
"command CDB[0] = %x failed with IOASC: 0x%08X\n",
cmd->ioa_cb->ioarcb.cdb[0], ioasc);
}
/* if we had allocated sense buffers for request sense, copy the sense
* data into scsi_cmd's sense buffer and release our buffers
*/
if (cmd->sense_buffer != NULL) {
memcpy(scsi_cmd->sense_buffer,
cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
pci_free_consistent(pinstance->pdev,
SCSI_SENSE_BUFFERSIZE,
cmd->sense_buffer, cmd->sense_buffer_dma);
cmd->sense_buffer = NULL;
cmd->sense_buffer_dma = 0;
}
scsi_dma_unmap(scsi_cmd);
pmcraid_return_cmd(cmd);
scsi_cmd->scsi_done(scsi_cmd);
}
/**
* _pmcraid_fire_command - sends an IOA command to adapter
*
* This function adds the given block into pending command list
* and returns without waiting
*
* @cmd : command to be sent to the device
*
* Return Value
* None
*/
static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
/* Add this command block to pending cmd pool. We do this prior to
* writing the IOARCB to ioarrin because the IOA might complete the
* command by the time we are about to add it to the list. The response
* handler (isr/tasklet) looks for the cmd block in the pending list.
*/
spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
atomic_inc(&pinstance->outstanding_cmds);
/* driver writes lower 32-bit value of IOARCB address only */
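/* the barrier below ensures the pending-list insertion above is
* visible before the IOA is told about the command via IOARRIN
*/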
mb();
iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
pinstance->ioarrin);
}
/**
* pmcraid_send_cmd - fires a command to IOA
*
* This function also sets up timeout function, and command completion
* function
*
* @cmd: pointer to the command block to be fired to IOA
* @cmd_done: command completion function, called once IOA responds
* @timeout: timeout to wait for this command completion
* @timeout_func: timeout handler
*
* Return value
* none
*/
static void pmcraid_send_cmd(
struct pmcraid_cmd *cmd,
void (*cmd_done) (struct pmcraid_cmd *),
unsigned long timeout,
void (*timeout_func) (struct pmcraid_cmd *)
)
{
/* initialize done function */
cmd->cmd_done = cmd_done;
if (timeout_func) {
/* setup timeout handler */
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + timeout;
cmd->timer.function = (void (*)(unsigned long))timeout_func;
add_timer(&cmd->timer);
}
/* fire the command to IOA */
_pmcraid_fire_command(cmd);
}
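/* A typical internal-command sequence (illustrative sketch only, using
* helpers defined elsewhere in this file):
*
* cmd = pmcraid_get_free_cmd(pinstance);
* if (cmd) {
* cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
* (fill in the remaining IOARCB fields: resource_handle, cdb, ...)
* pmcraid_send_cmd(cmd, pmcraid_internal_done,
* PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
* }
*/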
/**
* pmcraid_ioa_shutdown_done - completion function for IOA shutdown command
* @cmd: pointer to the command block used for sending IOA shutdown command
*
* Return value
* None
*/
static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_ioa_reset(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
}
/**
* pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
*
* @cmd: pointer to the command block used as part of reset sequence
*
* Return Value
* None
*/
static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
{
pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
/* Note that each command sent during the reset sequence chains into
* the next one; hence reinit the done function as well as the timeout
* function
*/
pmcraid_reinit_cmdblk(cmd);
cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
cmd->ioa_cb->ioarcb.resource_handle =
cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
/* fire shutdown command to hardware. */
pmcraid_info("firing normal shutdown command (%d) to IOA\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START);
pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done,
PMCRAID_SHUTDOWN_TIMEOUT,
pmcraid_timeout_handler);
}
/**
* pmcraid_get_fwversion_done - completion function for get_fwversion
*
* @cmd: pointer to command block used to send INQUIRY command
*
* Return Value
* none
*/
static void pmcraid_querycfg(struct pmcraid_cmd *);
static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
unsigned long lock_flags;
/* configuration table entry size depends on firmware version. If fw
* version is not known, it is not possible to interpret IOA config
* table
*/
if (ioasc) {
pmcraid_err("IOA Inquiry failed with %x\n", ioasc);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
} else {
pmcraid_querycfg(cmd);
}
}
/**
* pmcraid_get_fwversion - reads firmware version information
*
* @cmd: pointer to command block used to send INQUIRY command
*
* Return Value
* none
*/
static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
struct pmcraid_instance *pinstance = cmd->drv_inst;
u16 data_size = sizeof(struct pmcraid_inquiry_data);
pmcraid_reinit_cmdblk(cmd);
ioarcb->request_type = REQ_TYPE_SCSI;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->cdb[0] = INQUIRY;
ioarcb->cdb[1] = 1;
ioarcb->cdb[2] = 0xD0;
ioarcb->cdb[3] = (data_size >> 8) & 0xFF;
ioarcb->cdb[4] = data_size & 0xFF;
/* The inquiry data is small enough to be carried as part of the
* IOARCB itself
*/
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->data_transfer_length = cpu_to_le32(data_size);
ioadl = &(ioarcb->add_data.u.ioadl[0]);
ioadl->flags = IOADL_FLAGS_LAST_DESC;
ioadl->address = cpu_to_le64(pinstance->inq_data_baddr);
ioadl->data_len = cpu_to_le32(data_size);
pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done,
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
}
/**
* pmcraid_identify_hrrq - registers host rrq buffers with IOA
* @cmd: pointer to command block to be used for identify hrrq
*
* Return Value
* none
*/
static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
int index = cmd->hrrq_index;
__be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
void (*done_function)(struct pmcraid_cmd *);
pmcraid_reinit_cmdblk(cmd);
cmd->hrrq_index = index + 1;
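/* HRRQs are registered with the IOA one at a time: the completion
* handler re-invokes this function until all queues have been
* identified, then moves on to the firmware version inquiry
*/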
if (cmd->hrrq_index < pinstance->num_hrrq) {
done_function = pmcraid_identify_hrrq;
} else {
cmd->hrrq_index = 0;
done_function = pmcraid_get_fwversion;
}
/* Initialize ioarcb */
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
/* initialize the hrrq number where IOA will respond to this command */
ioarcb->hrrq_id = index;
ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
ioarcb->cdb[1] = index;
/* IOA expects the 64-bit pci address to be written in B.E format
* (i.e. cdb[2] = MSByte ... cdb[9] = LSByte).
*/
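/* e.g. a queue at bus address 0x0000000012345678 ends up as
* cdb[2..9] = 00 00 00 00 12 34 56 78
*/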
pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n",
hrrq_addr, ioarcb->ioarcb_bus_addr, index);
memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
/* Subsequent commands require HRRQ identification to be successful.
* Note that this gets called even during reset from SCSI mid-layer
* or tasklet
*/
pmcraid_send_cmd(cmd, done_function,
PMCRAID_INTERNAL_TIMEOUT,
pmcraid_timeout_handler);
}
static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
/**
* pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA
*
* @cmd: initialized command block pointer
*
* Return Value
* none
*/
static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
{
if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
else
atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
}
/**
* pmcraid_init_hcam - send an initialized command block(HCAM) to IOA
*
* @pinstance: pointer to adapter instance structure
* @type: HCAM type
*
* Return Value
* pointer to initialized pmcraid_cmd structure or NULL
*/
static struct pmcraid_cmd *pmcraid_init_hcam
(
struct pmcraid_instance *pinstance,
u8 type
)
{
struct pmcraid_cmd *cmd;
struct pmcraid_ioarcb *ioarcb;
struct pmcraid_ioadl_desc *ioadl;
struct pmcraid_hostrcb *hcam;
void (*cmd_done) (struct pmcraid_cmd *);
dma_addr_t dma;
int rcb_size;
cmd = pmcraid_get_free_cmd(pinstance);
if (!cmd) {
pmcraid_err("no free command blocks for hcam\n");
return cmd;
}
if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
rcb_size = sizeof(struct pmcraid_hcam_ccn_ext);
cmd_done = pmcraid_process_ccn;
dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
hcam = &pinstance->ccn;
} else {
rcb_size = sizeof(struct pmcraid_hcam_ldn);
cmd_done = pmcraid_process_ldn;
dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
hcam = &pinstance->ldn;
}
/* initialize command pointer used for HCAM registration */
hcam->cmd = cmd;
ioarcb = &cmd->ioa_cb->ioarcb;
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioadl = ioarcb->add_data.u.ioadl;
/* Initialize ioarcb */
ioarcb->request_type = REQ_TYPE_HCAM;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
ioarcb->cdb[1] = type;
ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
ioarcb->cdb[8] = (rcb_size) & 0xFF;
ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
ioadl[0].data_len = cpu_to_le32(rcb_size);
ioadl[0].address = cpu_to_le32(dma);
cmd->cmd_done = cmd_done;
return cmd;
}
/**
* pmcraid_send_hcam - Send an HCAM to IOA
* @pinstance: ioa config struct
* @type: HCAM type
*
* This function will send a Host Controlled Async command to IOA.
*
* Return value:
* none
*/
static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
{
struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
/* pmcraid_init_hcam returns NULL when no free command blocks are
* available; guard against dereferencing it in that case
*/
if (cmd)
pmcraid_send_hcam_cmd(cmd);
}
/**
* pmcraid_prepare_cancel_cmd - prepares a command block to abort another
*
* @cmd: pointer to cmd that is used as cancelling command
* @cmd_to_cancel: pointer to the command that needs to be cancelled
*/
static void pmcraid_prepare_cancel_cmd(
struct pmcraid_cmd *cmd,
struct pmcraid_cmd *cmd_to_cancel
)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
__be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
/* Get the resource handle to where the command to be aborted has been
* sent.
*/
ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
ioarcb->request_type = REQ_TYPE_IOACMD;
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
/* IOARCB address of the command to be cancelled is given in
* cdb[2]..cdb[9] is Big-Endian format. Note that length bits in
* IOARCB address are not masked.
*/
ioarcb_addr = cpu_to_be64(ioarcb_addr);
memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
}
/**
* pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
*
* @cmd: command to be used as cancelling command
* @type: HCAM type
* @cmd_done: op done function for the cancelling command
*/
static void pmcraid_cancel_hcam(
struct pmcraid_cmd *cmd,
u8 type,
void (*cmd_done) (struct pmcraid_cmd *)
)
{
struct pmcraid_instance *pinstance;
struct pmcraid_hostrcb *hcam;
pinstance = cmd->drv_inst;
hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
&pinstance->ldn : &pinstance->ccn;
/* prepare for cancelling previous hcam command. If no HCAM is
* currently pending with the IOA, hcam->cmd is NULL and there is
* nothing to cancel
*/
if (hcam->cmd == NULL)
return;
pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
/* writing to IOARRIN must be protected by host_lock, as the mid-layer
* may schedule queuecommand while we are doing this
*/
pmcraid_send_cmd(cmd, cmd_done,
PMCRAID_INTERNAL_TIMEOUT,
pmcraid_timeout_handler);
}
/**
* pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
*
* @cmd: command block to be used for cancelling the HCAM
*/
static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
{
pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
pmcraid_reinit_cmdblk(cmd);
pmcraid_cancel_hcam(cmd,
PMCRAID_HCAM_CODE_CONFIG_CHANGE,
pmcraid_ioa_shutdown);
}
/**
* pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
*
* @cmd: command block to be used for cancelling the HCAM
*/
static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
{
pmcraid_cancel_hcam(cmd,
PMCRAID_HCAM_CODE_LOG_DATA,
pmcraid_cancel_ccn);
}
/**
* pmcraid_expose_resource - check if the resource can be exposed to OS
*
* @fw_version: firmware version code
* @cfgte: pointer to configuration table entry of the resource
*
* Return value:
* true if resource can be added to midlayer, false(0) otherwise
*/
static int pmcraid_expose_resource(u16 fw_version,
struct pmcraid_config_table_entry *cfgte)
{
int retval = 0;
if (cfgte->resource_type == RES_TYPE_VSET) {
if (fw_version <= PMCRAID_FW_VERSION_1)
retval = ((cfgte->unique_flags1 & 0x80) == 0);
else
retval = ((cfgte->unique_flags0 & 0x80) == 0 &&
(cfgte->unique_flags1 & 0x80) == 0);
} else if (cfgte->resource_type == RES_TYPE_GSCSI)
retval = (RES_BUS(cfgte->resource_address) !=
PMCRAID_VIRTUAL_ENCL_BUS_ID);
return retval;
}
/* attributes supported by pmcraid_event_family */
enum {
PMCRAID_AEN_ATTR_UNSPEC,
PMCRAID_AEN_ATTR_EVENT,
__PMCRAID_AEN_ATTR_MAX,
};
#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
/* commands supported by pmcraid_event_family */
enum {
PMCRAID_AEN_CMD_UNSPEC,
PMCRAID_AEN_CMD_EVENT,
__PMCRAID_AEN_CMD_MAX,
};
#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
static struct genl_family pmcraid_event_family = {
.id = GENL_ID_GENERATE,
.name = "pmcraid",
.version = 1,
.maxattr = PMCRAID_AEN_ATTR_MAX
};
/**
* pmcraid_netlink_init - registers pmcraid_event_family
*
* Return value:
* 0 if the pmcraid_event_family is successfully registered
* with netlink generic, non-zero otherwise
*/
static int pmcraid_netlink_init(void)
{
int result;
result = genl_register_family(&pmcraid_event_family);
if (result)
return result;
pmcraid_info("registered NETLINK GENERIC group: %d\n",
pmcraid_event_family.id);
return result;
}
/**
* pmcraid_netlink_release - unregisters pmcraid_event_family
*
* Return value:
* none
*/
static void pmcraid_netlink_release(void)
{
genl_unregister_family(&pmcraid_event_family);
}
/**
* pmcraid_notify_aen - sends event msg to user space application
* @pinstance: pointer to adapter instance structure
* @aen_msg: AEN message to be sent to user space
* @data_size: size in bytes of the message payload
*
* Return value:
* 0 if success, error value in case of any failure.
*/
static int pmcraid_notify_aen(
struct pmcraid_instance *pinstance,
struct pmcraid_aen_msg *aen_msg,
u32 data_size
)
{
struct sk_buff *skb;
void *msg_header;
u32 total_size, nla_genl_hdr_total_size;
int result;
aen_msg->hostno = (pinstance->host->unique_id << 16 |
MINOR(pinstance->cdev.dev));
aen_msg->length = data_size;
data_size += sizeof(*aen_msg);
total_size = nla_total_size(data_size);
/* Add GENL_HDR to total_size */
nla_genl_hdr_total_size =
(total_size + (GENL_HDRLEN +
((struct genl_family *)&pmcraid_event_family)->hdrsize)
+ NLMSG_HDRLEN);
skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC);
if (!skb) {
pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
total_size);
return -ENOMEM;
}
/* add the genetlink message header */
msg_header = genlmsg_put(skb, 0, 0,
&pmcraid_event_family, 0,
PMCRAID_AEN_CMD_EVENT);
if (!msg_header) {
pmcraid_err("failed to copy command details\n");
nlmsg_free(skb);
return -ENOMEM;
}
result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
if (result) {
pmcraid_err("failed to copy AEN attribute data\n");
nlmsg_free(skb);
return -EINVAL;
}
/* send genetlink multicast message to notify applications */
result = genlmsg_end(skb, msg_header);
if (result < 0) {
pmcraid_err("genlmsg_end failed\n");
nlmsg_free(skb);
return result;
}
result =
genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC);
/* If there are no listeners, genlmsg_multicast may return non-zero
* value.
*/
if (result)
pmcraid_info("error (%x) sending aen event message\n", result);
return result;
}
/**
* pmcraid_notify_ccn - notifies about CCN event msg to user space
* @pinstance: pointer to adapter instance structure
*
* Return value:
* 0 if success, error value in case of any failure
*/
static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
{
return pmcraid_notify_aen(pinstance,
pinstance->ccn.msg,
pinstance->ccn.hcam->data_len +
sizeof(struct pmcraid_hcam_hdr));
}
/**
* pmcraid_notify_ldn - notifies about LDN event msg to user space
* @pinstance: pointer to adapter instance structure
*
* Return value:
* 0 if success, error value in case of any failure
*/
static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
{
return pmcraid_notify_aen(pinstance,
pinstance->ldn.msg,
pinstance->ldn.hcam->data_len +
sizeof(struct pmcraid_hcam_hdr));
}
/**
* pmcraid_notify_ioastate - sends IOA state event msg to user space
* @pinstance: pointer to adapter instance structure
* @evt: controller state event to be sent
*
* Return value:
* none
*/
static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt)
{
pinstance->scn.ioa_state = evt;
pmcraid_notify_aen(pinstance,
&pinstance->scn.msg,
sizeof(u32));
}
/**
* pmcraid_handle_config_change - Handle a config change from the adapter
* @pinstance: pointer to per adapter instance structure
*
* Return value:
* none
*/
static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
{
struct pmcraid_config_table_entry *cfg_entry;
struct pmcraid_hcam_ccn *ccn_hcam;
struct pmcraid_cmd *cmd;
struct pmcraid_cmd *cfgcmd;
struct pmcraid_resource_entry *res = NULL;
unsigned long lock_flags;
unsigned long host_lock_flags;
u32 new_entry = 1;
u32 hidden_entry = 0;
u16 fw_version;
int rc;
ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
cfg_entry = &ccn_hcam->cfg_entry;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
res: %x:%x:%x:%x\n",
pinstance->ccn.hcam->ilid,
pinstance->ccn.hcam->op_code,
((pinstance->ccn.hcam->timestamp1) |
((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
pinstance->ccn.hcam->notification_type,
pinstance->ccn.hcam->notification_lost,
pinstance->ccn.hcam->flags,
pinstance->host->unique_id,
RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
(RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
RES_BUS(cfg_entry->resource_address)),
RES_IS_VSET(*cfg_entry) ?
(fw_version <= PMCRAID_FW_VERSION_1 ?
cfg_entry->unique_flags1 :
cfg_entry->array_id & 0xFF) :
RES_TARGET(cfg_entry->resource_address),
RES_LUN(cfg_entry->resource_address));
/* If this HCAM indicates a lost notification, read the config table */
if (pinstance->ccn.hcam->notification_lost) {
cfgcmd = pmcraid_get_free_cmd(pinstance);
if (cfgcmd) {
pmcraid_info("lost CCN, reading config table\b");
pinstance->reinit_cfg_table = 1;
pmcraid_querycfg(cfgcmd);
} else {
pmcraid_err("lost CCN, no free cmd for querycfg\n");
}
goto out_notify_apps;
}
/* If this resource is not going to be added to mid-layer, just notify
* applications and return. If this notification is about hiding a VSET
* resource, check if it was exposed already.
*/
if (pinstance->ccn.hcam->notification_type ==
NOTIFICATION_TYPE_ENTRY_CHANGED &&
cfg_entry->resource_type == RES_TYPE_VSET) {
/* both firmware revisions indicate a hidden VSET via bit 7 of
* unique_flags1, so the check is version-independent
*/
hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
} else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
goto out_notify_apps;
}
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry(res, &pinstance->used_res_q, queue) {
rc = memcmp(&res->cfg_entry.resource_address,
&cfg_entry->resource_address,
sizeof(cfg_entry->resource_address));
if (!rc) {
new_entry = 0;
break;
}
}
if (new_entry) {
if (hidden_entry) {
spin_unlock_irqrestore(&pinstance->resource_lock,
lock_flags);
goto out_notify_apps;
}
/* If there are more resources than the driver can manage, do not
* notify the applications about the CCN. Just ignore this
* notification and re-register the same HCAM
*/
if (list_empty(&pinstance->free_res_q)) {
spin_unlock_irqrestore(&pinstance->resource_lock,
lock_flags);
pmcraid_err("too many resources attached\n");
spin_lock_irqsave(pinstance->host->host_lock,
host_lock_flags);
pmcraid_send_hcam(pinstance,
PMCRAID_HCAM_CODE_CONFIG_CHANGE);
spin_unlock_irqrestore(pinstance->host->host_lock,
host_lock_flags);
return;
}
res = list_entry(pinstance->free_res_q.next,
struct pmcraid_resource_entry, queue);
list_del(&res->queue);
res->scsi_dev = NULL;
res->reset_progress = 0;
list_add_tail(&res->queue, &pinstance->used_res_q);
}
memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size);
if (pinstance->ccn.hcam->notification_type ==
NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
if (res->scsi_dev) {
if (fw_version <= PMCRAID_FW_VERSION_1)
res->cfg_entry.unique_flags1 &= 0x7F;
else
res->cfg_entry.array_id &= 0xFF;
res->change_detected = RES_CHANGE_DEL;
res->cfg_entry.resource_handle =
PMCRAID_INVALID_RES_HANDLE;
schedule_work(&pinstance->worker_q);
} else {
/* This may be one of the non-exposed resources */
list_move_tail(&res->queue, &pinstance->free_res_q);
}
} else if (!res->scsi_dev) {
res->change_detected = RES_CHANGE_ADD;
schedule_work(&pinstance->worker_q);
}
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
out_notify_apps:
/* Notify configuration changes to registered applications.*/
if (!pmcraid_disable_aen)
pmcraid_notify_ccn(pinstance);
cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
if (cmd)
pmcraid_send_hcam_cmd(cmd);
}
/**
* pmcraid_get_error_info - return error table entry for an ioasc
* @ioasc: ioasc code
* Return Value
* pointer to the matching error table entry, or NULL if none is found
*/
static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
{
int i;
for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
return &pmcraid_ioasc_error_table[i];
}
return NULL;
}
/**
* pmcraid_ioasc_logger - log IOASC information based on user settings
* @ioasc: ioasc code
* @cmd: pointer to command that resulted in 'ioasc'
*/
void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
{
struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
if (error_info == NULL ||
cmd->drv_inst->current_log_level < error_info->log_level)
return;
/* log the error string */
pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n",
cmd->ioa_cb->ioarcb.cdb[0],
cmd->ioa_cb->ioarcb.resource_handle,
le32_to_cpu(ioasc), error_info->error_string);
}
/**
* pmcraid_handle_error_log - Handle a config change (error log) from the IOA
*
* @pinstance: pointer to per adapter instance structure
*
* Return value:
* none
*/
static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
{
struct pmcraid_hcam_ldn *hcam_ldn;
u32 ioasc;
hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
pmcraid_info
("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
pinstance->ldn.hcam->ilid,
pinstance->ldn.hcam->op_code,
pinstance->ldn.hcam->notification_type,
pinstance->ldn.hcam->notification_lost,
pinstance->ldn.hcam->flags,
pinstance->ldn.hcam->overlay_id);
/* log only the errors, no need to log informational log entries */
if (pinstance->ldn.hcam->notification_type !=
NOTIFICATION_TYPE_ERROR_LOG)
return;
if (pinstance->ldn.hcam->notification_lost ==
HOSTRCB_NOTIFICATIONS_LOST)
dev_info(&pinstance->pdev->dev, "Error notifications lost\n");
ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
dev_info(&pinstance->pdev->dev,
"UnitAttention due to IOA Bus Reset\n");
scsi_report_bus_reset(
pinstance->host,
RES_BUS(hcam_ldn->error_log.fd_ra));
}
return;
}
/**
* pmcraid_process_ccn - Op done function for a CCN.
* @cmd: pointer to command struct
*
* This function is the op done function for a configuration
* change notification
*
* Return value:
* none
*/
static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
unsigned long lock_flags;
pinstance->ccn.cmd = NULL;
pmcraid_return_cmd(cmd);
/* If driver initiated IOA reset happened while this hcam was pending
* with IOA, or IOA bringdown sequence is in progress, no need to
* re-register the hcam
*/
if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
atomic_read(&pinstance->ccn.ignore) == 1) {
return;
} else if (ioasc) {
dev_info(&pinstance->pdev->dev,
"Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
} else {
pmcraid_handle_config_change(pinstance);
}
}
/**
* pmcraid_process_ldn - op done function for an LDN
* @cmd: pointer to command block
*
* Return value
* none
*/
static void pmcraid_initiate_reset(struct pmcraid_instance *);
static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_hcam_ldn *ldn_hcam =
(struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
unsigned long lock_flags;
/* return the command block back to freepool */
pinstance->ldn.cmd = NULL;
pmcraid_return_cmd(cmd);
/* If driver initiated IOA reset happened while this hcam was pending
* with IOA, no need to re-register the hcam as reset engine will do it
* once reset sequence is complete
*/
if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
atomic_read(&pinstance->ldn.ignore) == 1) {
return;
} else if (!ioasc) {
pmcraid_handle_error_log(pinstance);
if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
spin_lock_irqsave(pinstance->host->host_lock,
lock_flags);
pmcraid_initiate_reset(pinstance);
spin_unlock_irqrestore(pinstance->host->host_lock,
lock_flags);
return;
}
if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
pinstance->timestamp_error = 1;
pmcraid_set_timestamp(cmd);
}
} else {
dev_info(&pinstance->pdev->dev,
"Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
}
/* send netlink message for HCAM notification if enabled */
if (!pmcraid_disable_aen)
pmcraid_notify_ldn(pinstance);
cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
if (cmd)
pmcraid_send_hcam_cmd(cmd);
}
/**
* pmcraid_register_hcams - register HCAMs for CCN and LDN
*
* @pinstance: pointer to per adapter instance structure
*
* Return Value
* none
*/
static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
{
pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
}
/**
* pmcraid_unregister_hcams - cancel HCAMs registered already
* @cmd: pointer to command used as part of reset sequence
*/
static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
/* During IOA bringdown, HCAMs may still fire and the tasklet would
* proceed with handling the hcam response even though it is not
* necessary. To prevent this, set 'ignore' so that the bring-down
* sequence doesn't re-send any more hcams
*/
atomic_set(&pinstance->ccn.ignore, 1);
atomic_set(&pinstance->ldn.ignore, 1);
/* If adapter reset was forced as part of runtime reset sequence,
* start the reset sequence. Reset is triggered even in case of an
* IOA unit check.
*/
if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) ||
pinstance->ioa_unit_check) {
pinstance->force_ioa_reset = 0;
pinstance->ioa_unit_check = 0;
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
return;
}
/* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
* one after the other. So CCN cancellation will be triggered by
* pmcraid_cancel_ldn itself.
*/
pmcraid_cancel_ldn(cmd);
}
/**
* pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
* @pinstance: pointer to adapter instance structure
* Return Value
* 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
*/
static void pmcraid_reinit_buffers(struct pmcraid_instance *);
static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
{
u32 intrs;
pmcraid_reinit_buffers(pinstance);
intrs = pmcraid_read_interrupts(pinstance);
pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
if (!pinstance->interrupt_mode) {
iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
pinstance->int_regs.
ioa_host_interrupt_mask_reg);
iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
pinstance->int_regs.ioa_host_interrupt_clr_reg);
}
return 1;
} else {
return 0;
}
}
/**
* pmcraid_soft_reset - performs a soft reset and makes IOA become ready
* @cmd : pointer to reset command block
*
* Return Value
* none
*/
static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 int_reg;
u32 doorbell;
/* There will be an interrupt when the Transition to Operational bit
* is set, so the tasklet will execute the next reset task. The
* timeout handler would re-initiate the reset otherwise
*/
cmd->cmd_done = pmcraid_ioa_reset;
cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies +
msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
if (!timer_pending(&cmd->timer))
add_timer(&cmd->timer);
/* Enable destructive diagnostics on IOA if it is not yet in
* operational state
*/
doorbell = DOORBELL_RUNTIME_RESET |
DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
/* Since we do RESET_ALERT and Start BIST we have to again write
* MSIX Doorbell to indicate the interrupt mode
*/
if (pinstance->interrupt_mode) {
iowrite32(DOORBELL_INTR_MODE_MSIX,
pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
}
iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
pmcraid_info("Waiting for IOA to become operational %x:%x\n",
ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
int_reg);
}
/**
* pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
*
* @pinstance: pointer to adapter instance structure
*
* Return Value
* none
*/
static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
{
pmcraid_info("%s is not yet implemented\n", __func__);
}
/**
* pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
* @pinstance: pointer to adapter instance structure
*
* This function fails all outstanding ops. If they are submitted to IOA
* already, it sends cancel all messages if IOA is still accepting IOARCBs,
* otherwise just completes the commands and returns the cmd blocks to free
* pool.
*
* Return value:
* none
*/
static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
{
struct pmcraid_cmd *cmd, *temp;
unsigned long lock_flags;
/* pending command list is protected by pending_pool_lock. Its
* traversal must be done while holding this lock
*/
spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
free_list) {
list_del(&cmd->free_list);
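/* the entry was unlinked before dropping the lock, so the
* iteration can safely resume once the lock is re-acquired
*/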
spin_unlock_irqrestore(&pinstance->pending_pool_lock,
lock_flags);
cmd->ioa_cb->ioasa.ioasc =
cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
cmd->ioa_cb->ioasa.ilid =
cpu_to_be32(PMCRAID_DRIVER_ILID);
/* In case the command timer is still running */
del_timer(&cmd->timer);
/* If this is an IO command, complete it by invoking scsi_done.
* If this is one of the internal commands other than
* pmcraid_ioa_reset and the HCAM commands, invoke cmd_done to
* complete it
*/
if (cmd->scsi_cmd) {
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
__le32 resp = cmd->ioa_cb->ioarcb.response_handle;
scsi_cmd->result |= DID_ERROR << 16;
scsi_dma_unmap(scsi_cmd);
pmcraid_return_cmd(cmd);
pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
le32_to_cpu(resp) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
scsi_cmd->result);
scsi_cmd->scsi_done(scsi_cmd);
} else if (cmd->cmd_done == pmcraid_internal_done ||
cmd->cmd_done == pmcraid_erp_done) {
cmd->cmd_done(cmd);
} else if (cmd->cmd_done != pmcraid_ioa_reset &&
cmd->cmd_done != pmcraid_ioa_shutdown_done) {
pmcraid_return_cmd(cmd);
}
atomic_dec(&pinstance->outstanding_cmds);
spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
}
spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
}
/**
* pmcraid_ioa_reset - Implementation of IOA reset logic
*
* @cmd: pointer to the cmd block to be used for entire reset process
*
* This function executes most of the steps required for IOA reset. This gets
* called by user threads (modprobe/insmod/rmmod), timers, tasklet and midlayer's
* 'eh_' thread. Access to variables used for controlling the reset sequence is
* synchronized using host lock. Various functions called during reset process
* would make use of a single command block, pointer to which is also stored in
* adapter instance structure.
*
* Return Value
* None
*/
static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
u8 reset_complete = 0;
pinstance->ioa_reset_in_progress = 1;
if (pinstance->reset_cmd != cmd) {
pmcraid_err("reset is called with different command block\n");
pinstance->reset_cmd = cmd;
}
pmcraid_info("reset_engine: state = %d, command = %p\n",
pinstance->ioa_state, cmd);
switch (pinstance->ioa_state) {
case IOA_STATE_DEAD:
/* If IOA is offline, whatever the reset reason may be, just
* return. Callers might be waiting on the reset wait_q, so wake
* them up
*/
pmcraid_err("IOA is offline, no reset is possible\n");
reset_complete = 1;
break;
case IOA_STATE_IN_BRINGDOWN:
/* we enter here, once ioa shutdown command is processed by IOA
* Alert IOA for a possible reset. If reset alert fails, IOA
* goes through hard-reset
*/
pmcraid_disable_interrupts(pinstance, ~0);
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
break;
case IOA_STATE_UNKNOWN:
/* We may be called during probe or resume. Some pre-processing
* is required prior to the reset
*/
scsi_block_requests(pinstance->host);
/* If asked to reset while IOA was processing responses or
* there are any error responses then IOA may require
* hard-reset.
*/
if (pinstance->ioa_hard_reset == 0) {
if (ioread32(pinstance->ioa_status) &
INTRS_TRANSITION_TO_OPERATIONAL) {
pmcraid_info("sticky bit set, bring-up\n");
pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
pmcraid_reinit_cmdblk(cmd);
pmcraid_identify_hrrq(cmd);
} else {
pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
pmcraid_soft_reset(cmd);
}
} else {
/* Alert IOA of a possible reset and wait for critical
* operation in progress bit to reset
*/
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
}
break;
case IOA_STATE_IN_RESET_ALERT:
/* If the critical-operation-in-progress bit is reset, or the wait
* timed out, the reset proceeds by starting BIST on the IOA. The
* IOA_STATE_IN_HARD_RESET case keeps a count of reset attempts; once
* the attempts exceed PMCRAID_RESET_ATTEMPTS, the reset engine marks
* the IOA dead and returns
*/
pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
pmcraid_start_bist(cmd);
break;
case IOA_STATE_IN_HARD_RESET:
pinstance->ioa_reset_attempts++;
/* retry reset if we haven't reached maximum allowed limit */
if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
pinstance->ioa_reset_attempts = 0;
pmcraid_err("IOA didn't respond marking it as dead\n");
pinstance->ioa_state = IOA_STATE_DEAD;
if (pinstance->ioa_bringdown)
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_SHUTDOWN_FAILED);
else
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_RESET_FAILED);
reset_complete = 1;
break;
}
/* Once either bist or pci reset is done, restore PCI config
* space. If this fails, proceed with hard reset again
*/
pci_restore_state(pinstance->pdev);
/* fail all pending commands */
pmcraid_fail_outstanding_cmds(pinstance);
/* check if unit check is active, if so extract dump */
if (pinstance->ioa_unit_check) {
pmcraid_info("unit check is active\n");
pinstance->ioa_unit_check = 0;
pmcraid_get_dump(pinstance);
pinstance->ioa_reset_attempts--;
pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
pmcraid_reset_alert(cmd);
break;
}
/* If the reset reason is to bring down the IOA, we are done with
* the reset once PCI config space has been restored; complete the
* reset here
*/
if (pinstance->ioa_bringdown) {
pmcraid_info("bringing down the adapter\n");
pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
pinstance->ioa_bringdown = 0;
pinstance->ioa_state = IOA_STATE_UNKNOWN;
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS);
reset_complete = 1;
} else {
/* To bring up the IOA, proceed with soft reset.
* Reinitialize hrrq_buffers and their indices, and also
* enable interrupts after a pci_restore_state
*/
if (pmcraid_reset_enable_ioa(pinstance)) {
pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
pmcraid_info("bringing up the adapter\n");
pmcraid_reinit_cmdblk(cmd);
pmcraid_identify_hrrq(cmd);
} else {
pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
pmcraid_soft_reset(cmd);
}
}
break;
case IOA_STATE_IN_SOFT_RESET:
/* TRANSITION TO OPERATIONAL is on so start initialization
* sequence
*/
pmcraid_info("In softreset proceeding with bring-up\n");
pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
/* Initialization commands start with HRRQ identification. From
* now on tasklet completes most of the commands as IOA is up
* and intrs are enabled
*/
pmcraid_identify_hrrq(cmd);
break;
case IOA_STATE_IN_BRINGUP:
/* we are done with bringing up the IOA; change the ioa_state to
* operational and wake up any waiters
*/
pinstance->ioa_state = IOA_STATE_OPERATIONAL;
reset_complete = 1;
break;
case IOA_STATE_OPERATIONAL:
default:
/* When IOA is operational and a reset is requested, check for
* the reset reason. If reset is to bring down IOA, unregister
* HCAMs and initiate shutdown; if adapter reset is forced then
* restart reset sequence again
*/
if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
pinstance->force_ioa_reset == 0) {
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_RESET_SUCCESS);
reset_complete = 1;
} else {
if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
pmcraid_reinit_cmdblk(cmd);
pmcraid_unregister_hcams(cmd);
}
break;
}
/* reset will be completed if ioa_state is either DEAD or UNKNOWN or
* OPERATIONAL. Reset all control variables used during reset, wake up
* any waiting threads and let the SCSI mid-layer send commands. Note
* that host_lock must be held before invoking scsi_report_bus_reset.
*/
if (reset_complete) {
pinstance->ioa_reset_in_progress = 0;
pinstance->ioa_reset_attempts = 0;
pinstance->reset_cmd = NULL;
pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
pinstance->ioa_bringdown = 0;
pmcraid_return_cmd(cmd);
/* If target state is to bring up the adapter, proceed with
* hcam registration and resource exposure to mid-layer.
*/
if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
pmcraid_register_hcams(pinstance);
wake_up_all(&pinstance->reset_wait_q);
}
return;
}
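/* Summary of the reset-engine state transitions implemented by the switch
* above (derived directly from the code; each arrow shows the ioa_state set
* before the next step of the sequence is kicked off):
*
*	UNKNOWN        -> IN_BRINGUP / IN_SOFT_RESET / IN_RESET_ALERT
*	IN_BRINGDOWN   -> IN_RESET_ALERT
*	IN_RESET_ALERT -> IN_HARD_RESET
*	IN_HARD_RESET  -> DEAD, UNKNOWN (bringdown), IN_BRINGUP,
*	                  IN_SOFT_RESET, or back to IN_RESET_ALERT
*	                  (unit check dump path)
*	IN_SOFT_RESET  -> IN_BRINGUP
*	IN_BRINGUP     -> OPERATIONAL
*	OPERATIONAL    -> IN_BRINGDOWN (on a shutdown request), or HCAM
*	                  unregistration for a forced reset
*
* reset_complete is set only in the terminal cases: DEAD, UNKNOWN after a
* successful bringdown, OPERATIONAL with no shutdown/forced reset pending,
* and IN_HARD_RESET once the attempt limit is exceeded.
*/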
/**
* pmcraid_initiate_reset - initiates the IOA reset sequence
*
* @pinstance: pointer to adapter instance structure
*
* This is called from ISR/tasklet during error interrupts, including IOA
* unit check. If a reset is already in progress it just returns; otherwise
* it initiates an IOA reset to bring the IOA up to operational state.
*
* Return value
* none
*/
static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
{
struct pmcraid_cmd *cmd;
/* If the reset is already in progress, just return, otherwise start
* reset sequence and return
*/
if (!pinstance->ioa_reset_in_progress) {
scsi_block_requests(pinstance->host);
cmd = pmcraid_get_free_cmd(pinstance);
if (cmd == NULL) {
pmcraid_err("no cmnd blocks for initiate_reset\n");
return;
}
pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
pinstance->reset_cmd = cmd;
pinstance->force_ioa_reset = 1;
pmcraid_notify_ioastate(pinstance,
PMC_DEVICE_EVENT_RESET_START);
pmcraid_ioa_reset(cmd);
}
}
/**
* pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
* or bringdown IOA
* @pinstance: pointer to adapter instance structure
* @shutdown_type: shutdown type to be used: NONE, NORMAL or ABBREV
* @target_state: expected target state after reset
*
* Note: This command initiates reset and waits for its completion. Hence this
* should not be called from isr/timer/tasklet functions (timeout handlers,
* error response handlers and interrupt handlers).
*
* Return Value
* 1 in case ioa_state is not target_state, 0 otherwise.
*/
static int pmcraid_reset_reload(
struct pmcraid_instance *pinstance,
u8 shutdown_type,
u8 target_state
)
{
struct pmcraid_cmd *reset_cmd = NULL;
unsigned long lock_flags;
int reset = 1;
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (pinstance->ioa_reset_in_progress) {
pmcraid_info("reset_reload: reset is already in progress\n");
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
wait_event(pinstance->reset_wait_q,
!pinstance->ioa_reset_in_progress);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (pinstance->ioa_state == IOA_STATE_DEAD) {
spin_unlock_irqrestore(pinstance->host->host_lock,
lock_flags);
pmcraid_info("reset_reload: IOA is dead\n");
return reset;
} else if (pinstance->ioa_state == target_state) {
reset = 0;
}
}
if (reset) {
pmcraid_info("reset_reload: proceeding with reset\n");
scsi_block_requests(pinstance->host);
reset_cmd = pmcraid_get_free_cmd(pinstance);
if (reset_cmd == NULL) {
pmcraid_err("no free cmnd for reset_reload\n");
spin_unlock_irqrestore(pinstance->host->host_lock,
lock_flags);
return reset;
}
if (shutdown_type == SHUTDOWN_NORMAL)
pinstance->ioa_bringdown = 1;
pinstance->ioa_shutdown_type = shutdown_type;
pinstance->reset_cmd = reset_cmd;
pinstance->force_ioa_reset = reset;
pmcraid_info("reset_reload: initiating reset\n");
pmcraid_ioa_reset(reset_cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
pmcraid_info("reset_reload: waiting for reset to complete\n");
wait_event(pinstance->reset_wait_q,
!pinstance->ioa_reset_in_progress);
pmcraid_info("reset_reload: reset is complete !!\n");
scsi_unblock_requests(pinstance->host);
if (pinstance->ioa_state == target_state)
reset = 0;
}
return reset;
}
/**
* pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
*
* @pinstance: pointer to adapter instance structure
*
* Return Value
* whatever is returned from pmcraid_reset_reload
*/
static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
{
return pmcraid_reset_reload(pinstance,
SHUTDOWN_NORMAL,
IOA_STATE_UNKNOWN);
}
/**
* pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
*
* @pinstance: pointer to adapter instance structure
*
* Return Value
* whatever is returned from pmcraid_reset_reload
*/
static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
{
pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START);
return pmcraid_reset_reload(pinstance,
SHUTDOWN_NONE,
IOA_STATE_OPERATIONAL);
}
/**
* pmcraid_request_sense - Send request sense to a device
* @cmd: pmcraid command struct
*
* This function sends a request sense to a device as a result of a check
* condition. This method re-uses the same command block that failed earlier.
*/
static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
/* allocate DMAable memory for sense buffers */
cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
SCSI_SENSE_BUFFERSIZE,
&cmd->sense_buffer_dma);
if (cmd->sense_buffer == NULL) {
pmcraid_err
("couldn't allocate sense buffer for request sense\n");
pmcraid_erp_done(cmd);
return;
}
/* re-use the command block */
memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->request_flags0 = (SYNC_COMPLETE |
NO_LINK_DESCS |
INHIBIT_UL_CHECK);
ioarcb->request_type = REQ_TYPE_SCSI;
ioarcb->cdb[0] = REQUEST_SENSE;
ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
ioadl->flags = IOADL_FLAGS_LAST_DESC;
/* request sense might be called as part of error response processing
* which runs in tasklet context. It is possible that the mid-layer might
* schedule queuecommand during this time, hence writing to IOARRIN
* must be protected by host_lock
*/
pmcraid_send_cmd(cmd, pmcraid_erp_done,
PMCRAID_REQUEST_SENSE_TIMEOUT,
pmcraid_timeout_handler);
}
/**
* pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
* @cmd: command that failed
* @sense: true if sense data is already available, in which case the command
* is completed via pmcraid_erp_done; otherwise request sense is issued
* after the cancel all completes
*
* This function sends a cancel all to a device to clear the queue.
*/
static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
: pmcraid_request_sense;
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->request_flags0 = SYNC_OVERRIDE;
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
if (RES_IS_GSCSI(res->cfg_entry))
ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
ioarcb->ioadl_bus_addr = 0;
ioarcb->ioadl_length = 0;
ioarcb->data_transfer_length = 0;
ioarcb->ioarcb_bus_addr &= (~0x1FULL);
/* writing to IOARRIN must be protected by host_lock, as the mid-layer
* may schedule queuecommand while we are doing this
*/
pmcraid_send_cmd(cmd, cmd_done,
PMCRAID_REQUEST_SENSE_TIMEOUT,
pmcraid_timeout_handler);
}
/**
* pmcraid_frame_auto_sense: frame fixed format sense information
*
* @cmd: pointer to failing command block
*
* Return value
* none
*/
static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
{
u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
u32 ioasc = le32_to_cpu(ioasa->ioasc);
u32 failing_lba = 0;
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
if (RES_IS_VSET(res->cfg_entry) &&
ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
ioasa->u.vset.failing_lba_hi != 0) {
sense_buf[0] = 0x72;
sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
sense_buf[7] = 12;
sense_buf[8] = 0;
sense_buf[9] = 0x0A;
sense_buf[10] = 0x80;
failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
sense_buf[12] = (failing_lba & 0xff000000) >> 24;
sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
sense_buf[15] = failing_lba & 0x000000ff;
failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
sense_buf[16] = (failing_lba & 0xff000000) >> 24;
sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
sense_buf[19] = failing_lba & 0x000000ff;
} else {
sense_buf[0] = 0x70;
sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
if (RES_IS_VSET(res->cfg_entry))
failing_lba =
le32_to_cpu(ioasa->u.
vset.failing_lba_lo);
sense_buf[0] |= 0x80;
sense_buf[3] = (failing_lba >> 24) & 0xff;
sense_buf[4] = (failing_lba >> 16) & 0xff;
sense_buf[5] = (failing_lba >> 8) & 0xff;
sense_buf[6] = failing_lba & 0xff;
}
sense_buf[7] = 6; /* additional length */
}
}
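/* Layout notes for the two sense formats framed above, per the SCSI fixed
* and descriptor sense data formats (background information, not extra
* driver behaviour):
*
*   0x72 (descriptor format): bytes 0-7 are the header (response code,
*   sense key, ASC, ASCQ, additional length = 12), followed by an
*   information descriptor in bytes 8-19: type 0x00, length 0x0A, VALID
*   bit (0x80) in byte 10 and the 64-bit failing LBA stored big-endian in
*   bytes 12-19. This is why the hi/lo LBA halves are split across bytes
*   12-15 and 16-19.
*
*   0x70 (fixed format): the VALID bit lives in bit 7 of byte 0 (hence the
*   |= 0x80) and the information field in bytes 3-6 is only 32 bits wide,
*   which is why just failing_lba_lo can be reported and the descriptor
*   format is used whenever failing_lba_hi is non-zero.
*/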
/**
* pmcraid_error_handler - Error response handlers for a SCSI op
* @cmd: pointer to pmcraid_cmd that has failed
*
* This function determines whether or not to initiate ERP on the affected
* device. This is called from a tasklet, which doesn't hold any locks.
*
* Return value:
* 0 if the caller can complete the request; otherwise 1, in which case the
* error handler itself completes the request and returns the command block
* back to the free pool
*/
static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
u32 ioasc = le32_to_cpu(ioasa->ioasc);
u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
u32 sense_copied = 0;
if (!res) {
pmcraid_info("resource pointer is NULL\n");
return 0;
}
/* If this was a SCSI read/write command keep count of errors */
if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
atomic_inc(&res->read_failures);
else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
atomic_inc(&res->write_failures);
if (!RES_IS_GSCSI(res->cfg_entry) &&
masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
pmcraid_frame_auto_sense(cmd);
}
/* Log IOASC/IOASA information based on user settings */
pmcraid_ioasc_logger(ioasc, cmd);
switch (masked_ioasc) {
case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
scsi_cmd->result |= (DID_ABORT << 16);
break;
case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
scsi_cmd->result |= (DID_NO_CONNECT << 16);
break;
case PMCRAID_IOASC_NR_SYNC_REQUIRED:
res->sync_reqd = 1;
scsi_cmd->result |= (DID_IMM_RETRY << 16);
break;
case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
scsi_cmd->result |= (DID_PASSTHROUGH << 16);
break;
case PMCRAID_IOASC_UA_BUS_WAS_RESET:
case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
if (!res->reset_progress)
scsi_report_bus_reset(pinstance->host,
scsi_cmd->device->channel);
scsi_cmd->result |= (DID_ERROR << 16);
break;
case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
res->sync_reqd = 1;
/* if check condition is not active, return with error; otherwise
* get/frame the sense buffer
*/
if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
SAM_STAT_CHECK_CONDITION &&
PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
return 0;
/* If we have auto sense data as part of IOASA pass it to
* mid-layer
*/
if (ioasa->auto_sense_length != 0) {
short sense_len = ioasa->auto_sense_length;
int data_size = min_t(u16, le16_to_cpu(sense_len),
SCSI_SENSE_BUFFERSIZE);
memcpy(scsi_cmd->sense_buffer,
ioasa->sense_data,
data_size);
sense_copied = 1;
}
if (RES_IS_GSCSI(res->cfg_entry))
pmcraid_cancel_all(cmd, sense_copied);
else if (sense_copied)
pmcraid_erp_done(cmd);
else
pmcraid_request_sense(cmd);
return 1;
case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
break;
default:
if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
scsi_cmd->result |= (DID_ERROR << 16);
break;
}
return 0;
}
/**
* pmcraid_reset_device - device reset handler function
*
* @scsi_cmd: scsi command struct
* @timeout: command timeout to be used for the reset command
* @modifier: reset modifier indicating the reset sequence to be performed
*
* This function issues a device reset to the affected device.
* A LUN reset will be sent to the device first. If that does
* not work, a target reset will be sent.
*
* Return value:
* SUCCESS / FAILED
*/
static int pmcraid_reset_device(
struct scsi_cmnd *scsi_cmd,
unsigned long timeout,
u8 modifier
)
{
struct pmcraid_cmd *cmd;
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
struct pmcraid_ioarcb *ioarcb;
unsigned long lock_flags;
u32 ioasc;
pinstance =
(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
res = scsi_cmd->device->hostdata;
if (!res) {
sdev_printk(KERN_ERR, scsi_cmd->device,
"reset_device: NULL resource pointer\n");
return FAILED;
}
/* If adapter is currently going through reset/reload, return failed.
* This will force the mid-layer to call _eh_bus/host reset, which
* will then go to sleep and wait for the reset to complete
*/
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (pinstance->ioa_reset_in_progress ||
pinstance->ioa_state == IOA_STATE_DEAD) {
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
return FAILED;
}
res->reset_progress = 1;
pmcraid_info("Resetting %s resource with addr %x\n",
((modifier & RESET_DEVICE_LUN) ? "LUN" :
((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
le32_to_cpu(res->cfg_entry.resource_address));
/* get a free cmd block */
cmd = pmcraid_get_free_cmd(pinstance);
if (cmd == NULL) {
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
pmcraid_err("%s: no cmd blocks are available\n", __func__);
return FAILED;
}
ioarcb = &cmd->ioa_cb->ioarcb;
ioarcb->resource_handle = res->cfg_entry.resource_handle;
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
/* Initialize reset modifier bits */
if (modifier)
modifier = ENABLE_RESET_MODIFIER | modifier;
ioarcb->cdb[1] = modifier;
init_completion(&cmd->wait_for_completion);
cmd->completion_req = 1;
pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
pmcraid_send_cmd(cmd,
pmcraid_internal_done,
timeout,
pmcraid_timeout_handler);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
/* RESET_DEVICE command completes after all pending IOARCBs are
* completed. Once this command is completed, pmcraid_internal_done
* will wake up the 'completion' queue.
*/
wait_for_completion(&cmd->wait_for_completion);
/* complete the command here itself and return the command block
* to free list
*/
pmcraid_return_cmd(cmd);
res->reset_progress = 0;
ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
/* set the return value based on the returned ioasc */
return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
* _pmcraid_io_done - helper for pmcraid_io_done function
*
* @cmd: pointer to pmcraid command struct
* @reslen: residual data length to be set in the ioasa
* @ioasc: ioasc either returned by IOA or set by driver itself.
*
* This function is invoked by pmcraid_io_done to complete mid-layer
* scsi ops.
*
* Return value:
* 0 if caller is required to return it to free_pool. Returns 1 if
* caller need not worry about freeing command block as error handler
* will take care of that.
*/
static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
int rc = 0;
scsi_set_resid(scsi_cmd, reslen);
pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
ioasc, scsi_cmd->result);
if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
rc = pmcraid_error_handler(cmd);
if (rc == 0) {
scsi_dma_unmap(scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
}
return rc;
}
/**
* pmcraid_io_done - SCSI completion function
*
* @cmd: pointer to pmcraid command struct
*
* This function is invoked by the tasklet/mid-layer error handler to complete
* the SCSI ops sent from the mid-layer.
*
* Return value
* none
*/
static void pmcraid_io_done(struct pmcraid_cmd *cmd)
{
u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
pmcraid_return_cmd(cmd);
}
/**
* pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
*
* @cmd: command block of the command to be aborted
*
* Return Value:
* returns pointer to command structure used as cancelling cmd
*/
static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
{
struct pmcraid_cmd *cancel_cmd;
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
pinstance = (struct pmcraid_instance *)cmd->drv_inst;
res = cmd->scsi_cmd->device->hostdata;
cancel_cmd = pmcraid_get_free_cmd(pinstance);
if (cancel_cmd == NULL) {
pmcraid_err("%s: no cmd blocks are available\n", __func__);
return NULL;
}
pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
cmd->ioa_cb->ioarcb.cdb[0],
cmd->ioa_cb->ioarcb.response_handle >> 2);
init_completion(&cancel_cmd->wait_for_completion);
cancel_cmd->completion_req = 1;
pmcraid_info("command (%d) CDB[0] = %x for %x\n",
le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
cancel_cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
pmcraid_send_cmd(cancel_cmd,
pmcraid_internal_done,
PMCRAID_INTERNAL_TIMEOUT,
pmcraid_timeout_handler);
return cancel_cmd;
}
/**
* pmcraid_abort_complete - Waits for ABORT TASK completion
*
* @cancel_cmd: command block use as cancelling command
*
* Return Value:
* returns SUCCESS if ABORT TASK has good completion
* otherwise FAILED
*/
static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
{
struct pmcraid_resource_entry *res;
u32 ioasc;
wait_for_completion(&cancel_cmd->wait_for_completion);
res = cancel_cmd->res;
cancel_cmd->res = NULL;
ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
/* If the abort task did not time out we will get a Good completion
* as sense_key; otherwise we may get one of the following responses
* due to a subsequent bus reset or device reset. In case IOASC is
* NR_SYNC_REQUIRED, set the sync_reqd flag for the corresponding resource
*/
if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
res->sync_reqd = 1;
ioasc = 0;
}
/* complete the command here itself */
pmcraid_return_cmd(cancel_cmd);
return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
* pmcraid_eh_abort_handler - entry point for aborting a single task on errors
*
* @scsi_cmd: scsi command struct given by mid-layer. When this is called
* the mid-layer ensures that no other commands are queued. This
* never gets called under interrupt, but from a separate eh thread.
*
* Return value:
* SUCCESS / FAILED
*/
static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
{
struct pmcraid_instance *pinstance;
struct pmcraid_cmd *cmd;
struct pmcraid_resource_entry *res;
unsigned long host_lock_flags;
unsigned long pending_lock_flags;
struct pmcraid_cmd *cancel_cmd = NULL;
int cmd_found = 0;
int rc = FAILED;
pinstance =
(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
scmd_printk(KERN_INFO, scsi_cmd,
"I/O command timed out, aborting it.\n");
res = scsi_cmd->device->hostdata;
if (res == NULL)
return rc;
/* If we are currently going through reset/reload, return failed.
* This will force the mid-layer to eventually call
* pmcraid_eh_host_reset which will then go to sleep and wait for the
* reset to complete
*/
spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
if (pinstance->ioa_reset_in_progress ||
pinstance->ioa_state == IOA_STATE_DEAD) {
spin_unlock_irqrestore(pinstance->host->host_lock,
host_lock_flags);
return rc;
}
/* loop over the pending cmd list to find the cmd corresponding to this
* scsi_cmd. Note that this command might not have been completed
* already. Locking: all pending commands are protected with
* pending_pool_lock.
*/
spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
if (cmd->scsi_cmd == scsi_cmd) {
cmd_found = 1;
break;
}
}
spin_unlock_irqrestore(&pinstance->pending_pool_lock,
pending_lock_flags);
/* If the command to be aborted was given to IOA and still pending with
* it, send ABORT_TASK to abort this and wait for its completion
*/
if (cmd_found)
cancel_cmd = pmcraid_abort_cmd(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock,
host_lock_flags);
if (cancel_cmd) {
cancel_cmd->res = cmd->scsi_cmd->device->hostdata;
rc = pmcraid_abort_complete(cancel_cmd);
}
return cmd_found ? rc : SUCCESS;
}
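/* Note on the abort flow above: pmcraid_abort_cmd() only fires the cancel
* request and returns the cancelling command block, while
* pmcraid_abort_complete() sleeps on its completion after host_lock has
* been dropped, so the eh thread never waits for firmware while holding
* the lock.
*/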
/**
* pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
*
* @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
*
* All these routines invoke pmcraid_reset_device with appropriate parameters.
* Since these are called from the mid-layer EH thread, no other IO will be
* queued to the resource being reset. However, the control path (IOCTL) may be
* active, so it is necessary to synchronize IOARRIN writes, which
* pmcraid_reset_device takes care of by locking/unlocking host_lock.
*
* Return value
* SUCCESS or FAILED
*/
static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
scmd_printk(KERN_INFO, scmd,
"resetting device due to an I/O command timeout.\n");
return pmcraid_reset_device(scmd,
PMCRAID_INTERNAL_TIMEOUT,
RESET_DEVICE_LUN);
}
static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
{
scmd_printk(KERN_INFO, scmd,
"Doing bus reset due to an I/O command timeout.\n");
return pmcraid_reset_device(scmd,
PMCRAID_RESET_BUS_TIMEOUT,
RESET_DEVICE_BUS);
}
static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
{
scmd_printk(KERN_INFO, scmd,
"Doing target reset due to an I/O command timeout.\n");
return pmcraid_reset_device(scmd,
PMCRAID_INTERNAL_TIMEOUT,
RESET_DEVICE_TARGET);
}
/**
* pmcraid_eh_host_reset_handler - adapter reset handler callback
*
* @scmd: pointer to scsi_cmd that was sent to a resource of adapter
*
* Initiates adapter reset to bring it up to operational state
*
* Return value
* SUCCESS or FAILED
*/
static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
{
unsigned long interval = 10000; /* 10 seconds interval */
int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)(scmd->device->host->hostdata);
/* wait for up to an additional 150 seconds just in case the firmware
* comes up and completes all the pending commands, excluding the
* two HCAMs (CCN and LDN).
*/
while (waits--) {
if (atomic_read(&pinstance->outstanding_cmds) <=
PMCRAID_MAX_HCAM_CMD)
return SUCCESS;
msleep(interval);
}
dev_err(&pinstance->pdev->dev,
"Adapter being reset due to an I/O command timeout.\n");
return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
}
/**
* pmcraid_task_attributes - Translate SPI Q-Tags to task attributes
* @scsi_cmd: scsi command struct
*
* Return value
* task attribute (TASK_TAG_*) corresponding to the tag message, or 0 if
* the task is not tagged
*/
static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd)
{
char tag[2];
u8 rc = 0;
if (scsi_populate_tag_msg(scsi_cmd, tag)) {
switch (tag[0]) {
case MSG_SIMPLE_TAG:
rc = TASK_TAG_SIMPLE;
break;
case MSG_HEAD_TAG:
rc = TASK_TAG_QUEUE_HEAD;
break;
case MSG_ORDERED_TAG:
rc = TASK_TAG_ORDERED;
break;
}
}
return rc;
}
/**
* pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
* @cmd: pmcraid command struct
* @sgcount: count of scatter-gather elements
*
* Return value
* returns a pointer to struct pmcraid_ioadl_desc, initialized to point to
* internal or external IOADLs
*/
struct pmcraid_ioadl_desc *
pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
{
struct pmcraid_ioadl_desc *ioadl;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
int ioadl_count = 0;
if (ioarcb->add_cmd_param_length)
ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
ioarcb->ioadl_length =
sizeof(struct pmcraid_ioadl_desc) * sgcount;
if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
/* external ioadls start at offset 0x80 from control_block
* structure, re-using 24 out of 27 ioadls part of IOARCB.
* It is necessary to indicate to firmware that driver is
* using ioadls to be treated as external to IOARCB.
*/
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
ioarcb->ioadl_bus_addr =
cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[3]));
ioadl = &ioarcb->add_data.u.ioadl[3];
} else {
ioarcb->ioadl_bus_addr =
cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[ioadl_count]));
ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
ioarcb->ioarcb_bus_addr |=
DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
}
return ioadl;
}
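/* Worked example of the descriptor placement above (a sketch derived from
* the code itself, not from extra firmware documentation): IOARCBs are
* 32-byte aligned, so the low five bits of ioarcb_bus_addr are free to
* carry metadata. When the descriptors fit inside the IOARCB, the count of
* 8-descriptor blocks, DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8), is
* OR'ed into those bits. When they do not fit, the bits are cleared
* (&= ~0x1FULL) to tell firmware the IOADLs are external, and
* ioadl_bus_addr points at add_data.u.ioadl[3], skipping the first three
* slots that can hold additional command parameters.
*/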
/**
* pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
* @pinstance: pointer to adapter instance structure
* @cmd: pmcraid command struct
*
* This function is invoked by queuecommand entry point while sending a command
* to firmware. This builds ioadl descriptors and sets up ioarcb fields.
*
* Return value:
* 0 on success or -1 on failure
*/
static int pmcraid_build_ioadl(
struct pmcraid_instance *pinstance,
struct pmcraid_cmd *cmd
)
{
int i, nseg;
struct scatterlist *sglist;
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
u32 length = scsi_bufflen(scsi_cmd);
if (!length)
return 0;
nseg = scsi_dma_map(scsi_cmd);
if (nseg < 0) {
scmd_printk(KERN_ERR, scsi_cmd, "scsi_map_dma failed!\n");
return -1;
} else if (nseg > PMCRAID_MAX_IOADLS) {
scsi_dma_unmap(scsi_cmd);
scmd_printk(KERN_ERR, scsi_cmd,
"sg count is (%d) more than allowed!\n", nseg);
return -1;
}
/* Initialize IOARCB data transfer length fields */
if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->data_transfer_length = cpu_to_le32(length);
ioadl = pmcraid_init_ioadls(cmd, nseg);
/* Initialize IOADL descriptor addresses */
scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
ioadl[i].flags = 0;
}
/* setup last descriptor */
ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
return 0;
}
/**
* pmcraid_free_sglist - Frees an allocated SG buffer list
* @sglist: scatter/gather list pointer
*
* Frees DMA'able memory previously allocated with pmcraid_alloc_sglist
*
* Return value:
* none
*/
static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
{
int i;
for (i = 0; i < sglist->num_sg; i++)
__free_pages(sg_page(&(sglist->scatterlist[i])),
sglist->order);
kfree(sglist);
}
/**
* pmcraid_alloc_sglist - Allocates memory for a SG list
* @buflen: buffer length
*
* Allocates a DMA'able buffer in chunks and assembles a scatter/gather
* list.
*
* Return value
* pointer to sglist / NULL on failure
*/
static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
{
struct pmcraid_sglist *sglist;
struct scatterlist *scatterlist;
struct page *page;
int num_elem, i, j;
int sg_size;
int order;
int bsize_elem;
sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
order = (sg_size > 0) ? get_order(sg_size) : 0;
bsize_elem = PAGE_SIZE * (1 << order);
/* Determine the actual number of sg entries needed */
if (buflen % bsize_elem)
num_elem = (buflen / bsize_elem) + 1;
else
num_elem = buflen / bsize_elem;
/* Allocate a scatter/gather list for the DMA */
sglist = kzalloc(sizeof(struct pmcraid_sglist) +
(sizeof(struct scatterlist) * (num_elem - 1)),
GFP_KERNEL);
if (sglist == NULL)
return NULL;
scatterlist = sglist->scatterlist;
sg_init_table(scatterlist, num_elem);
sglist->order = order;
sglist->num_sg = num_elem;
sg_size = buflen;
for (i = 0; i < num_elem; i++) {
page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
if (!page) {
for (j = i - 1; j >= 0; j--)
__free_pages(sg_page(&scatterlist[j]), order);
kfree(sglist);
return NULL;
}
sg_set_page(&scatterlist[i], page,
sg_size < bsize_elem ? sg_size : bsize_elem, 0);
sg_size -= bsize_elem;
}
return sglist;
}
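/* Worked sizing example for the allocator above (a sketch assuming 4 KiB
* pages and a hypothetical PMCRAID_MAX_IOADLS of 64 with buflen = 1 MiB;
* the real constant lives in the driver header):
*
*	sg_size    = 1048576 / 63     = 16644	(target bytes per element)
*	order      = get_order(16644) = 3	(32 KiB chunks)
*	bsize_elem = 4096 << 3        = 32768
*	num_elem   = 1048576 / 32768  = 32	(no remainder)
*
* i.e. the buffer is carved into higher-order page chunks so the element
* count stays within the IOADL limit; the final element is shortened via
* sg_set_page() when buflen is not a multiple of bsize_elem.
*/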
/**
* pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
* @sglist: scatter/gather list pointer
* @buffer: buffer pointer
* @len: buffer length
* @direction: data transfer direction
*
* Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
*
* Return value:
* 0 on success / other on failure
*/
static int pmcraid_copy_sglist(
struct pmcraid_sglist *sglist,
unsigned long buffer,
u32 len,
int direction
)
{
struct scatterlist *scatterlist;
void *kaddr;
int bsize_elem;
int i;
int rc = 0;
/* Determine the actual number of bytes per element */
bsize_elem = PAGE_SIZE * (1 << sglist->order);
scatterlist = sglist->scatterlist;
for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
struct page *page = sg_page(&scatterlist[i]);
kaddr = kmap(page);
if (direction == DMA_TO_DEVICE)
rc = __copy_from_user(kaddr,
(void *)buffer,
bsize_elem);
else
rc = __copy_to_user((void *)buffer, kaddr, bsize_elem);
kunmap(page);
if (rc) {
pmcraid_err("failed to copy user data into sg list\n");
return -EFAULT;
}
scatterlist[i].length = bsize_elem;
}
if (len % bsize_elem) {
struct page *page = sg_page(&scatterlist[i]);
kaddr = kmap(page);
if (direction == DMA_TO_DEVICE)
rc = __copy_from_user(kaddr,
(void *)buffer,
len % bsize_elem);
else
rc = __copy_to_user((void *)buffer,
kaddr,
len % bsize_elem);
kunmap(page);
scatterlist[i].length = len % bsize_elem;
}
if (rc) {
pmcraid_err("failed to copy user data into sg list\n");
rc = -EFAULT;
}
return rc;
}
/**
* pmcraid_queuecommand - Queue a mid-layer request
* @scsi_cmd: scsi command struct
* @done: done function
*
* This function queues a request generated by the mid-layer. The mid-layer calls
* this routine within host->lock. Some of the functions called by queuecommand
* would use cmd block queue locks (free_pool_lock and pending_pool_lock)
*
* Return value:
* 0 on success
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
*/
static int pmcraid_queuecommand_lck(
struct scsi_cmnd *scsi_cmd,
void (*done) (struct scsi_cmnd *)
)
{
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
struct pmcraid_ioarcb *ioarcb;
struct pmcraid_cmd *cmd;
u32 fw_version;
int rc = 0;
pinstance =
(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
scsi_cmd->scsi_done = done;
res = scsi_cmd->device->hostdata;
scsi_cmd->result = (DID_OK << 16);
/* if the adapter is marked as dead, set result to DID_NO_CONNECT and
* complete the command
*/
if (pinstance->ioa_state == IOA_STATE_DEAD) {
pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
return 0;
}
/* If IOA reset is in progress, can't queue the commands */
if (pinstance->ioa_reset_in_progress)
return SCSI_MLQUEUE_HOST_BUSY;
/* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), complete
* the command here itself with success return
*/
if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
scsi_cmd->scsi_done(scsi_cmd);
return 0;
}
/* initialize the command and IOARCB to be sent to IOA */
cmd = pmcraid_get_free_cmd(pinstance);
if (cmd == NULL) {
pmcraid_err("free command block is not available\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
cmd->scsi_cmd = scsi_cmd;
ioarcb = &(cmd->ioa_cb->ioarcb);
memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
ioarcb->resource_handle = res->cfg_entry.resource_handle;
ioarcb->request_type = REQ_TYPE_SCSI;
/* set the hrrq number where the IOA should respond. Note that all cmds
* generated internally use hrrq_id 0; the exception is the cmd block of
* a scsi_cmd which is re-used (e.g. cancel/abort), which uses the
* hrrq_id assigned here in queuecommand
*/
ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
pinstance->num_hrrq;
cmd->cmd_done = pmcraid_io_done;
if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
if (scsi_cmd->underflow == 0)
ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
if (res->sync_reqd) {
ioarcb->request_flags0 |= SYNC_COMPLETE;
res->sync_reqd = 0;
}
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd);
if (RES_IS_GSCSI(res->cfg_entry))
ioarcb->request_flags1 |= DELAY_AFTER_RESET;
}
rc = pmcraid_build_ioadl(pinstance, cmd);
pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
le32_to_cpu(ioarcb->response_handle) >> 2,
scsi_cmd->cmnd[0], pinstance->host->unique_id,
RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
PMCRAID_PHYS_BUS_ID,
RES_IS_VSET(res->cfg_entry) ?
(fw_version <= PMCRAID_FW_VERSION_1 ?
res->cfg_entry.unique_flags1 :
res->cfg_entry.array_id & 0xFF) :
RES_TARGET(res->cfg_entry.resource_address),
RES_LUN(res->cfg_entry.resource_address));
if (likely(rc == 0)) {
_pmcraid_fire_command(cmd);
} else {
pmcraid_err("queuecommand could not build ioadl\n");
pmcraid_return_cmd(cmd);
rc = SCSI_MLQUEUE_HOST_BUSY;
}
return rc;
}
static DEF_SCSI_QCMD(pmcraid_queuecommand)
/**
* pmcraid_chr_open - char node "open" entry point, allowed only for users
* with admin access
*/
static int pmcraid_chr_open(struct inode *inode, struct file *filep)
{
struct pmcraid_instance *pinstance;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/* Populate the adapter instance pointer for use by ioctl */
pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
filep->private_data = pinstance;
return 0;
}
/**
* pmcraid_chr_release - char node "release" entry point
*/
static int pmcraid_chr_release(struct inode *inode, struct file *filep)
{
struct pmcraid_instance *pinstance = filep->private_data;
filep->private_data = NULL;
fasync_helper(-1, filep, 0, &pinstance->aen_queue);
return 0;
}
/**
* pmcraid_chr_fasync - async notifier registration from applications
*
* This function adds the calling process to a driver global queue. When an
* event occurs, SIGIO will be sent to all processes in this queue.
*/
static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
{
struct pmcraid_instance *pinstance;
int rc;
pinstance = filep->private_data;
mutex_lock(&pinstance->aen_queue_lock);
rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
mutex_unlock(&pinstance->aen_queue_lock);
return rc;
}
/**
* pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
* commands sent over IOCTL interface
*
* @cmd : pointer to struct pmcraid_cmd
* @buflen : length of the request buffer
* @direction : data transfer direction
*
* Return value
* 0 on success, non-zero error code on failure
*/
static int pmcraid_build_passthrough_ioadls(
struct pmcraid_cmd *cmd,
int buflen,
int direction
)
{
struct pmcraid_sglist *sglist = NULL;
struct scatterlist *sg = NULL;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl;
int i;
sglist = pmcraid_alloc_sglist(buflen);
if (!sglist) {
pmcraid_err("can't allocate memory for passthrough SGls\n");
return -ENOMEM;
}
sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
sglist->scatterlist,
sglist->num_sg, direction);
if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
dev_err(&cmd->drv_inst->pdev->dev,
"Failed to map passthrough buffer!\n");
pmcraid_free_sglist(sglist);
return -EIO;
}
cmd->sglist = sglist;
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
/* Initialize IOADL descriptor addresses */
for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
ioadl[i].flags = 0;
}
/* setup the last descriptor */
ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
return 0;
}
/**
* pmcraid_release_passthrough_ioadls - release passthrough ioadls
*
* @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
* @buflen: size of the request buffer
* @direction: data transfer direction
*
* Return value
* none
*/
static void pmcraid_release_passthrough_ioadls(
struct pmcraid_cmd *cmd,
int buflen,
int direction
)
{
struct pmcraid_sglist *sglist = cmd->sglist;
if (buflen > 0) {
pci_unmap_sg(cmd->drv_inst->pdev,
sglist->scatterlist,
sglist->num_sg,
direction);
pmcraid_free_sglist(sglist);
cmd->sglist = NULL;
}
}
/**
* pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
*
* @pinstance: pointer to adapter instance structure
* @ioctl_cmd: ioctl code
* @buflen: length of the user buffer
* @arg: pointer to pmcraid_passthrough_ioctl_buffer user buffer
*
* Return value
* 0 on success, non-zero error code on failure
*/
static long pmcraid_ioctl_passthrough(
struct pmcraid_instance *pinstance,
unsigned int ioctl_cmd,
unsigned int buflen,
unsigned long arg
)
{
struct pmcraid_passthrough_ioctl_buffer *buffer;
struct pmcraid_ioarcb *ioarcb;
struct pmcraid_cmd *cmd;
struct pmcraid_cmd *cancel_cmd;
unsigned long request_buffer;
unsigned long request_offset;
unsigned long lock_flags;
void *ioasa;
u32 ioasc;
int request_size;
int buffer_size;
u8 access, direction;
int rc = 0;
/* If IOA reset is in progress, wait 10 secs for reset to complete */
if (pinstance->ioa_reset_in_progress) {
rc = wait_event_interruptible_timeout(
pinstance->reset_wait_q,
!pinstance->ioa_reset_in_progress,
msecs_to_jiffies(10000));
if (!rc)
return -ETIMEDOUT;
else if (rc < 0)
return -ERESTARTSYS;
}
/* If adapter is not in operational state, return error */
if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
pmcraid_err("IOA is not operational\n");
return -ENOTTY;
}
buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
pmcraid_err("no memory for passthrough buffer\n");
return -ENOMEM;
}
request_offset =
offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
request_buffer = arg + request_offset;
rc = __copy_from_user(buffer,
(struct pmcraid_passthrough_ioctl_buffer *) arg,
sizeof(struct pmcraid_passthrough_ioctl_buffer));
ioasa =
(void *)(arg +
offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
if (rc) {
pmcraid_err("ioctl: can't copy passthrough buffer\n");
rc = -EFAULT;
goto out_free_buffer;
}
request_size = buffer->ioarcb.data_transfer_length;
if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
access = VERIFY_READ;
direction = DMA_TO_DEVICE;
} else {
access = VERIFY_WRITE;
direction = DMA_FROM_DEVICE;
}
if (request_size > 0) {
rc = access_ok(access, arg, request_offset + request_size);
if (!rc) {
rc = -EFAULT;
goto out_free_buffer;
}
} else if (request_size < 0) {
rc = -EINVAL;
goto out_free_buffer;
}
/* check if we have any additional command parameters */
if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
rc = -EINVAL;
goto out_free_buffer;
}
cmd = pmcraid_get_free_cmd(pinstance);
if (!cmd) {
pmcraid_err("free command block is not available\n");
rc = -ENOMEM;
goto out_free_buffer;
}
cmd->scsi_cmd = NULL;
ioarcb = &(cmd->ioa_cb->ioarcb);
/* Copy the user-provided IOARCB field by field */
ioarcb->resource_handle = buffer->ioarcb.resource_handle;
ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
ioarcb->request_type = buffer->ioarcb.request_type;
ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
if (buffer->ioarcb.add_cmd_param_length) {
ioarcb->add_cmd_param_length =
buffer->ioarcb.add_cmd_param_length;
ioarcb->add_cmd_param_offset =
buffer->ioarcb.add_cmd_param_offset;
memcpy(ioarcb->add_data.u.add_cmd_params,
buffer->ioarcb.add_data.u.add_cmd_params,
buffer->ioarcb.add_cmd_param_length);
}
/* set the hrrq number where the IOA should respond. Note that all cmds
* generated internally use hrrq_id 0; the exception is the cmd block of
* a scsi_cmd which is re-used (e.g. cancel/abort), which uses the
* hrrq_id assigned in queuecommand
*/
ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
pinstance->num_hrrq;
if (request_size) {
rc = pmcraid_build_passthrough_ioadls(cmd,
request_size,
direction);
if (rc) {
pmcraid_err("couldn't build passthrough ioadls\n");
goto out_free_buffer;
}
} else if (request_size < 0) {
rc = -EINVAL;
goto out_free_buffer;
}
/* If data is being written into the device, copy the data from user
* buffers
*/
if (direction == DMA_TO_DEVICE && request_size > 0) {
rc = pmcraid_copy_sglist(cmd->sglist,
request_buffer,
request_size,
direction);
if (rc) {
pmcraid_err("failed to copy user buffer\n");
goto out_free_sglist;
}
}
/* passthrough ioctl is a blocking command, so put the user to sleep
* until completion or timeout. Note that a timeout value of 0 means a
* blocking wait with no timeout.
*/
cmd->cmd_done = pmcraid_internal_done;
init_completion(&cmd->wait_for_completion);
cmd->completion_req = 1;
pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
cmd->ioa_cb->ioarcb.cdb[0],
le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
_pmcraid_fire_command(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
/* NOTE! Remove the line below once abort_task is implemented in
* firmware. This line disables the ioctl command timeout handling logic
* (similar to IO command timeout handling), making ioctl commands wait
* until command completion regardless of the timeout value specified in
* the ioarcb
*/
buffer->ioarcb.cmd_timeout = 0;
/* If command timeout is specified put caller to wait till that time,
* otherwise it would be blocking wait. If command gets timed out, it
* will be aborted.
*/
if (buffer->ioarcb.cmd_timeout == 0) {
wait_for_completion(&cmd->wait_for_completion);
} else if (!wait_for_completion_timeout(
&cmd->wait_for_completion,
msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2),
cmd->ioa_cb->ioarcb.cdb[0]);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
cancel_cmd = pmcraid_abort_cmd(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
if (cancel_cmd) {
wait_for_completion(&cancel_cmd->wait_for_completion);
ioasc = cancel_cmd->ioa_cb->ioasa.ioasc;
pmcraid_return_cmd(cancel_cmd);
/* if abort task couldn't find the command, i.e. it was completed
* prior to aborting, return good completion. If the command got
* aborted successfully, or there was an IOA reset due to the abort
* task itself getting timed out, then return -ETIMEDOUT
*/
if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
rc = -ETIMEDOUT;
goto out_handle_response;
}
}
/* if there was no command block for abort task, or abort task failed
* to abort the IOARCB, wait for 150 more seconds and initiate the
* reset sequence after timeout
*/
if (!wait_for_completion_timeout(
&cmd->wait_for_completion,
msecs_to_jiffies(150 * 1000))) {
pmcraid_reset_bringup(cmd->drv_inst);
rc = -ETIMEDOUT;
}
}
out_handle_response:
/* copy entire IOASA buffer and return IOCTL success.
* If copying IOASA to user-buffer fails, return
* EFAULT
*/
if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
sizeof(struct pmcraid_ioasa))) {
pmcraid_err("failed to copy ioasa buffer to user\n");
rc = -EFAULT;
}
/* If the data transfer was from device, copy the data onto user
* buffers
*/
else if (direction == DMA_FROM_DEVICE && request_size > 0) {
rc = pmcraid_copy_sglist(cmd->sglist,
request_buffer,
request_size,
direction);
if (rc) {
pmcraid_err("failed to copy user buffer\n");
rc = -EFAULT;
}
}
out_free_sglist:
pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
pmcraid_return_cmd(cmd);
out_free_buffer:
kfree(buffer);
return rc;
}
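/* A minimal user-space sketch of driving this passthrough path
* (illustrative only: the device node name, ioctl request macro and error
* handling below are hypothetical placeholders, not definitions exported
* by this driver):
*
*	struct pmcraid_passthrough_ioctl_buffer buf;
*	int fd = open("/dev/pmcsas0", O_RDWR);
*
*	memset(&buf, 0, sizeof(buf));
*	// fill buf.ioarcb: resource_handle, request_type, cdb[],
*	// data_transfer_length and the TRANSFER_DIR_WRITE flag as needed
*	if (ioctl(fd, PMCRAID_IOCTL_PASSTHROUGH_CMD, &buf) == 0)
*		;	// buf.ioasa now holds the completion IOASC
*	close(fd);
*
* The kernel side copies the IOARCB in field by field, maps the trailing
* request_buffer for DMA, and copies the IOASA back out on completion.
*/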
/**
* pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
*
* @pinstance: pointer to adapter instance structure
* @cmd: ioctl command passed in
* @buflen: length of user_buffer
* @user_buffer: user buffer pointer
*
* Return Value
* 0 in case of success, otherwise appropriate error code
*/
static long pmcraid_ioctl_driver(
struct pmcraid_instance *pinstance,
unsigned int cmd,
unsigned int buflen,
void __user *user_buffer
)
{
int rc = -ENOSYS;
if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
pmcraid_err("ioctl_driver: access fault in request buffer\n");
return -EFAULT;
}
switch (cmd) {
case PMCRAID_IOCTL_RESET_ADAPTER:
pmcraid_reset_bringup(pinstance);
rc = 0;
break;
default:
break;
}
return rc;
}
/**
* pmcraid_check_ioctl_buffer - check for proper access to user buffer
*
* @cmd: ioctl command
* @arg: user buffer
* @hdr: pointer to kernel memory for pmcraid_ioctl_header
*
* Return Value
* negative error code if there are access issues, otherwise zero.
* Upon success, the ioctl header copied out of the user buffer is
* returned through @hdr.
*/
static int pmcraid_check_ioctl_buffer(
int cmd,
void __user *arg,
struct pmcraid_ioctl_header *hdr
)
{
int rc = 0;
int access = VERIFY_READ;
if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
pmcraid_err("couldn't copy ioctl header from user buffer\n");
return -EFAULT;
}
/* check for valid driver signature */
rc = memcmp(hdr->signature,
PMCRAID_IOCTL_SIGNATURE,
sizeof(hdr->signature));
if (rc) {
pmcraid_err("signature verification failed\n");
return -EINVAL;
}
/* check for appropriate buffer access */
if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
access = VERIFY_WRITE;
rc = access_ok(access,
(arg + sizeof(struct pmcraid_ioctl_header)),
hdr->buffer_length);
if (!rc) {
pmcraid_err("access failed for user buffer of size %d\n",
hdr->buffer_length);
return -EFAULT;
}
return 0;
}
/**
* pmcraid_chr_ioctl - char node ioctl entry point
*/
static long pmcraid_chr_ioctl(
struct file *filep,
unsigned int cmd,
unsigned long arg
)
{
struct pmcraid_instance *pinstance = NULL;
struct pmcraid_ioctl_header *hdr = NULL;
int retval = -ENOTTY;
hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
if (!hdr) {
pmcraid_err("failed to allocate memory for ioctl header\n");
return -ENOMEM;
}
retval = pmcraid_check_ioctl_buffer(cmd, (void *)arg, hdr);
if (retval) {
pmcraid_info("chr_ioctl: header check failed\n");
kfree(hdr);
return retval;
}
pinstance = filep->private_data;
if (!pinstance) {
pmcraid_info("adapter instance is not found\n");
kfree(hdr);
return -ENOTTY;
}
switch (_IOC_TYPE(cmd)) {
case PMCRAID_PASSTHROUGH_IOCTL:
/* If ioctl code is to download microcode, we need to block
* mid-layer requests.
*/
if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
scsi_block_requests(pinstance->host);
retval = pmcraid_ioctl_passthrough(pinstance,
cmd,
hdr->buffer_length,
arg);
if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
scsi_unblock_requests(pinstance->host);
break;
case PMCRAID_DRIVER_IOCTL:
arg += sizeof(struct pmcraid_ioctl_header);
retval = pmcraid_ioctl_driver(pinstance,
cmd,
hdr->buffer_length,
(void __user *)arg);
break;
default:
retval = -ENOTTY;
break;
}
kfree(hdr);
return retval;
}
/**
* File operations structure for management interface
*/
static const struct file_operations pmcraid_fops = {
.owner = THIS_MODULE,
.open = pmcraid_chr_open,
.release = pmcraid_chr_release,
.fasync = pmcraid_chr_fasync,
.unlocked_ioctl = pmcraid_chr_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = pmcraid_chr_ioctl,
#endif
.llseek = noop_llseek,
};
/**
* pmcraid_show_log_level - Display adapter's error logging level
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes printed to buffer
*/
static ssize_t pmcraid_show_log_level(
struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)shost->hostdata;
return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
}
/**
* pmcraid_store_log_level - Change the adapter's error logging level
* @dev: class device struct
* @buf: buffer
* @count: not used
*
* Return value:
* number of bytes used from the buffer on success, negative error code
* otherwise
*/
static ssize_t pmcraid_store_log_level(
struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count
)
{
struct Scsi_Host *shost;
struct pmcraid_instance *pinstance;
unsigned long val;
if (strict_strtoul(buf, 10, &val))
return -EINVAL;
/* log-level should be from 0 to 2 */
if (val > 2)
return -EINVAL;
shost = class_to_shost(dev);
pinstance = (struct pmcraid_instance *)shost->hostdata;
pinstance->current_log_level = val;
return strlen(buf);
}
static struct device_attribute pmcraid_log_level_attr = {
.attr = {
.name = "log_level",
.mode = S_IRUGO | S_IWUSR,
},
.show = pmcraid_show_log_level,
.store = pmcraid_store_log_level,
};
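/* The attribute above appears as /sys/class/scsi_host/host<N>/log_level;
* reading it reports the current level, and writing a value from 0 to 2
* adjusts the IOASC logging verbosity.
*/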
/**
* pmcraid_show_drv_version - Display driver version
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes printed to buffer
*/
static ssize_t pmcraid_show_drv_version(
struct device *dev,
struct device_attribute *attr,
char *buf
)
{
return snprintf(buf, PAGE_SIZE, "version: %s\n",
PMCRAID_DRIVER_VERSION);
}
static struct device_attribute pmcraid_driver_version_attr = {
.attr = {
.name = "drv_version",
.mode = S_IRUGO,
},
.show = pmcraid_show_drv_version,
};
/**
* pmcraid_show_adapter_id - Display driver assigned adapter id
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes printed to buffer
*/
static ssize_t pmcraid_show_adapter_id(
struct device *dev,
struct device_attribute *attr,
char *buf
)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)shost->hostdata;
u32 adapter_id = (pinstance->pdev->bus->number << 8) |
pinstance->pdev->devfn;
u32 aen_group = pmcraid_event_family.id;
return snprintf(buf, PAGE_SIZE,
"adapter id: %d\nminor: %d\naen group: %d\n",
adapter_id, MINOR(pinstance->cdev.dev), aen_group);
}
static struct device_attribute pmcraid_adapter_id_attr = {
.attr = {
.name = "adapter_id",
.mode = S_IRUGO | S_IWUSR,
},
.show = pmcraid_show_adapter_id,
};
static struct device_attribute *pmcraid_host_attrs[] = {
&pmcraid_log_level_attr,
&pmcraid_driver_version_attr,
&pmcraid_adapter_id_attr,
NULL,
};
/* host template structure for pmcraid driver */
static struct scsi_host_template pmcraid_host_template = {
.module = THIS_MODULE,
.name = PMCRAID_DRIVER_NAME,
.queuecommand = pmcraid_queuecommand,
.eh_abort_handler = pmcraid_eh_abort_handler,
.eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
.eh_target_reset_handler = pmcraid_eh_target_reset_handler,
.eh_device_reset_handler = pmcraid_eh_device_reset_handler,
.eh_host_reset_handler = pmcraid_eh_host_reset_handler,
.slave_alloc = pmcraid_slave_alloc,
.slave_configure = pmcraid_slave_configure,
.slave_destroy = pmcraid_slave_destroy,
.change_queue_depth = pmcraid_change_queue_depth,
.change_queue_type = pmcraid_change_queue_type,
.can_queue = PMCRAID_MAX_IO_CMD,
.this_id = -1,
.sg_tablesize = PMCRAID_MAX_IOADLS,
.max_sectors = PMCRAID_IOA_MAX_SECTORS,
.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = pmcraid_host_attrs,
.proc_name = PMCRAID_DRIVER_NAME
};
/**
* pmcraid_isr_msix - implements the MSI-X interrupt handling routine
* @irq: interrupt vector number
* @dev_id: pointer to hrrq_vector
*
* Return Value
* IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
*/
static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id)
{
struct pmcraid_isr_param *hrrq_vector;
struct pmcraid_instance *pinstance;
unsigned long lock_flags;
u32 intrs_val;
int hrrq_id;
hrrq_vector = (struct pmcraid_isr_param *)dev_id;
hrrq_id = hrrq_vector->hrrq_id;
pinstance = hrrq_vector->drv_inst;
if (!hrrq_id) {
/* Read the interrupt */
intrs_val = pmcraid_read_interrupts(pinstance);
if (intrs_val &&
((ioread32(pinstance->int_regs.host_ioa_interrupt_reg)
& DOORBELL_INTR_MSIX_CLR) == 0)) {
/* Any error interrupts, including unit_check,
* initiate IOA reset. In case of unit check, indicate
* to the reset sequence that the IOA unit checked and
* prepare for a dump during the reset sequence
*/
if (intrs_val & PMCRAID_ERROR_INTERRUPTS) {
if (intrs_val & INTRS_IOA_UNIT_CHECK)
pinstance->ioa_unit_check = 1;
pmcraid_err("ISR: error interrupts: %x \
initiating reset\n", intrs_val);
spin_lock_irqsave(pinstance->host->host_lock,
lock_flags);
pmcraid_initiate_reset(pinstance);
spin_unlock_irqrestore(
pinstance->host->host_lock,
lock_flags);
}
			/* If the interrupt was part of the IOA initialization,
			 * clear it. Delete the timer and wake up the
			 * reset engine to proceed with the reset sequence.
			 */
if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL)
pmcraid_clr_trans_op(pinstance);
/* Clear the interrupt register by writing
* to host to ioa doorbell. Once done
* FW will clear the interrupt.
*/
iowrite32(DOORBELL_INTR_MSIX_CLR,
pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
}
}
tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
return IRQ_HANDLED;
}
/**
* pmcraid_isr - implements legacy interrupt handling routine
*
* @irq: interrupt vector number
 * @dev_id: pointer to hrrq_vector
*
* Return Value
* IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
*/
static irqreturn_t pmcraid_isr(int irq, void *dev_id)
{
struct pmcraid_isr_param *hrrq_vector;
struct pmcraid_instance *pinstance;
u32 intrs;
unsigned long lock_flags;
int hrrq_id = 0;
/* In case of legacy interrupt mode where interrupts are shared across
* isrs, it may be possible that the current interrupt is not from IOA
*/
if (!dev_id) {
printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
return IRQ_NONE;
}
hrrq_vector = (struct pmcraid_isr_param *)dev_id;
pinstance = hrrq_vector->drv_inst;
intrs = pmcraid_read_interrupts(pinstance);
if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0))
return IRQ_NONE;
/* Any error interrupts including unit_check, initiate IOA reset.
* In case of unit check indicate to reset_sequence that IOA unit
* checked and prepare for a dump during reset sequence
*/
if (intrs & PMCRAID_ERROR_INTERRUPTS) {
if (intrs & INTRS_IOA_UNIT_CHECK)
pinstance->ioa_unit_check = 1;
iowrite32(intrs,
pinstance->int_regs.ioa_host_interrupt_clr_reg);
pmcraid_err("ISR: error interrupts: %x initiating reset\n",
intrs);
intrs = ioread32(
pinstance->int_regs.ioa_host_interrupt_clr_reg);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
pmcraid_initiate_reset(pinstance);
spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
} else {
		/* If the interrupt was part of the IOA initialization,
		 * clear it. Delete the timer and wake up the
		 * reset engine to proceed with the reset sequence.
		 */
if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
pmcraid_clr_trans_op(pinstance);
} else {
iowrite32(intrs,
pinstance->int_regs.ioa_host_interrupt_clr_reg);
ioread32(
pinstance->int_regs.ioa_host_interrupt_clr_reg);
tasklet_schedule(
&(pinstance->isr_tasklet[hrrq_id]));
}
}
return IRQ_HANDLED;
}
/**
* pmcraid_worker_function - worker thread function
*
* @workp: pointer to struct work queue
*
* Return Value
* None
*/
static void pmcraid_worker_function(struct work_struct *workp)
{
struct pmcraid_instance *pinstance;
struct pmcraid_resource_entry *res;
struct pmcraid_resource_entry *temp;
struct scsi_device *sdev;
unsigned long lock_flags;
unsigned long host_lock_flags;
u16 fw_version;
u8 bus, target, lun;
pinstance = container_of(workp, struct pmcraid_instance, worker_q);
/* add resources only after host is added into system */
if (!atomic_read(&pinstance->expose_resources))
return;
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
sdev = res->scsi_dev;
/* host_lock must be held before calling
* scsi_device_get
*/
spin_lock_irqsave(pinstance->host->host_lock,
host_lock_flags);
if (!scsi_device_get(sdev)) {
spin_unlock_irqrestore(
pinstance->host->host_lock,
host_lock_flags);
pmcraid_info("deleting %x from midlayer\n",
res->cfg_entry.resource_address);
list_move_tail(&res->queue,
&pinstance->free_res_q);
spin_unlock_irqrestore(
&pinstance->resource_lock,
lock_flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(&pinstance->resource_lock,
lock_flags);
res->change_detected = 0;
} else {
spin_unlock_irqrestore(
pinstance->host->host_lock,
host_lock_flags);
}
}
}
list_for_each_entry(res, &pinstance->used_res_q, queue) {
if (res->change_detected == RES_CHANGE_ADD) {
if (!pmcraid_expose_resource(fw_version,
&res->cfg_entry))
continue;
if (RES_IS_VSET(res->cfg_entry)) {
bus = PMCRAID_VSET_BUS_ID;
if (fw_version <= PMCRAID_FW_VERSION_1)
target = res->cfg_entry.unique_flags1;
else
target = res->cfg_entry.array_id & 0xFF;
lun = PMCRAID_VSET_LUN_ID;
} else {
bus = PMCRAID_PHYS_BUS_ID;
target =
RES_TARGET(
res->cfg_entry.resource_address);
lun = RES_LUN(res->cfg_entry.resource_address);
}
res->change_detected = 0;
spin_unlock_irqrestore(&pinstance->resource_lock,
lock_flags);
scsi_add_device(pinstance->host, bus, target, lun);
spin_lock_irqsave(&pinstance->resource_lock,
lock_flags);
}
}
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
}
/**
* pmcraid_tasklet_function - Tasklet function
*
* @instance: pointer to msix param structure
*
* Return Value
* None
*/
static void pmcraid_tasklet_function(unsigned long instance)
{
struct pmcraid_isr_param *hrrq_vector;
struct pmcraid_instance *pinstance;
unsigned long hrrq_lock_flags;
unsigned long pending_lock_flags;
unsigned long host_lock_flags;
spinlock_t *lockp; /* hrrq buffer lock */
int id;
__le32 resp;
hrrq_vector = (struct pmcraid_isr_param *)instance;
pinstance = hrrq_vector->drv_inst;
id = hrrq_vector->hrrq_id;
lockp = &(pinstance->hrrq_lock[id]);
/* loop through each of the commands responded by IOA. Each HRRQ buf is
* protected by its own lock. Traversals must be done within this lock
* as there may be multiple tasklets running on multiple CPUs. Note
* that the lock is held just for picking up the response handle and
* manipulating hrrq_curr/toggle_bit values.
*/
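	/* Editor's note on the assumed HRRQ entry layout, inferred from the
	 * decoding below rather than documented here: each 32-bit response
	 * carries the command index in its upper bits (hence "resp >> 2")
	 * and a producer toggle bit (HRRQ_TOGGLE_BIT) that the IOA flips on
	 * each wrap of the ring, so comparing it against host_toggle_bit
	 * tells us whether the entry is new.
	 */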
spin_lock_irqsave(lockp, hrrq_lock_flags);
resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
while ((resp & HRRQ_TOGGLE_BIT) ==
pinstance->host_toggle_bit[id]) {
int cmd_index = resp >> 2;
struct pmcraid_cmd *cmd = NULL;
if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
pinstance->hrrq_curr[id]++;
} else {
pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
pinstance->host_toggle_bit[id] ^= 1u;
}
if (cmd_index >= PMCRAID_MAX_CMD) {
/* In case of invalid response handle, log message */
pmcraid_err("Invalid response handle %d\n", cmd_index);
resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
continue;
}
cmd = pinstance->cmd_list[cmd_index];
spin_unlock_irqrestore(lockp, hrrq_lock_flags);
spin_lock_irqsave(&pinstance->pending_pool_lock,
pending_lock_flags);
list_del(&cmd->free_list);
spin_unlock_irqrestore(&pinstance->pending_pool_lock,
pending_lock_flags);
del_timer(&cmd->timer);
atomic_dec(&pinstance->outstanding_cmds);
if (cmd->cmd_done == pmcraid_ioa_reset) {
spin_lock_irqsave(pinstance->host->host_lock,
host_lock_flags);
cmd->cmd_done(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock,
host_lock_flags);
} else if (cmd->cmd_done != NULL) {
cmd->cmd_done(cmd);
}
/* loop over until we are done with all responses */
spin_lock_irqsave(lockp, hrrq_lock_flags);
resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
}
spin_unlock_irqrestore(lockp, hrrq_lock_flags);
}
/**
 * pmcraid_unregister_interrupt_handler - de-registers interrupt handlers
 * @pinstance: pointer to adapter instance structure
 *
 * This routine un-registers the registered interrupt handlers and
 * also frees irqs/vectors.
 *
 * Return Value
 * None
*/
static
void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
{
int i;
for (i = 0; i < pinstance->num_hrrq; i++)
free_irq(pinstance->hrrq_vector[i].vector,
&(pinstance->hrrq_vector[i]));
if (pinstance->interrupt_mode) {
pci_disable_msix(pinstance->pdev);
pinstance->interrupt_mode = 0;
}
}
/**
* pmcraid_register_interrupt_handler - registers interrupt handler
* @pinstance: pointer to per-adapter instance structure
*
* Return Value
* 0 on success, non-zero error code otherwise.
*/
static int
pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
{
int rc;
struct pci_dev *pdev = pinstance->pdev;
if ((pmcraid_enable_msix) &&
(pci_find_capability(pdev, PCI_CAP_ID_MSIX))) {
int num_hrrq = PMCRAID_NUM_MSIX_VECTORS;
struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS];
int i;
for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
entries[i].entry = i;
rc = pci_enable_msix(pdev, entries, num_hrrq);
if (rc < 0)
goto pmcraid_isr_legacy;
/* Check how many MSIX vectors are allocated and register
* msi-x handlers for each of them giving appropriate buffer
*/
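		/* Editor's note, assuming the old pci_enable_msix()
		 * contract: a negative return is a hard failure, while a
		 * positive return means the request could not be met but
		 * that many vectors are available, so the call is retried
		 * below with the reduced count.
		 */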
if (rc > 0) {
num_hrrq = rc;
if (pci_enable_msix(pdev, entries, num_hrrq))
goto pmcraid_isr_legacy;
}
for (i = 0; i < num_hrrq; i++) {
pinstance->hrrq_vector[i].hrrq_id = i;
pinstance->hrrq_vector[i].drv_inst = pinstance;
pinstance->hrrq_vector[i].vector = entries[i].vector;
rc = request_irq(pinstance->hrrq_vector[i].vector,
pmcraid_isr_msix, 0,
PMCRAID_DRIVER_NAME,
&(pinstance->hrrq_vector[i]));
if (rc) {
int j;
for (j = 0; j < i; j++)
free_irq(entries[j].vector,
&(pinstance->hrrq_vector[j]));
pci_disable_msix(pdev);
goto pmcraid_isr_legacy;
}
}
pinstance->num_hrrq = num_hrrq;
pinstance->interrupt_mode = 1;
iowrite32(DOORBELL_INTR_MODE_MSIX,
pinstance->int_regs.host_ioa_interrupt_reg);
ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
goto pmcraid_isr_out;
}
pmcraid_isr_legacy:
	/* If MSI-X registration failed, fall back to legacy mode where
	 * only one hrrq entry will be used
*/
pinstance->hrrq_vector[0].hrrq_id = 0;
pinstance->hrrq_vector[0].drv_inst = pinstance;
pinstance->hrrq_vector[0].vector = pdev->irq;
pinstance->num_hrrq = 1;
	rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
pmcraid_isr_out:
return rc;
}
/**
 * pmcraid_release_cmd_blocks - release buffers allocated for command blocks
* @pinstance: per adapter instance structure pointer
* @max_index: number of buffer blocks to release
*
* Return Value
* None
*/
static void
pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
{
int i;
for (i = 0; i < max_index; i++) {
kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
pinstance->cmd_list[i] = NULL;
}
kmem_cache_destroy(pinstance->cmd_cachep);
pinstance->cmd_cachep = NULL;
}
/**
 * pmcraid_release_control_blocks - releases buffers allocated for control blocks
* @pinstance: pointer to per adapter instance structure
* @max_index: number of buffers (from 0 onwards) to release
*
* This function assumes that the command blocks for which control blocks are
* linked are not released.
*
* Return Value
* None
*/
static void
pmcraid_release_control_blocks(
struct pmcraid_instance *pinstance,
int max_index
)
{
int i;
if (pinstance->control_pool == NULL)
return;
for (i = 0; i < max_index; i++) {
pci_pool_free(pinstance->control_pool,
pinstance->cmd_list[i]->ioa_cb,
pinstance->cmd_list[i]->ioa_cb_bus_addr);
pinstance->cmd_list[i]->ioa_cb = NULL;
pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
}
pci_pool_destroy(pinstance->control_pool);
pinstance->control_pool = NULL;
}
/**
* pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
 * @pinstance: pointer to per adapter instance structure
*
* Allocates memory for command blocks using kernel slab allocator.
*
* Return Value
* 0 in case of success; -ENOMEM in case of failure
*/
static int __devinit
pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
{
int i;
sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
pinstance->host->unique_id);
pinstance->cmd_cachep = kmem_cache_create(
pinstance->cmd_pool_name,
sizeof(struct pmcraid_cmd), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!pinstance->cmd_cachep)
return -ENOMEM;
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i] =
kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
if (!pinstance->cmd_list[i]) {
pmcraid_release_cmd_blocks(pinstance, i);
return -ENOMEM;
}
}
return 0;
}
/**
* pmcraid_allocate_control_blocks - allocates memory control blocks
 * @pinstance: pointer to per adapter instance structure
*
* This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
* and IOASAs. This is called after command blocks are already allocated.
*
* Return Value
* 0 in case it can allocate all control blocks, otherwise -ENOMEM
*/
static int __devinit
pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
{
int i;
sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
pinstance->host->unique_id);
pinstance->control_pool =
pci_pool_create(pinstance->ctl_pool_name,
pinstance->pdev,
sizeof(struct pmcraid_control_block),
PMCRAID_IOARCB_ALIGNMENT, 0);
if (!pinstance->control_pool)
return -ENOMEM;
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i]->ioa_cb =
pci_pool_alloc(
pinstance->control_pool,
GFP_KERNEL,
&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
if (!pinstance->cmd_list[i]->ioa_cb) {
pmcraid_release_control_blocks(pinstance, i);
return -ENOMEM;
}
memset(pinstance->cmd_list[i]->ioa_cb, 0,
sizeof(struct pmcraid_control_block));
}
return 0;
}
/**
* pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
* @pinstance: pointer to per adapter instance structure
* @maxindex: size of hrrq buffer pointer array
*
* Return Value
* None
*/
static void
pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
{
int i;
for (i = 0; i < maxindex; i++) {
pci_free_consistent(pinstance->pdev,
HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
pinstance->hrrq_start[i],
pinstance->hrrq_start_bus_addr[i]);
/* reset pointers and toggle bit to zeros */
pinstance->hrrq_start[i] = NULL;
pinstance->hrrq_start_bus_addr[i] = 0;
pinstance->host_toggle_bit[i] = 0;
}
}
/**
* pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
* @pinstance: pointer to per adapter instance structure
*
* Return value
 * 0 if hrrq buffers are allocated, -ENOMEM otherwise.
*/
static int __devinit
pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
{
int i, buffer_size;
buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
for (i = 0; i < pinstance->num_hrrq; i++) {
pinstance->hrrq_start[i] =
pci_alloc_consistent(
pinstance->pdev,
buffer_size,
&(pinstance->hrrq_start_bus_addr[i]));
		if (!pinstance->hrrq_start[i]) {
pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
i);
pmcraid_release_host_rrqs(pinstance, i);
return -ENOMEM;
}
memset(pinstance->hrrq_start[i], 0, buffer_size);
pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
pinstance->hrrq_end[i] =
pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
pinstance->host_toggle_bit[i] = 1;
spin_lock_init(&pinstance->hrrq_lock[i]);
}
return 0;
}
/**
* pmcraid_release_hcams - release HCAM buffers
*
* @pinstance: pointer to per adapter instance structure
*
* Return value
* none
*/
static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
{
if (pinstance->ccn.msg != NULL) {
pci_free_consistent(pinstance->pdev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ccn_ext),
pinstance->ccn.msg,
pinstance->ccn.baddr);
pinstance->ccn.msg = NULL;
pinstance->ccn.hcam = NULL;
pinstance->ccn.baddr = 0;
}
if (pinstance->ldn.msg != NULL) {
pci_free_consistent(pinstance->pdev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ldn),
pinstance->ldn.msg,
pinstance->ldn.baddr);
pinstance->ldn.msg = NULL;
pinstance->ldn.hcam = NULL;
pinstance->ldn.baddr = 0;
}
}
/**
* pmcraid_allocate_hcams - allocates HCAM buffers
 * @pinstance: pointer to per adapter instance structure
*
* Return Value:
* 0 in case of successful allocation, non-zero otherwise
*/
static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
{
pinstance->ccn.msg = pci_alloc_consistent(
pinstance->pdev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ccn_ext),
&(pinstance->ccn.baddr));
pinstance->ldn.msg = pci_alloc_consistent(
pinstance->pdev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ldn),
&(pinstance->ldn.baddr));
if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
pmcraid_release_hcams(pinstance);
} else {
pinstance->ccn.hcam =
(void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
pinstance->ldn.hcam =
(void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
atomic_set(&pinstance->ccn.ignore, 0);
atomic_set(&pinstance->ldn.ignore, 0);
}
return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
}
/**
* pmcraid_release_config_buffers - release config.table buffers
* @pinstance: pointer to per adapter instance structure
*
* Return Value
* none
*/
static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
{
if (pinstance->cfg_table != NULL &&
pinstance->cfg_table_bus_addr != 0) {
pci_free_consistent(pinstance->pdev,
sizeof(struct pmcraid_config_table),
pinstance->cfg_table,
pinstance->cfg_table_bus_addr);
pinstance->cfg_table = NULL;
pinstance->cfg_table_bus_addr = 0;
}
if (pinstance->res_entries != NULL) {
int i;
for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
list_del(&pinstance->res_entries[i].queue);
kfree(pinstance->res_entries);
pinstance->res_entries = NULL;
}
pmcraid_release_hcams(pinstance);
}
/**
* pmcraid_allocate_config_buffers - allocates DMAable memory for config table
 * @pinstance: pointer to per adapter instance structure
*
* Return Value
* 0 for successful allocation, -ENOMEM for any failure
*/
static int __devinit
pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
{
int i;
pinstance->res_entries =
kzalloc(sizeof(struct pmcraid_resource_entry) *
PMCRAID_MAX_RESOURCES, GFP_KERNEL);
if (NULL == pinstance->res_entries) {
pmcraid_err("failed to allocate memory for resource table\n");
return -ENOMEM;
}
for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
list_add_tail(&pinstance->res_entries[i].queue,
&pinstance->free_res_q);
pinstance->cfg_table =
pci_alloc_consistent(pinstance->pdev,
sizeof(struct pmcraid_config_table),
&pinstance->cfg_table_bus_addr);
if (NULL == pinstance->cfg_table) {
pmcraid_err("couldn't alloc DMA memory for config table\n");
pmcraid_release_config_buffers(pinstance);
return -ENOMEM;
}
if (pmcraid_allocate_hcams(pinstance)) {
pmcraid_err("could not alloc DMA memory for HCAMS\n");
pmcraid_release_config_buffers(pinstance);
return -ENOMEM;
}
return 0;
}
/**
* pmcraid_init_tasklets - registers tasklets for response handling
*
 * @pinstance: pointer to adapter instance structure
*
* Return value
* none
*/
static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
{
int i;
for (i = 0; i < pinstance->num_hrrq; i++)
tasklet_init(&pinstance->isr_tasklet[i],
pmcraid_tasklet_function,
(unsigned long)&pinstance->hrrq_vector[i]);
}
/**
* pmcraid_kill_tasklets - destroys tasklets registered for response handling
*
* @pinstance: pointer to adapter instance structure
*
* Return value
* none
*/
static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
{
int i;
for (i = 0; i < pinstance->num_hrrq; i++)
tasklet_kill(&pinstance->isr_tasklet[i]);
}
/**
* pmcraid_release_buffers - release per-adapter buffers allocated
*
* @pinstance: pointer to adapter soft state
*
* Return Value
* none
*/
static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
{
pmcraid_release_config_buffers(pinstance);
pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
if (pinstance->inq_data != NULL) {
pci_free_consistent(pinstance->pdev,
sizeof(struct pmcraid_inquiry_data),
pinstance->inq_data,
pinstance->inq_data_baddr);
pinstance->inq_data = NULL;
pinstance->inq_data_baddr = 0;
}
if (pinstance->timestamp_data != NULL) {
pci_free_consistent(pinstance->pdev,
sizeof(struct pmcraid_timestamp_data),
pinstance->timestamp_data,
pinstance->timestamp_data_baddr);
pinstance->timestamp_data = NULL;
pinstance->timestamp_data_baddr = 0;
}
}
/**
* pmcraid_init_buffers - allocates memory and initializes various structures
* @pinstance: pointer to per adapter instance structure
*
* This routine pre-allocates memory based on the type of block as below:
* cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
* IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
* config-table entries : DMAable memory using pci_alloc_consistent
* HostRRQs : DMAable memory, using pci_alloc_consistent
*
* Return Value
* 0 in case all of the blocks are allocated, -ENOMEM otherwise.
*/
static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
{
int i;
if (pmcraid_allocate_host_rrqs(pinstance)) {
pmcraid_err("couldn't allocate memory for %d host rrqs\n",
pinstance->num_hrrq);
return -ENOMEM;
}
if (pmcraid_allocate_config_buffers(pinstance)) {
pmcraid_err("couldn't allocate memory for config buffers\n");
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
return -ENOMEM;
}
if (pmcraid_allocate_cmd_blocks(pinstance)) {
pmcraid_err("couldn't allocate memory for cmd blocks\n");
pmcraid_release_config_buffers(pinstance);
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
return -ENOMEM;
}
if (pmcraid_allocate_control_blocks(pinstance)) {
pmcraid_err("couldn't allocate memory control blocks\n");
pmcraid_release_config_buffers(pinstance);
pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
return -ENOMEM;
}
/* allocate DMAable memory for page D0 INQUIRY buffer */
pinstance->inq_data = pci_alloc_consistent(
pinstance->pdev,
sizeof(struct pmcraid_inquiry_data),
&pinstance->inq_data_baddr);
if (pinstance->inq_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
pmcraid_release_buffers(pinstance);
return -ENOMEM;
}
/* allocate DMAable memory for set timestamp data buffer */
pinstance->timestamp_data = pci_alloc_consistent(
pinstance->pdev,
sizeof(struct pmcraid_timestamp_data),
&pinstance->timestamp_data_baddr);
if (pinstance->timestamp_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for \
set time_stamp \n");
pmcraid_release_buffers(pinstance);
return -ENOMEM;
}
/* Initialize all the command blocks and add them to free pool. No
* need to lock (free_pool_lock) as this is done in initialization
* itself
*/
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
pmcraid_init_cmdblk(cmdp, i);
cmdp->drv_inst = pinstance;
list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
}
return 0;
}
/**
* pmcraid_reinit_buffers - resets various buffer pointers
* @pinstance: pointer to adapter instance
* Return value
* none
*/
static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
{
int i;
int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
for (i = 0; i < pinstance->num_hrrq; i++) {
memset(pinstance->hrrq_start[i], 0, buffer_size);
pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
pinstance->hrrq_end[i] =
pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
pinstance->host_toggle_bit[i] = 1;
}
}
/**
* pmcraid_init_instance - initialize per instance data structure
* @pdev: pointer to pci device structure
* @host: pointer to Scsi_Host structure
* @mapped_pci_addr: memory mapped IOA configuration registers
*
* Return Value
* 0 on success, non-zero in case of any failure
*/
static int __devinit pmcraid_init_instance(
struct pci_dev *pdev,
struct Scsi_Host *host,
void __iomem *mapped_pci_addr
)
{
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)host->hostdata;
pinstance->host = host;
pinstance->pdev = pdev;
/* Initialize register addresses */
pinstance->mapped_dma_addr = mapped_pci_addr;
/* Initialize chip-specific details */
{
struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
pint_regs->ioa_host_interrupt_reg =
mapped_pci_addr + chip_cfg->ioa_host_intr;
pint_regs->ioa_host_interrupt_clr_reg =
mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
pint_regs->ioa_host_msix_interrupt_reg =
mapped_pci_addr + chip_cfg->ioa_host_msix_intr;
pint_regs->host_ioa_interrupt_reg =
mapped_pci_addr + chip_cfg->host_ioa_intr;
pint_regs->host_ioa_interrupt_clr_reg =
mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
/* Current version of firmware exposes interrupt mask set
* and mask clr registers through memory mapped bar0.
*/
pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
pint_regs->ioa_host_interrupt_mask_reg =
mapped_pci_addr + chip_cfg->ioa_host_mask;
pint_regs->ioa_host_interrupt_mask_clr_reg =
mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
pint_regs->global_interrupt_mask_reg =
mapped_pci_addr + chip_cfg->global_intr_mask;
};
pinstance->ioa_reset_attempts = 0;
init_waitqueue_head(&pinstance->reset_wait_q);
atomic_set(&pinstance->outstanding_cmds, 0);
atomic_set(&pinstance->last_message_id, 0);
atomic_set(&pinstance->expose_resources, 0);
INIT_LIST_HEAD(&pinstance->free_res_q);
INIT_LIST_HEAD(&pinstance->used_res_q);
INIT_LIST_HEAD(&pinstance->free_cmd_pool);
INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
spin_lock_init(&pinstance->free_pool_lock);
spin_lock_init(&pinstance->pending_pool_lock);
spin_lock_init(&pinstance->resource_lock);
mutex_init(&pinstance->aen_queue_lock);
/* Work-queue (Shared) for deferred processing error handling */
INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
/* Initialize the default log_level */
pinstance->current_log_level = pmcraid_log_level;
/* Setup variables required for reset engine */
pinstance->ioa_state = IOA_STATE_UNKNOWN;
pinstance->reset_cmd = NULL;
return 0;
}
/**
* pmcraid_shutdown - shutdown adapter controller.
* @pdev: pci device struct
*
 * Issues an adapter shutdown to the card and waits for its completion
*
* Return value
* none
*/
static void pmcraid_shutdown(struct pci_dev *pdev)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
pmcraid_reset_bringdown(pinstance);
}
/**
* pmcraid_get_minor - returns unused minor number from minor number bitmap
*/
static unsigned short pmcraid_get_minor(void)
{
int minor;
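	/* Editor's note: find_first_zero_bit() takes its size argument in
	 * bits, so passing sizeof(pmcraid_minor) (bytes) below scans only
	 * one eighth of the bitmap; harmless while far fewer adapters than
	 * that are supported, but worth flagging.
	 */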
minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
__set_bit(minor, pmcraid_minor);
return minor;
}
/**
* pmcraid_release_minor - releases given minor back to minor number bitmap
*/
static void pmcraid_release_minor(unsigned short minor)
{
__clear_bit(minor, pmcraid_minor);
}
/**
* pmcraid_setup_chrdev - allocates a minor number and registers a char device
*
* @pinstance: pointer to adapter instance for which to register device
*
* Return value
* 0 in case of success, otherwise non-zero
*/
static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
{
int minor;
int error;
minor = pmcraid_get_minor();
cdev_init(&pinstance->cdev, &pmcraid_fops);
pinstance->cdev.owner = THIS_MODULE;
error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
if (error)
pmcraid_release_minor(minor);
else
device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
NULL, "%s%u", PMCRAID_DEVFILE, minor);
return error;
}
/**
* pmcraid_release_chrdev - unregisters per-adapter management interface
*
* @pinstance: pointer to adapter instance structure
*
* Return value
* none
*/
static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
{
pmcraid_release_minor(MINOR(pinstance->cdev.dev));
device_destroy(pmcraid_class,
MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
cdev_del(&pinstance->cdev);
}
/**
* pmcraid_remove - IOA hot plug remove entry point
* @pdev: pci device struct
*
* Return value
* none
*/
static void __devexit pmcraid_remove(struct pci_dev *pdev)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
/* remove the management interface (/dev file) for this device */
pmcraid_release_chrdev(pinstance);
/* remove host template from scsi midlayer */
scsi_remove_host(pinstance->host);
/* block requests from mid-layer */
scsi_block_requests(pinstance->host);
/* initiate shutdown adapter */
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
flush_work_sync(&pinstance->worker_q);
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
pmcraid_release_buffers(pinstance);
iounmap(pinstance->mapped_dma_addr);
pci_release_regions(pdev);
scsi_host_put(pinstance->host);
pci_disable_device(pdev);
return;
}
#ifdef CONFIG_PM
/**
* pmcraid_suspend - driver suspend entry point for power management
* @pdev: PCI device structure
* @state: PCI power state to suspend routine
*
* Return Value - 0 always
*/
static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
pmcraid_kill_tasklets(pinstance);
pci_set_drvdata(pinstance->pdev, pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
/**
* pmcraid_resume - driver resume entry point PCI power management
* @pdev: PCI device structure
*
* Return Value - 0 in case of success. Error code in case of any failure
*/
static int pmcraid_resume(struct pci_dev *pdev)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
struct Scsi_Host *host = pinstance->host;
int rc;
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
rc = pci_enable_device(pdev);
if (rc) {
dev_err(&pdev->dev, "resume: Enable device failed\n");
return rc;
}
pci_set_master(pdev);
if ((sizeof(dma_addr_t) == 4) ||
pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc == 0)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc != 0) {
dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
goto disable_device;
}
pmcraid_disable_interrupts(pinstance, ~0);
atomic_set(&pinstance->outstanding_cmds, 0);
rc = pmcraid_register_interrupt_handler(pinstance);
if (rc) {
dev_err(&pdev->dev,
"resume: couldn't register interrupt handlers\n");
rc = -ENODEV;
goto release_host;
}
pmcraid_init_tasklets(pinstance);
pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
/* Start with hard reset sequence which brings up IOA to operational
* state as well as completes the reset sequence.
*/
pinstance->ioa_hard_reset = 1;
/* Start IOA firmware initialization and bring card to Operational
* state.
*/
if (pmcraid_reset_bringup(pinstance)) {
dev_err(&pdev->dev, "couldn't initialize IOA\n");
rc = -ENODEV;
goto release_tasklets;
}
return 0;
release_tasklets:
pmcraid_disable_interrupts(pinstance, ~0);
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
release_host:
scsi_host_put(host);
disable_device:
pci_disable_device(pdev);
return rc;
}
#else
#define pmcraid_suspend NULL
#define pmcraid_resume NULL
#endif /* CONFIG_PM */
/**
* pmcraid_complete_ioa_reset - Called by either timer or tasklet during
* completion of the ioa reset
* @cmd: pointer to reset command block
*/
static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long flags;
spin_lock_irqsave(pinstance->host->host_lock, flags);
pmcraid_ioa_reset(cmd);
spin_unlock_irqrestore(pinstance->host->host_lock, flags);
scsi_unblock_requests(pinstance->host);
schedule_work(&pinstance->worker_q);
}
/**
* pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
*
* @cmd: pointer to pmcraid_cmd structure
*
 * Return Value
 *	None
*/
static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
pmcraid_reinit_cmdblk(cmd);
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
/* If this was called as part of resource table reinitialization due to
* lost CCN, it is enough to return the command block back to free pool
* as part of set_supported_devs completion function.
*/
if (cmd->drv_inst->reinit_cfg_table) {
cmd->drv_inst->reinit_cfg_table = 0;
cmd->release = 1;
cmd_done = pmcraid_reinit_cfgtable_done;
}
/* we will be done with the reset sequence after set supported devices,
* setup the done function to return the command block back to free
* pool
*/
pmcraid_send_cmd(cmd,
cmd_done,
PMCRAID_SET_SUP_DEV_TIMEOUT,
pmcraid_timeout_handler);
return;
}
/**
* pmcraid_set_timestamp - set the timestamp to IOAFP
*
* @cmd: pointer to pmcraid_cmd structure
*
 * Return Value
 *	None
*/
static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
__be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
struct timeval tv;
	u64 timestamp;
	do_gettimeofday(&tv);
	timestamp = (u64)tv.tv_sec * 1000;
pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40);
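	/* Editor's reading of the stores above (not documented in the
	 * original): the IOA appears to expect the timestamp as a 48-bit
	 * little-endian count of milliseconds since the epoch, hence the
	 * byte-at-a-time extraction into timestamp_data->timestamp[0..5].
	 */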
pmcraid_reinit_cmdblk(cmd);
ioarcb->request_type = REQ_TYPE_SCSI;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
ioarcb->data_transfer_length =
cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
ioadl = &(ioarcb->add_data.u.ioadl[0]);
ioadl->flags = IOADL_FLAGS_LAST_DESC;
ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
if (!pinstance->timestamp_error) {
pinstance->timestamp_error = 0;
pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
} else {
pmcraid_send_cmd(cmd, pmcraid_return_cmd,
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
return;
}
}
/**
* pmcraid_init_res_table - Initialize the resource table
* @cmd: pointer to pmcraid command struct
*
* This function looks through the existing resource table, comparing
* it with the config table. This function will take care of old/new
* devices and schedule adding/removing them from the mid-layer
* as appropriate.
*
* Return value
* None
*/
static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
{
struct pmcraid_instance *pinstance = cmd->drv_inst;
struct pmcraid_resource_entry *res, *temp;
struct pmcraid_config_table_entry *cfgte;
unsigned long lock_flags;
int found, rc, i;
u16 fw_version;
LIST_HEAD(old_res);
if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
pmcraid_err("IOA requires microcode download\n");
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
/* resource list is protected by pinstance->resource_lock.
* init_res_table can be called from probe (user-thread) or runtime
* reset (timer/tasklet)
*/
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
list_move_tail(&res->queue, &old_res);
for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
		if (fw_version <= PMCRAID_FW_VERSION_1)
cfgte = &pinstance->cfg_table->entries[i];
else
cfgte = (struct pmcraid_config_table_entry *)
&pinstance->cfg_table->entries_ext[i];
if (!pmcraid_expose_resource(fw_version, cfgte))
continue;
found = 0;
/* If this entry was already detected and initialized */
list_for_each_entry_safe(res, temp, &old_res, queue) {
rc = memcmp(&res->cfg_entry.resource_address,
&cfgte->resource_address,
sizeof(cfgte->resource_address));
if (!rc) {
list_move_tail(&res->queue,
&pinstance->used_res_q);
found = 1;
break;
}
}
		/* If this is a new entry, initialize it and add it to the queue */
if (!found) {
if (list_empty(&pinstance->free_res_q)) {
pmcraid_err("Too many devices attached\n");
break;
}
found = 1;
res = list_entry(pinstance->free_res_q.next,
struct pmcraid_resource_entry, queue);
res->scsi_dev = NULL;
res->change_detected = RES_CHANGE_ADD;
res->reset_progress = 0;
list_move_tail(&res->queue, &pinstance->used_res_q);
}
/* copy new configuration table entry details into driver
* maintained resource entry
*/
if (found) {
memcpy(&res->cfg_entry, cfgte,
pinstance->config_table_entry_size);
pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
res->cfg_entry.resource_type,
(fw_version <= PMCRAID_FW_VERSION_1 ?
res->cfg_entry.unique_flags1 :
res->cfg_entry.array_id & 0xFF),
le32_to_cpu(res->cfg_entry.resource_address));
}
}
/* Detect any deleted entries, mark them for deletion from mid-layer */
list_for_each_entry_safe(res, temp, &old_res, queue) {
if (res->scsi_dev) {
res->change_detected = RES_CHANGE_DEL;
res->cfg_entry.resource_handle =
PMCRAID_INVALID_RES_HANDLE;
list_move_tail(&res->queue, &pinstance->used_res_q);
} else {
list_move_tail(&res->queue, &pinstance->free_res_q);
}
}
/* release the resource list lock */
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
pmcraid_set_timestamp(cmd);
}
/**
* pmcraid_querycfg - Send a Query IOA Config to the adapter.
 * @cmd: pointer to pmcraid_cmd struct
*
* This function sends a Query IOA Configuration command to the adapter to
* retrieve the IOA configuration table.
*
* Return value:
* none
*/
static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
struct pmcraid_instance *pinstance = cmd->drv_inst;
	__be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
if (be16_to_cpu(pinstance->inq_data->fw_version) <=
PMCRAID_FW_VERSION_1)
pinstance->config_table_entry_size =
sizeof(struct pmcraid_config_table_entry);
else
pinstance->config_table_entry_size =
sizeof(struct pmcraid_config_table_entry_ext);
ioarcb->request_type = REQ_TYPE_IOACMD;
ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
/* firmware requires 4-byte length field, specified in B.E format */
memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
/* Since entire config table can be described by single IOADL, it can
* be part of IOARCB itself
*/
ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->data_transfer_length =
cpu_to_le32(sizeof(struct pmcraid_config_table));
ioadl = &(ioarcb->add_data.u.ioadl[0]);
ioadl->flags = IOADL_FLAGS_LAST_DESC;
ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
pmcraid_send_cmd(cmd, pmcraid_init_res_table,
PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
}
/**
* pmcraid_probe - PCI probe entry pointer for PMC MaxRAID controller driver
* @pdev: pointer to pci device structure
* @dev_id: pointer to device ids structure
*
* Return Value
* returns 0 if the device is claimed and successfully configured.
* returns non-zero error code in case of any failure
*/
static int __devinit pmcraid_probe(
struct pci_dev *pdev,
const struct pci_device_id *dev_id
)
{
struct pmcraid_instance *pinstance;
struct Scsi_Host *host;
void __iomem *mapped_pci_addr;
int rc = PCIBIOS_SUCCESSFUL;
if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
pmcraid_err
("maximum number(%d) of supported adapters reached\n",
atomic_read(&pmcraid_adapter_count));
return -ENOMEM;
}
atomic_inc(&pmcraid_adapter_count);
rc = pci_enable_device(pdev);
if (rc) {
dev_err(&pdev->dev, "Cannot enable adapter\n");
atomic_dec(&pmcraid_adapter_count);
return rc;
}
dev_info(&pdev->dev,
"Found new IOA(%x:%x), Total IOA count: %d\n",
pdev->vendor, pdev->device,
atomic_read(&pmcraid_adapter_count));
rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
if (rc < 0) {
dev_err(&pdev->dev,
"Couldn't register memory range of registers\n");
goto out_disable_device;
}
mapped_pci_addr = pci_iomap(pdev, 0, 0);
if (!mapped_pci_addr) {
dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
rc = -ENOMEM;
goto out_release_regions;
}
pci_set_master(pdev);
/* Firmware requires the system bus address of IOARCB to be within
* 32-bit addressable range though it has 64-bit IOARRIN register.
* However, firmware supports 64-bit streaming DMA buffers, whereas
* coherent buffers are to be 32-bit. Since pci_alloc_consistent always
* returns memory within 4GB (if not, change this logic), coherent
* buffers are within firmware acceptable address ranges.
*/
if ((sizeof(dma_addr_t) == 4) ||
pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
/* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
* bit mask for pci_alloc_consistent to return addresses within 4GB
*/
if (rc == 0)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc != 0) {
dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
goto cleanup_nomem;
}
host = scsi_host_alloc(&pmcraid_host_template,
sizeof(struct pmcraid_instance));
if (!host) {
dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
rc = -ENOMEM;
goto cleanup_nomem;
}
host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
host->unique_id = host->host_no;
host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
/* zero out entire instance structure */
pinstance = (struct pmcraid_instance *)host->hostdata;
memset(pinstance, 0, sizeof(*pinstance));
pinstance->chip_cfg =
(struct pmcraid_chip_details *)(dev_id->driver_data);
rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
if (rc < 0) {
dev_err(&pdev->dev, "failed to initialize adapter instance\n");
goto out_scsi_host_put;
}
pci_set_drvdata(pdev, pinstance);
/* Save PCI config-space for use following the reset */
rc = pci_save_state(pinstance->pdev);
if (rc != 0) {
dev_err(&pdev->dev, "Failed to save PCI config space\n");
goto out_scsi_host_put;
}
pmcraid_disable_interrupts(pinstance, ~0);
rc = pmcraid_register_interrupt_handler(pinstance);
if (rc) {
dev_err(&pdev->dev, "couldn't register interrupt handler\n");
goto out_scsi_host_put;
}
pmcraid_init_tasklets(pinstance);
	/* allocate various buffers used by LLD */
rc = pmcraid_init_buffers(pinstance);
if (rc) {
pmcraid_err("couldn't allocate memory blocks\n");
goto out_unregister_isr;
}
/* check the reset type required */
pmcraid_reset_type(pinstance);
pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
/* Start IOA firmware initialization and bring card to Operational
* state.
*/
pmcraid_info("starting IOA initialization sequence\n");
if (pmcraid_reset_bringup(pinstance)) {
dev_err(&pdev->dev, "couldn't initialize IOA\n");
rc = 1;
goto out_release_bufs;
}
/* Add adapter instance into mid-layer list */
rc = scsi_add_host(pinstance->host, &pdev->dev);
if (rc != 0) {
pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
goto out_release_bufs;
}
scsi_scan_host(pinstance->host);
rc = pmcraid_setup_chrdev(pinstance);
if (rc != 0) {
pmcraid_err("couldn't create mgmt interface, error: %x\n",
rc);
goto out_remove_host;
}
/* Schedule worker thread to handle CCN and take care of adding and
* removing devices to OS
*/
atomic_set(&pinstance->expose_resources, 1);
schedule_work(&pinstance->worker_q);
return rc;
out_remove_host:
scsi_remove_host(host);
out_release_bufs:
pmcraid_release_buffers(pinstance);
out_unregister_isr:
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
out_scsi_host_put:
scsi_host_put(host);
cleanup_nomem:
iounmap(mapped_pci_addr);
out_release_regions:
pci_release_regions(pdev);
out_disable_device:
atomic_dec(&pmcraid_adapter_count);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return -ENODEV;
}
/*
* PCI driver structure of pcmraid driver
*/
static struct pci_driver pmcraid_driver = {
.name = PMCRAID_DRIVER_NAME,
.id_table = pmcraid_pci_table,
.probe = pmcraid_probe,
.remove = pmcraid_remove,
.suspend = pmcraid_suspend,
.resume = pmcraid_resume,
.shutdown = pmcraid_shutdown
};
/**
* pmcraid_init - module load entry point
*/
static int __init pmcraid_init(void)
{
dev_t dev;
int error;
pmcraid_info("%s Device Driver version: %s\n",
PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
error = alloc_chrdev_region(&dev, 0,
PMCRAID_MAX_ADAPTERS,
PMCRAID_DEVFILE);
if (error) {
pmcraid_err("failed to get a major number for adapters\n");
goto out_init;
}
pmcraid_major = MAJOR(dev);
pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
if (IS_ERR(pmcraid_class)) {
error = PTR_ERR(pmcraid_class);
pmcraid_err("failed to register with with sysfs, error = %x\n",
error);
goto out_unreg_chrdev;
}
error = pmcraid_netlink_init();
if (error)
goto out_unreg_chrdev;
error = pci_register_driver(&pmcraid_driver);
if (error == 0)
goto out_init;
pmcraid_err("failed to register pmcraid driver, error = %x\n",
error);
class_destroy(pmcraid_class);
pmcraid_netlink_release();
out_unreg_chrdev:
unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
out_init:
return error;
}
/**
* pmcraid_exit - module unload entry point
*/
static void __exit pmcraid_exit(void)
{
pmcraid_netlink_release();
unregister_chrdev_region(MKDEV(pmcraid_major, 0),
PMCRAID_MAX_ADAPTERS);
pci_unregister_driver(&pmcraid_driver);
class_destroy(pmcraid_class);
}
module_init(pmcraid_init);
module_exit(pmcraid_exit);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3484_0 |
crossvul-cpp_data_bad_1411_1 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
* Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include "disasm.h"
static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
* All paths of conditional branches are analyzed until 'bpf_exit' insn.
*
* The first pass is depth-first-search to check that the program is a DAG.
* It rejects the following programs:
* - larger than BPF_MAXINSNS insns
* - if loop is present (detected via back-edge)
* - unreachable insns exist (shouldn't be a forest. program = one function)
* - out of bounds or malformed jumps
* The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
* Number of 'branches to be analyzed' is limited to 1k
*
* On entry to each instruction, each register has a type, and the instruction
* changes the types of the registers depending on instruction semantics.
* If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
* copied to R1.
*
* All registers are 64-bit.
* R0 - return register
* R1-R5 argument passing registers
* R6-R9 callee saved registers
* R10 - frame pointer read-only
*
* At the start of BPF program the register R1 contains a pointer to bpf_context
* and has type PTR_TO_CTX.
*
* Verifier tracks arithmetic operations on pointers in case:
* BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
* 1st insn copies R10 (which has FRAME_PTR) type into R1
* and 2nd arithmetic instruction is pattern matched to recognize
* that it wants to construct a pointer to some element within stack.
* So after 2nd insn, the register R1 has type PTR_TO_STACK
* (and -20 constant is saved for further stack bounds checking).
* Meaning that this reg is a pointer to stack plus known immediate constant.
*
* Most of the time the registers have SCALAR_VALUE type, which
* means the register has some value, but it's not a valid pointer.
* (like pointer plus pointer becomes SCALAR_VALUE type)
*
* When verifier sees load or store instructions the type of base register
* can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
* four pointer types recognized by check_mem_access() function.
*
* PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
* and the range of [ptr, ptr + map's value_size) is accessible.
*
* registers used to pass values to function calls are checked against
* function argument constraints.
*
* ARG_PTR_TO_MAP_KEY is one of such argument constraints.
* It means that the register type passed to this function must be
* PTR_TO_STACK and it will be used inside the function as
* 'pointer to map element key'
*
* For example the argument constraints for bpf_map_lookup_elem():
* .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
* .arg1_type = ARG_CONST_MAP_PTR,
* .arg2_type = ARG_PTR_TO_MAP_KEY,
*
* ret_type says that this function returns 'pointer to map elem value or null'
* function expects 1st argument to be a const pointer to 'struct bpf_map' and
* 2nd argument should be a pointer to stack, which will be used inside
* the helper function as a pointer to map element key.
*
* On the kernel side the helper function looks like:
* u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
* {
* struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
* void *key = (void *) (unsigned long) r2;
* void *value;
*
* here kernel can access 'key' and 'map' pointers safely, knowing that
* [key, key + map->key_size) bytes are valid and were initialized on
* the stack of eBPF program.
* }
*
* Corresponding eBPF program may look like:
* BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
* BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
* BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
* BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
* here verifier looks at prototype of map_lookup_elem() and sees:
* .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
* Now verifier knows that this map has key of R1->map_ptr->key_size bytes
*
* Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
* Now verifier checks that [R2, R2 + map's key_size) are within stack limits
* and were initialized prior to this call.
* If it's ok, then verifier allows this BPF_CALL insn and looks at
* .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
* R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
*
* When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
* insn, the register holding that pointer in the true branch changes state to
* PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
* branch. See check_cond_jmp_op().
*
* After the call R0 is set to return type of the function and registers R1-R5
* are set to NOT_INIT to indicate that they are no longer readable.
*
* The following reference types represent a potential reference to a kernel
* resource which, after first being allocated, must be checked and freed by
* the BPF program:
* - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
*
* When the verifier sees a helper call return a reference type, it allocates a
* pointer id for the reference and stores it in the current function state.
* Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
* PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
* passes through a NULL-check conditional. For the branch wherein the state is
* changed to CONST_IMM, the verifier releases the reference.
*
* For each helper function that allocates a reference, such as
* bpf_sk_lookup_tcp(), there is a corresponding release function, such as
* bpf_sk_release(). When a reference type passes into the release function,
* the verifier also releases the reference. If any unchecked or unreleased
* reference remains at the end of the program, the verifier rejects it.
*/
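/* Illustrative sketch (added by the editor, in the style of the examples
 * above): a program exercising the reference-tracking rules might look like
 *
 *    ...set up the bpf_sk_lookup_tcp() arguments in R1-R5...
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *    // R0 is now PTR_TO_SOCKET_OR_NULL with a fresh reference id
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),   // NULL check
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),     // true branch: PTR_TO_SOCKET
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *    BPF_EXIT_INSN(),
 *
 * If the bpf_sk_release() call were dropped from the non-NULL path, the
 * verifier would reject the program for leaking the reference.
 */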
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
* before processing instruction 'insn_idx'
* and after processing instruction 'prev_insn_idx'
*/
struct bpf_verifier_state st;
int insn_idx;
int prev_insn_idx;
struct bpf_verifier_stack_elem *next;
};
#define BPF_COMPLEXITY_LIMIT_INSNS 131072
#define BPF_COMPLEXITY_LIMIT_STACK 1024
#define BPF_COMPLEXITY_LIMIT_STATES 64
#define BPF_MAP_PTR_UNPRIV 1UL
#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
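/* Editor's note: the low bit of aux->map_state doubles as the "unpriv"
 * flag, which relies on struct bpf_map pointers having at least 2-byte
 * alignment; the BUILD_BUG_ON() in bpf_map_ptr_store() below cross-checks
 * that the poison value does not collide with that flag bit.
 */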
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}
static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
return aux->map_state & BPF_MAP_PTR_UNPRIV;
}
static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
const struct bpf_map *map, bool unpriv)
{
BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
unpriv |= bpf_map_ptr_unpriv(aux);
aux->map_state = (unsigned long)map |
(unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
bool pkt_access;
int regno;
int access_size;
s64 msize_smax_value;
u64 msize_umax_value;
int ptr_id;
};
static DEFINE_MUTEX(bpf_verifier_lock);
static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
const struct bpf_line_info *linfo;
const struct bpf_prog *prog;
u32 i, nr_linfo;
prog = env->prog;
nr_linfo = prog->aux->nr_linfo;
if (!nr_linfo || insn_off >= prog->len)
return NULL;
linfo = prog->aux->linfo;
for (i = 1; i < nr_linfo; i++)
if (insn_off < linfo[i].insn_off)
break;
return &linfo[i - 1];
}
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
va_list args)
{
unsigned int n;
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
"verifier log line truncated - local buffer too short\n");
n = min(log->len_total - log->len_used - 1, n);
log->kbuf[n] = '\0';
if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
log->len_used += n;
else
log->ubuf = NULL;
}
/* log_level controls verbosity level of eBPF verifier.
* bpf_verifier_log_write() is used to dump the verification trace to the log,
* so the user can figure out what's wrong with the program
*/
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...)
{
va_list args;
if (!bpf_verifier_log_needed(&env->log))
return;
va_start(args, fmt);
bpf_verifier_vlog(&env->log, fmt, args);
va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
struct bpf_verifier_env *env = private_data;
va_list args;
if (!bpf_verifier_log_needed(&env->log))
return;
va_start(args, fmt);
bpf_verifier_vlog(&env->log, fmt, args);
va_end(args);
}
static const char *ltrim(const char *s)
{
while (isspace(*s))
s++;
return s;
}
__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...)
{
const struct bpf_line_info *linfo;
if (!bpf_verifier_log_needed(&env->log))
return;
linfo = find_linfo(env, insn_off);
if (!linfo || linfo == env->prev_linfo)
return;
if (prefix_fmt) {
va_list args;
va_start(args, prefix_fmt);
bpf_verifier_vlog(&env->log, prefix_fmt, args);
va_end(args);
}
verbose(env, "%s\n",
ltrim(btf_name_by_offset(env->prog->aux->btf,
linfo->line_off)));
env->prev_linfo = linfo;
}
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_PACKET ||
type == PTR_TO_PACKET_META;
}
static bool reg_type_may_be_null(enum bpf_reg_type type)
{
return type == PTR_TO_MAP_VALUE_OR_NULL ||
type == PTR_TO_SOCKET_OR_NULL;
}
static bool type_is_refcounted(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET;
}
static bool type_is_refcounted_or_null(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
}
static bool reg_is_refcounted(const struct bpf_reg_state *reg)
{
return type_is_refcounted(reg->type);
}
static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
{
return type_is_refcounted_or_null(reg->type);
}
static bool arg_type_is_refcounted(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_SOCKET;
}
/* Determine whether the function releases some resources allocated by another
* function call. The first reference type argument will be assumed to be
* released by release_reference().
*/
static bool is_release_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_release;
}
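/* A minimal sketch of the acquire/release pairing this checks for; the only
 * tracked reference type here is a socket, acquired e.g. via
 * bpf_sk_lookup_tcp() and dropped via bpf_sk_release(). Roughly:
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), 0, 0);
 *	if (!sk)
 *		return 0;		// nothing acquired on this path
 *	...
 *	bpf_sk_release(sk);		// must happen before program exit
 *
 * Any path that exits while the reference is still held is rejected.
 */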
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
[SCALAR_VALUE] = "inv",
[PTR_TO_CTX] = "ctx",
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
[PTR_TO_STACK] = "fp",
[PTR_TO_PACKET] = "pkt",
[PTR_TO_PACKET_META] = "pkt_meta",
[PTR_TO_PACKET_END] = "pkt_end",
[PTR_TO_FLOW_KEYS] = "flow_keys",
[PTR_TO_SOCKET] = "sock",
[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
};
static char slot_type_char[] = {
[STACK_INVALID] = '?',
[STACK_SPILL] = 'r',
[STACK_MISC] = 'm',
[STACK_ZERO] = '0',
};
static void print_liveness(struct bpf_verifier_env *env,
enum bpf_reg_liveness live)
{
if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
verbose(env, "_");
if (live & REG_LIVE_READ)
verbose(env, "r");
if (live & REG_LIVE_WRITTEN)
verbose(env, "w");
if (live & REG_LIVE_DONE)
verbose(env, "D");
}
static struct bpf_func_state *func(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg)
{
struct bpf_verifier_state *cur = env->cur_state;
return cur->frame[reg->frameno];
}
static void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state)
{
const struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
if (state->frameno)
verbose(env, " frame%d:", state->frameno);
for (i = 0; i < MAX_BPF_REG; i++) {
reg = &state->regs[i];
t = reg->type;
if (t == NOT_INIT)
continue;
verbose(env, " R%d", i);
print_liveness(env, reg->live);
verbose(env, "=%s", reg_type_str[t]);
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
if (t == PTR_TO_STACK)
verbose(env, ",call_%d", func(env, reg)->callsite);
} else {
verbose(env, "(id=%d", reg->id);
if (t != SCALAR_VALUE)
verbose(env, ",off=%d", reg->off);
if (type_is_pkt_pointer(t))
verbose(env, ",r=%d", reg->range);
else if (t == CONST_PTR_TO_MAP ||
t == PTR_TO_MAP_VALUE ||
t == PTR_TO_MAP_VALUE_OR_NULL)
verbose(env, ",ks=%d,vs=%d",
reg->map_ptr->key_size,
reg->map_ptr->value_size);
if (tnum_is_const(reg->var_off)) {
/* Typically an immediate SCALAR_VALUE, but
* could be a pointer whose offset is too big
* for reg->off
*/
verbose(env, ",imm=%llx", reg->var_off.value);
} else {
if (reg->smin_value != reg->umin_value &&
reg->smin_value != S64_MIN)
verbose(env, ",smin_value=%lld",
(long long)reg->smin_value);
if (reg->smax_value != reg->umax_value &&
reg->smax_value != S64_MAX)
verbose(env, ",smax_value=%lld",
(long long)reg->smax_value);
if (reg->umin_value != 0)
verbose(env, ",umin_value=%llu",
(unsigned long long)reg->umin_value);
if (reg->umax_value != U64_MAX)
verbose(env, ",umax_value=%llu",
(unsigned long long)reg->umax_value);
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, ",var_off=%s", tn_buf);
}
}
verbose(env, ")");
}
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
char types_buf[BPF_REG_SIZE + 1];
bool valid = false;
int j;
for (j = 0; j < BPF_REG_SIZE; j++) {
if (state->stack[i].slot_type[j] != STACK_INVALID)
valid = true;
types_buf[j] = slot_type_char[
state->stack[i].slot_type[j]];
}
types_buf[BPF_REG_SIZE] = 0;
if (!valid)
continue;
verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
print_liveness(env, state->stack[i].spilled_ptr.live);
if (state->stack[i].slot_type[0] == STACK_SPILL)
verbose(env, "=%s",
reg_type_str[state->stack[i].spilled_ptr.type]);
else
verbose(env, "=%s", types_buf);
}
if (state->acquired_refs && state->refs[0].id) {
verbose(env, " refs=%d", state->refs[0].id);
for (i = 1; i < state->acquired_refs; i++)
if (state->refs[i].id)
verbose(env, ",%d", state->refs[i].id);
}
verbose(env, "\n");
}
#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
static int copy_##NAME##_state(struct bpf_func_state *dst, \
const struct bpf_func_state *src) \
{ \
if (!src->FIELD) \
return 0; \
if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
/* internal bug, make state invalid to reject the program */ \
memset(dst, 0, sizeof(*dst)); \
return -EFAULT; \
} \
memcpy(dst->FIELD, src->FIELD, \
sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
return 0; \
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN
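/* For reference, COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
 * above expands to roughly:
 *
 *	static int copy_stack_state(struct bpf_func_state *dst,
 *				    const struct bpf_func_state *src)
 *	{
 *		if (!src->stack)
 *			return 0;
 *		if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
 *			memset(dst, 0, sizeof(*dst));
 *			return -EFAULT;
 *		}
 *		memcpy(dst->stack, src->stack,
 *		       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
 *		return 0;
 *	}
 */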
#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
bool copy_old) \
{ \
u32 old_size = state->COUNT; \
struct bpf_##NAME##_state *new_##FIELD; \
int slot = size / SIZE; \
\
if (size <= old_size || !size) { \
if (copy_old) \
return 0; \
state->COUNT = slot * SIZE; \
if (!size && old_size) { \
kfree(state->FIELD); \
state->FIELD = NULL; \
} \
return 0; \
} \
new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
GFP_KERNEL); \
if (!new_##FIELD) \
return -ENOMEM; \
if (copy_old) { \
if (state->FIELD) \
memcpy(new_##FIELD, state->FIELD, \
sizeof(*new_##FIELD) * (old_size / SIZE)); \
memset(new_##FIELD + old_size / SIZE, 0, \
sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
} \
state->COUNT = slot * SIZE; \
kfree(state->FIELD); \
state->FIELD = new_##FIELD; \
return 0; \
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN
/* do_check() starts with a zero-sized stack in struct bpf_verifier_state to
 * make it consume a minimal amount of memory. When the program accesses the
 * stack, check_stack_write() calls into realloc_func_state() to grow the
 * stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to the previous
 * bpf_verifier_state, which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
int refs_size, bool copy_old)
{
int err = realloc_reference_state(state, refs_size, copy_old);
if (err)
return err;
return realloc_stack_state(state, stack_size, copy_old);
}
/* Acquire a pointer id from the env and update the state->refs to include
* this new pointer reference.
* On success, returns a valid pointer id to associate with the register
* On failure, returns a negative errno.
*/
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_func_state *state = cur_func(env);
int new_ofs = state->acquired_refs;
int id, err;
err = realloc_reference_state(state, state->acquired_refs + 1, true);
if (err)
return err;
id = ++env->id_gen;
state->refs[new_ofs].id = id;
state->refs[new_ofs].insn_idx = insn_idx;
return id;
}
/* release function corresponding to acquire_reference_state(). Idempotent. */
static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
{
int i, last_idx;
if (!ptr_id)
return -EFAULT;
last_idx = state->acquired_refs - 1;
for (i = 0; i < state->acquired_refs; i++) {
if (state->refs[i].id == ptr_id) {
if (last_idx && i != last_idx)
memcpy(&state->refs[i], &state->refs[last_idx],
sizeof(*state->refs));
memset(&state->refs[last_idx], 0, sizeof(*state->refs));
state->acquired_refs--;
return 0;
}
}
return -EFAULT;
}
/* variation on the above for cases where we expect that there must be an
* outstanding reference for the specified ptr_id.
*/
static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
{
struct bpf_func_state *state = cur_func(env);
int err;
err = __release_reference_state(state, ptr_id);
if (WARN_ON_ONCE(err != 0))
verbose(env, "verifier internal error: can't release reference\n");
return err;
}
static int transfer_reference_state(struct bpf_func_state *dst,
struct bpf_func_state *src)
{
int err = realloc_reference_state(dst, src->acquired_refs, false);
if (err)
return err;
err = copy_reference_state(dst, src);
if (err)
return err;
return 0;
}
static void free_func_state(struct bpf_func_state *state)
{
if (!state)
return;
kfree(state->refs);
kfree(state->stack);
kfree(state);
}
static void free_verifier_state(struct bpf_verifier_state *state,
bool free_self)
{
int i;
for (i = 0; i <= state->curframe; i++) {
free_func_state(state->frame[i]);
state->frame[i] = NULL;
}
if (free_self)
kfree(state);
}
/* copy verifier state from src to dst growing dst stack space
* when necessary to accommodate larger src stack
*/
static int copy_func_state(struct bpf_func_state *dst,
const struct bpf_func_state *src)
{
int err;
err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
false);
if (err)
return err;
memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
err = copy_reference_state(dst, src);
if (err)
return err;
return copy_stack_state(dst, src);
}
static int copy_verifier_state(struct bpf_verifier_state *dst_state,
const struct bpf_verifier_state *src)
{
struct bpf_func_state *dst;
int i, err;
	/* if dst has more stack frames than src, free them */
for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
free_func_state(dst_state->frame[i]);
dst_state->frame[i] = NULL;
}
dst_state->curframe = src->curframe;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst)
return -ENOMEM;
dst_state->frame[i] = dst;
}
err = copy_func_state(dst, src->frame[i]);
if (err)
return err;
}
return 0;
}
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem, *head = env->head;
int err;
if (env->head == NULL)
return -ENOENT;
if (cur) {
err = copy_verifier_state(cur, &head->st);
if (err)
return err;
}
if (insn_idx)
*insn_idx = head->insn_idx;
if (prev_insn_idx)
*prev_insn_idx = head->prev_insn_idx;
elem = head->next;
free_verifier_state(&head->st, false);
kfree(head);
env->head = elem;
env->stack_size--;
return 0;
}
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem;
int err;
elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
if (!elem)
goto err;
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
elem->next = env->head;
env->head = elem;
env->stack_size++;
err = copy_verifier_state(&elem->st, cur);
if (err)
goto err;
if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
verbose(env, "BPF program is too complex\n");
goto err;
}
return &elem->st;
err:
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
/* pop all elements and return */
while (!pop_stack(env, NULL, NULL));
return NULL;
}
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
static void __mark_reg_not_init(struct bpf_reg_state *reg);
/* Mark the unknown part of a register (variable offset or scalar value) as
* known to have the value @imm.
*/
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
/* Clear id, off, and union(map_ptr, range) */
memset(((u8 *)reg) + sizeof(reg->type), 0,
offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
reg->var_off = tnum_const(imm);
reg->smin_value = (s64)imm;
reg->smax_value = (s64)imm;
reg->umin_value = imm;
reg->umax_value = imm;
}
/* Mark the 'variable offset' part of a register as zero. This should be
* used only on registers holding a pointer type.
*/
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
}
static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
reg->type = SCALAR_VALUE;
}
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs */
for (regno = 0; regno < MAX_BPF_REG; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_known_zero(regs + regno);
}
static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
return type_is_pkt_pointer(reg->type);
}
static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
return reg_is_pkt_pointer(reg) ||
reg->type == PTR_TO_PACKET_END;
}
/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
enum bpf_reg_type which)
{
/* The register can already have a range from prior markings.
* This is fine as long as it hasn't been advanced from its
* origin.
*/
return reg->type == which &&
reg->id == 0 &&
reg->off == 0 &&
tnum_equals_const(reg->var_off, 0);
}
/* Attempts to improve min/max values based on var_off information */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
/* min signed is max(sign bit) | min(other bits) */
reg->smin_value = max_t(s64, reg->smin_value,
reg->var_off.value | (reg->var_off.mask & S64_MIN));
/* max signed is min(sign bit) | max(other bits) */
reg->smax_value = min_t(s64, reg->smax_value,
reg->var_off.value | (reg->var_off.mask & S64_MAX));
reg->umin_value = max(reg->umin_value, reg->var_off.value);
reg->umax_value = min(reg->umax_value,
reg->var_off.value | reg->var_off.mask);
}
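/* A worked example of the tightening above: a tnum {.value = 0x10,
 * .mask = 0x0f} says bit 4 is known set and the low four bits are unknown,
 * i.e. the register lies in [0x10, 0x1f]. The code then tightens
 *
 *	umin_value = max(umin_value, 0x10);		// known value bits
 *	umax_value = min(umax_value, 0x10 | 0x0f);	// value | mask = 0x1f
 *
 * and analogously for the signed bounds, folding the mask's sign bit into
 * smin and the remaining unknown bits into smax.
 */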
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
/* Learn sign from signed bounds.
* If we cannot cross the sign boundary, then signed and unsigned bounds
* are the same, so combine. This works even in the negative case, e.g.
* -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
*/
if (reg->smin_value >= 0 || reg->smax_value < 0) {
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
reg->umax_value);
return;
}
/* Learn sign from unsigned bounds. Signed bounds cross the sign
* boundary, so we must be careful.
*/
if ((s64)reg->umax_value >= 0) {
/* Positive. We can't learn anything from the smin, but smax
* is positive, hence safe.
*/
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
reg->umax_value);
} else if ((s64)reg->umin_value < 0) {
/* Negative. We can't learn anything from the smax, but smin
* is negative, hence safe.
*/
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
reg->smax_value = reg->umax_value;
}
}
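/* Example for the unsigned-to-signed direction above: with umin = 1 and
 * umax = 100, (s64)umax >= 0, so every feasible value is non-negative when
 * reinterpreted as signed; smin can safely be raised to umin = 1 and smax
 * capped at min(smax, 100) without excluding any feasible value.
 */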
/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
reg->var_off = tnum_intersect(reg->var_off,
tnum_range(reg->umin_value,
reg->umax_value));
}
/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
reg->smin_value = S64_MIN;
reg->smax_value = S64_MAX;
reg->umin_value = 0;
reg->umax_value = U64_MAX;
}
/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
/*
* Clear type, id, off, and union(map_ptr, range) and
* padding between 'type' and union
*/
memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
reg->type = SCALAR_VALUE;
reg->var_off = tnum_unknown;
reg->frameno = 0;
__mark_reg_unbounded(reg);
}
static void mark_reg_unknown(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_unknown(regs + regno);
}
static void __mark_reg_not_init(struct bpf_reg_state *reg)
{
__mark_reg_unknown(reg);
reg->type = NOT_INIT;
}
static void mark_reg_not_init(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_not_init(regs + regno);
}
static void init_reg_state(struct bpf_verifier_env *env,
struct bpf_func_state *state)
{
struct bpf_reg_state *regs = state->regs;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
mark_reg_not_init(env, regs, i);
regs[i].live = REG_LIVE_NONE;
regs[i].parent = NULL;
}
/* frame pointer */
regs[BPF_REG_FP].type = PTR_TO_STACK;
mark_reg_known_zero(env, regs, BPF_REG_FP);
regs[BPF_REG_FP].frameno = state->frameno;
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
mark_reg_known_zero(env, regs, BPF_REG_1);
}
#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int callsite, int frameno, int subprogno)
{
state->callsite = callsite;
state->frameno = frameno;
state->subprogno = subprogno;
init_reg_state(env, state);
}
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
static int cmp_subprogs(const void *a, const void *b)
{
return ((struct bpf_subprog_info *)a)->start -
((struct bpf_subprog_info *)b)->start;
}
static int find_subprog(struct bpf_verifier_env *env, int off)
{
struct bpf_subprog_info *p;
p = bsearch(&off, env->subprog_info, env->subprog_cnt,
sizeof(env->subprog_info[0]), cmp_subprogs);
if (!p)
return -ENOENT;
return p - env->subprog_info;
}
static int add_subprog(struct bpf_verifier_env *env, int off)
{
int insn_cnt = env->prog->len;
int ret;
if (off >= insn_cnt || off < 0) {
verbose(env, "call to invalid destination\n");
return -EINVAL;
}
ret = find_subprog(env, off);
if (ret >= 0)
return 0;
if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
verbose(env, "too many subprograms\n");
return -E2BIG;
}
env->subprog_info[env->subprog_cnt++].start = off;
sort(env->subprog_info, env->subprog_cnt,
sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
return 0;
}
static int check_subprogs(struct bpf_verifier_env *env)
{
int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
/* Add entry function. */
ret = add_subprog(env, 0);
if (ret < 0)
return ret;
/* determine subprog starts. The end is one before the next starts */
for (i = 0; i < insn_cnt; i++) {
if (insn[i].code != (BPF_JMP | BPF_CALL))
continue;
if (insn[i].src_reg != BPF_PSEUDO_CALL)
continue;
if (!env->allow_ptr_leaks) {
verbose(env, "function calls to other bpf functions are allowed for root only\n");
return -EPERM;
}
ret = add_subprog(env, i + insn[i].imm + 1);
if (ret < 0)
return ret;
}
/* Add a fake 'exit' subprog which could simplify subprog iteration
* logic. 'subprog_cnt' should not be increased.
*/
subprog[env->subprog_cnt].start = insn_cnt;
if (env->log.level > 1)
for (i = 0; i < env->subprog_cnt; i++)
verbose(env, "func#%d @%d\n", i, subprog[i].start);
/* now check that all jumps are within the same subprog */
subprog_start = subprog[cur_subprog].start;
subprog_end = subprog[cur_subprog + 1].start;
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
if (BPF_CLASS(code) != BPF_JMP)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
off = i + insn[i].off + 1;
if (off < subprog_start || off >= subprog_end) {
verbose(env, "jump out of range from insn %d to %d\n", i, off);
return -EINVAL;
}
next:
if (i == subprog_end - 1) {
/* to avoid fall-through from one subprog into another
* the last insn of the subprog should be either exit
* or unconditional jump back
*/
if (code != (BPF_JMP | BPF_EXIT) &&
code != (BPF_JMP | BPF_JA)) {
verbose(env, "last insn is not an exit or jmp\n");
return -EINVAL;
}
subprog_start = subprog_end;
cur_subprog++;
if (cur_subprog < env->subprog_cnt)
subprog_end = subprog[cur_subprog + 1].start;
}
}
return 0;
}
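/* An illustrative layout for the checks above: a program with one
 * bpf-to-bpf call,
 *
 *	0: call pc+2	// BPF_PSEUDO_CALL, target = 0 + 2 + 1 = insn 3
 *	1: r0 = 0
 *	2: exit		// last insn of subprog 0
 *	3: r0 = 1
 *	4: exit		// last insn of subprog 1
 *
 * yields subprog starts {0, 3} plus the fake 'exit' entry at 5; any jump
 * whose target falls outside [subprog_start, subprog_end) is rejected.
 */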
/* Parentage chain of this register (or stack slot) should take care of all
* issues like callee-saved registers, stack slot allocation time, etc.
*/
static int mark_reg_read(struct bpf_verifier_env *env,
const struct bpf_reg_state *state,
struct bpf_reg_state *parent)
{
bool writes = parent == state->parent; /* Observe write marks */
while (parent) {
/* if read wasn't screened by an earlier write ... */
if (writes && state->live & REG_LIVE_WRITTEN)
break;
if (parent->live & REG_LIVE_DONE) {
verbose(env, "verifier BUG type %s var_off %lld off %d\n",
reg_type_str[parent->type],
parent->var_off.value, parent->off);
return -EFAULT;
}
/* ... then we depend on parent's value */
parent->live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
writes = true;
}
return 0;
}
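/* Note on the walk above: a read in the current state marks REG_LIVE_READ
 * on every ancestor whose value it may actually observe. The walk stops at
 * the first state whose register carries REG_LIVE_WRITTEN, because from
 * there on the value was (re)defined locally rather than inherited, so
 * older states need not keep it alive.
 */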
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
return -EINVAL;
}
if (t == SRC_OP) {
/* check whether register used as source operand can be read */
if (regs[regno].type == NOT_INIT) {
verbose(env, "R%d !read_ok\n", regno);
return -EACCES;
}
/* We don't need to worry about FP liveness because it's read-only */
if (regno != BPF_REG_FP)
			return mark_reg_read(env, &regs[regno],
regs[regno].parent);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
verbose(env, "frame pointer is read only\n");
return -EACCES;
}
regs[regno].live |= REG_LIVE_WRITTEN;
if (t == DST_OP)
mark_reg_unknown(env, regs, regno);
}
return 0;
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
switch (type) {
case PTR_TO_MAP_VALUE:
case PTR_TO_MAP_VALUE_OR_NULL:
case PTR_TO_STACK:
case PTR_TO_CTX:
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
case PTR_TO_PACKET_END:
case PTR_TO_FLOW_KEYS:
case CONST_PTR_TO_MAP:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
return true;
default:
return false;
}
}
/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state *reg)
{
return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}
/* check_stack_read/write functions track spill/fill of registers;
* stack boundary and alignment are checked in check_mem_access()
*/
static int check_stack_write(struct bpf_verifier_env *env,
struct bpf_func_state *state, /* func where register points to */
int off, int size, int value_regno, int insn_idx)
{
struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
enum bpf_reg_type type;
err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
state->acquired_refs, true);
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (!env->allow_ptr_leaks &&
state->stack[spi].slot_type[0] == STACK_SPILL &&
size != BPF_REG_SIZE) {
verbose(env, "attempt to corrupt spilled pointer on stack\n");
return -EACCES;
}
cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0 &&
is_spillable_regtype((type = cur->regs[value_regno].type))) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
if (state != cur && type == PTR_TO_STACK) {
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
/* save register state */
state->stack[spi].spilled_ptr = cur->regs[value_regno];
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
for (i = 0; i < BPF_REG_SIZE; i++) {
if (state->stack[spi].slot_type[i] == STACK_MISC &&
!env->allow_ptr_leaks) {
int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
int soff = (-spi - 1) * BPF_REG_SIZE;
				/* Detected reuse of an integer stack slot by a pointer,
				 * which means either llvm is reusing the stack slot or
				 * an attacker is trying to exploit CVE-2018-3639
				 * (speculative store bypass).
				 * Have to sanitize that slot with a preemptive
				 * store of zero.
				 */
if (*poff && *poff != soff) {
					/* disallow programs where a single insn stores
					 * into two different stack slots, since the
					 * verifier cannot sanitize them
					 */
verbose(env,
"insn %d cannot access two stack slots fp%d and fp%d",
insn_idx, *poff, soff);
return -EINVAL;
}
*poff = soff;
}
state->stack[spi].slot_type[i] = STACK_SPILL;
}
} else {
u8 type = STACK_MISC;
/* regular write of data into stack destroys any spilled ptr */
state->stack[spi].spilled_ptr.type = NOT_INIT;
/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
if (state->stack[spi].slot_type[0] == STACK_SPILL)
for (i = 0; i < BPF_REG_SIZE; i++)
state->stack[spi].slot_type[i] = STACK_MISC;
		/* Only mark the slot as written if all 8 bytes were written;
		 * otherwise read propagation may incorrectly stop too soon
		 * when stack slots are partially written.
		 * This heuristic means that read propagation will be
		 * conservative, since it will add REG_LIVE_READ marks
		 * to stack slots all the way back to the first state when a
		 * program writes and reads less than 8 bytes.
		 */
if (size == BPF_REG_SIZE)
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
/* when we zero initialize stack slots mark them as such */
if (value_regno >= 0 &&
register_is_null(&cur->regs[value_regno]))
type = STACK_ZERO;
/* Mark slots affected by this stack write. */
for (i = 0; i < size; i++)
state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
type;
}
return 0;
}
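/* An illustration of the two paths above: spilling a pointer register, e.g.
 *
 *	*(u64 *)(r10 - 8) = r1		// r1 is PTR_TO_CTX
 *
 * records r1's full state in stack[spi].spilled_ptr and marks all eight
 * slot_type bytes STACK_SPILL, whereas a narrower scalar store such as
 * *(u32 *)(r10 - 8) = r2 marks only the written bytes STACK_MISC (or
 * STACK_ZERO when r2 is known zero) and destroys any previous spill.
 */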
static int check_stack_read(struct bpf_verifier_env *env,
struct bpf_func_state *reg_state /* func where register points to */,
int off, int size, int value_regno)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
u8 *stype;
if (reg_state->allocated_stack <= slot) {
verbose(env, "invalid read from stack off %d+0 size %d\n",
off, size);
return -EACCES;
}
stype = reg_state->stack[spi].slot_type;
if (stype[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
verbose(env, "invalid size of register spill\n");
return -EACCES;
}
for (i = 1; i < BPF_REG_SIZE; i++) {
if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
verbose(env, "corrupted spill memory\n");
return -EACCES;
}
}
if (value_regno >= 0) {
/* restore register state from stack */
state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
/* mark reg as written since spilled pointer state likely
* has its liveness marks cleared by is_state_visited()
* which resets stack/reg liveness for state transitions
*/
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
reg_state->stack[spi].spilled_ptr.parent);
return 0;
} else {
int zeros = 0;
for (i = 0; i < size; i++) {
if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
continue;
if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
zeros++;
continue;
}
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
		mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
reg_state->stack[spi].spilled_ptr.parent);
if (value_regno >= 0) {
if (zeros == size) {
/* any size read into register is zero extended,
* so the whole register == const_zero
*/
__mark_reg_const_zero(&state->regs[value_regno]);
} else {
/* have read misc data from the stack */
mark_reg_unknown(env, state->regs, value_regno);
}
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
return 0;
}
}
static int check_stack_access(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size)
{
/* Stack accesses must be at a fixed offset, so that we
* can determine what type of data were returned. See
* check_stack_read().
*/
if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "variable stack access var_off=%s off=%d size=%d",
tn_buf, off, size);
return -EACCES;
}
if (off >= 0 || off < -MAX_BPF_STACK) {
verbose(env, "invalid stack off=%d size=%d\n", off, size);
return -EACCES;
}
return 0;
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_map *map = regs[regno].map_ptr;
if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
off + size > map->value_size) {
verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
map->value_size, off, size);
return -EACCES;
}
return 0;
}
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg = &state->regs[regno];
int err;
/* We may have adjusted the register to this map value, so we
* need to try adding each of min_value and max_value to off
* to make sure our theoretical access will be safe.
*/
if (env->log.level)
print_verifier_state(env, state);
/* The minimum value is only important with signed
* comparisons where we can't assume the floor of a
* value is 0. If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
* will have a set floor within our range.
*/
if (reg->smin_value < 0 &&
(reg->smin_value == S64_MIN ||
(off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
reg->smin_value + off < 0)) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
err = __check_map_access(env, regno, reg->smin_value + off, size,
zero_size_allowed);
if (err) {
verbose(env, "R%d min value is outside of the array range\n",
regno);
return err;
}
/* If we haven't set a max value then we need to bail since we can't be
* sure we won't do bad things.
* If reg->umax_value + off could overflow, treat that as unbounded too.
*/
if (reg->umax_value >= BPF_MAX_VAR_OFF) {
verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
regno);
return -EACCES;
}
err = __check_map_access(env, regno, reg->umax_value + off, size,
zero_size_allowed);
if (err)
verbose(env, "R%d max value is outside of the array range\n",
regno);
return err;
}
#define MAX_PACKET_OFF 0xffff
static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_access_type t)
{
switch (env->prog->type) {
	/* Program types with only direct read access go here! */
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SKB:
if (t == BPF_WRITE)
return false;
/* fallthrough */
/* Program types with direct read + write access go here! */
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_SK_MSG:
if (meta)
return meta->pkt_access;
env->seen_direct_write = true;
return true;
default:
return false;
}
}
static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
int off, int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
(u64)off + size > reg->range) {
verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
off, size, regno, reg->id, reg->off, reg->range);
return -EACCES;
}
return 0;
}
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
int err;
/* We may have added a variable offset to the packet pointer; but any
* reg->range we have comes after that. We are only checking the fixed
* offset.
*/
/* We don't allow negative numbers, because we aren't tracking enough
* detail to prove they're safe.
*/
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
err = __check_packet_access(env, regno, off, size, zero_size_allowed);
if (err) {
verbose(env, "R%d offset is outside of the packet\n", regno);
return err;
}
/* __check_packet_access has made sure "off + size - 1" is within u16.
* reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
	 * otherwise find_good_pkt_pointers would have refused to set the range
	 * info and __check_packet_access would have rejected this pkt access.
* Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
*/
env->prog->aux->max_pkt_offset =
max_t(u32, env->prog->aux->max_pkt_offset,
off + reg->umax_value + size - 1);
return err;
}
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
};
if (env->ops->is_valid_access &&
env->ops->is_valid_access(off, size, t, env->prog, &info)) {
		/* A non-zero info.ctx_field_size indicates that this field is a
* candidate for later verifier transformation to load the whole
* field and then apply a mask when accessed with a narrower
* access than actual ctx access size. A zero info.ctx_field_size
* will only allow for whole field access and rejects any other
* type of narrower access.
*/
*reg_type = info.reg_type;
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
return 0;
}
verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
return -EACCES;
}
static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
int size)
{
if (size < 0 || off < 0 ||
(u64)off + size > sizeof(struct bpf_flow_keys)) {
verbose(env, "invalid access to flow keys off=%d size=%d\n",
off, size);
return -EACCES;
}
return 0;
}
static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, enum bpf_access_type t)
{
struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
struct bpf_insn_access_aux info;
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
if (!bpf_sock_is_valid_access(off, size, t, &info)) {
verbose(env, "invalid bpf_sock access off=%d size=%d\n",
off, size);
return -EACCES;
}
return 0;
}
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
if (allow_ptr_leaks)
return false;
return reg->type != SCALAR_VALUE;
}
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{
return cur_regs(env) + regno;
}
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
}
static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
const struct bpf_reg_state *reg = reg_state(env, regno);
return reg->type == PTR_TO_CTX ||
reg->type == PTR_TO_SOCKET;
}
static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
const struct bpf_reg_state *reg = reg_state(env, regno);
return type_is_pkt_pointer(reg->type);
}
static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{
const struct bpf_reg_state *reg = reg_state(env, regno);
/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
return reg->type == PTR_TO_FLOW_KEYS;
}
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size, bool strict)
{
struct tnum reg_off;
int ip_align;
/* Byte size accesses are always allowed. */
if (!strict || size == 1)
return 0;
/* For platforms that do not have a Kconfig enabling
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
* NET_IP_ALIGN is universally set to '2'. And on platforms
* that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
* to this code only in strict mode where we want to emulate
* the NET_IP_ALIGN==2 checking. Therefore use an
* unconditional IP align value of '2'.
*/
ip_align = 2;
reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
if (!tnum_is_aligned(reg_off, size)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env,
"misaligned packet access off %d+%s+%d+%d size %d\n",
ip_align, tn_buf, reg->off, off, size);
return -EACCES;
}
return 0;
}
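/* A worked example: a 2-byte load at offset 12 from an unmodified packet
 * pointer gives reg_off = 2 (ip_align) + 0 + 12 = 14, which is 2-byte
 * aligned, so the access passes; offset 13 would yield 15 and be rejected
 * in strict mode.
 */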
static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
const char *pointer_desc,
int off, int size, bool strict)
{
struct tnum reg_off;
/* Byte size accesses are always allowed. */
if (!strict || size == 1)
return 0;
reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
if (!tnum_is_aligned(reg_off, size)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
pointer_desc, tn_buf, reg->off, off, size);
return -EACCES;
}
return 0;
}
static int check_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int off,
int size, bool strict_alignment_once)
{
bool strict = env->strict_alignment || strict_alignment_once;
const char *pointer_desc = "";
switch (reg->type) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
/* Special case, because of NET_IP_ALIGN. Given metadata sits
* right in front, treat it the very same way.
*/
return check_pkt_ptr_alignment(env, reg, off, size, strict);
case PTR_TO_FLOW_KEYS:
pointer_desc = "flow keys ";
break;
case PTR_TO_MAP_VALUE:
pointer_desc = "value ";
break;
case PTR_TO_CTX:
pointer_desc = "context ";
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
/* The stack spill tracking logic in check_stack_write()
* and check_stack_read() relies on stack accesses being
* aligned.
*/
strict = true;
break;
case PTR_TO_SOCKET:
pointer_desc = "sock ";
break;
default:
break;
}
return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
strict);
}
static int update_stack_depth(struct bpf_verifier_env *env,
const struct bpf_func_state *func,
int off)
{
u16 stack = env->subprog_info[func->subprogno].stack_depth;
if (stack >= -off)
return 0;
/* update known max for given subprogram */
env->subprog_info[func->subprogno].stack_depth = -off;
return 0;
}
/* starting from main bpf function walk all instructions of the function
* and recursively walk all callees that given function can call.
* Ignore jump and exit insns.
* Since recursion is prevented by check_cfg() this algorithm
* only needs a local stack of MAX_CALL_FRAMES to remember callsites
*/
static int check_max_stack_depth(struct bpf_verifier_env *env)
{
int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
int ret_insn[MAX_CALL_FRAMES];
int ret_prog[MAX_CALL_FRAMES];
process_func:
	/* round up to 32 bytes, since this is the granularity
	 * of the interpreter stack size
	 */
depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
if (depth > MAX_BPF_STACK) {
verbose(env, "combined stack size of %d calls is %d. Too large\n",
frame + 1, depth);
return -EACCES;
}
continue_func:
subprog_end = subprog[idx + 1].start;
for (; i < subprog_end; i++) {
if (insn[i].code != (BPF_JMP | BPF_CALL))
continue;
if (insn[i].src_reg != BPF_PSEUDO_CALL)
continue;
/* remember insn and function to return to */
ret_insn[frame] = i + 1;
ret_prog[frame] = idx;
/* find the callee */
i = i + insn[i].imm + 1;
idx = find_subprog(env, i);
if (idx < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
i);
return -EFAULT;
}
frame++;
if (frame >= MAX_CALL_FRAMES) {
WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
return -EFAULT;
}
goto process_func;
}
/* end of for() loop means the last insn of the 'subprog'
* was reached. Doesn't matter whether it was JA or EXIT
*/
if (frame == 0)
return 0;
depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
frame--;
i = ret_insn[frame];
idx = ret_prog[frame];
goto continue_func;
}
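/* Example of the accounting above: if main uses 64 bytes of stack and calls
 * a subprog using 96, the walk sums round_up(64, 32) + round_up(96, 32) =
 * 160, well under MAX_BPF_STACK; a call chain whose rounded depths sum past
 * MAX_BPF_STACK is rejected with -EACCES.
 */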
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static int get_callee_stack_depth(struct bpf_verifier_env *env,
const struct bpf_insn *insn, int idx)
{
int start = idx + insn->imm + 1, subprog;
subprog = find_subprog(env, start);
if (subprog < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
start);
return -EFAULT;
}
return env->subprog_info[subprog].stack_depth;
}
#endif
static int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno)
{
/* Access to ctx or passing it to a helper is only allowed in
* its original, unmodified form.
*/
if (reg->off) {
verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
regno, reg->off);
return -EACCES;
}
if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
return -EACCES;
}
return 0;
}
/* truncate register to smaller size (in bytes)
* must be called with size < BPF_REG_SIZE
*/
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
u64 mask;
/* clear high bits in bit representation */
reg->var_off = tnum_cast(reg->var_off, size);
/* fix arithmetic bounds */
mask = ((u64)1 << (size * 8)) - 1;
if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
reg->umin_value &= mask;
reg->umax_value &= mask;
} else {
reg->umin_value = 0;
reg->umax_value = mask;
}
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value;
}
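/* A worked example: truncating to size = 2 with umin = 0x11234 and
 * umax = 0x11345 keeps tight bounds, since both share the same bits above
 * the mask (0x10000): umin becomes 0x1234 and umax 0x1345. Had the high
 * bits differed, the bounds would collapse to the full [0, 0xffff] range.
 */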
/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
int off, int bpf_size, enum bpf_access_type t,
int value_regno, bool strict_alignment_once)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
struct bpf_func_state *state;
int size, err = 0;
size = bpf_size_to_bytes(bpf_size);
if (size < 0)
return size;
/* alignment checks will add in reg->off themselves */
err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
if (err)
return err;
/* for access checks, reg->off is just part of off */
off += reg->off;
if (reg->type == PTR_TO_MAP_VALUE) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into map\n", value_regno);
return -EACCES;
}
err = check_map_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = SCALAR_VALUE;
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
/* ctx access returns either a scalar, or a
* PTR_TO_PACKET[_META,_END]. In the latter
* case, we know the offset is zero.
*/
if (reg_type == SCALAR_VALUE)
mark_reg_unknown(env, regs, value_regno);
else
mark_reg_known_zero(env, regs,
value_regno);
regs[value_regno].type = reg_type;
}
} else if (reg->type == PTR_TO_STACK) {
off += reg->var_off.value;
err = check_stack_access(env, reg, off, size);
if (err)
return err;
state = func(env, reg);
err = update_stack_depth(env, state, off);
if (err)
return err;
if (t == BPF_WRITE)
err = check_stack_write(env, state, off, size,
value_regno, insn_idx);
else
err = check_stack_read(env, state, off, size,
value_regno);
} else if (reg_is_pkt_pointer(reg)) {
if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
verbose(env, "cannot write into packet\n");
return -EACCES;
}
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into packet\n",
value_regno);
return -EACCES;
}
err = check_packet_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_FLOW_KEYS) {
if (t == BPF_WRITE && value_regno >= 0 &&
is_pointer_value(env, value_regno)) {
verbose(env, "R%d leaks addr into flow keys\n",
value_regno);
return -EACCES;
}
err = check_flow_keys_access(env, off, size);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_SOCKET) {
if (t == BPF_WRITE) {
verbose(env, "cannot write into socket\n");
return -EACCES;
}
err = check_sock_access(env, regno, off, size, t);
if (!err && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else {
verbose(env, "R%d invalid mem access '%s'\n", regno,
reg_type_str[reg->type]);
return -EACCES;
}
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
regs[value_regno].type == SCALAR_VALUE) {
/* b/h/w load zero-extends, mark upper bits as known 0 */
		coerce_reg_to_size(&regs[value_regno], size);
}
return err;
}
static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
int err;
if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
insn->imm != 0) {
verbose(env, "BPF_XADD uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
return -EACCES;
}
if (is_ctx_reg(env, insn->dst_reg) ||
is_pkt_reg(env, insn->dst_reg) ||
is_flow_key_reg(env, insn->dst_reg)) {
verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
insn->dst_reg,
reg_type_str[reg_state(env, insn->dst_reg)->type]);
return -EACCES;
}
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1, true);
if (err)
return err;
/* check whether atomic_add can write into the same memory */
return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1, true);
}
/* when register 'regno' is passed into a function that will read 'access_size'
 * bytes from that pointer, make sure that it's within the stack boundary
 * and all elements of the stack are initialized.
* Unlike most pointer bounds-checking functions, this one doesn't take an
* 'off' argument, so it has to add in reg->off itself.
*/
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *reg = reg_state(env, regno);
struct bpf_func_state *state = func(env, reg);
int off, i, slot, spi;
if (reg->type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(reg))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[reg->type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
/* Only allow fixed-offset stack reads */
if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
return -EACCES;
}
off = reg->off + reg->var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
regno, off, access_size);
return -EACCES;
}
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
return 0;
}
for (i = 0; i < access_size; i++) {
u8 *stype;
slot = -(off + i) - 1;
spi = slot / BPF_REG_SIZE;
if (state->allocated_stack <= slot)
goto err;
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
if (*stype == STACK_MISC)
goto mark;
if (*stype == STACK_ZERO) {
/* helper can write anything into the stack */
*stype = STACK_MISC;
goto mark;
}
err:
verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
mark:
/* reading any byte out of 8-byte 'spill_slot' will cause
* the whole slot to be marked as 'read'
*/
mark_reg_read(env, &state->stack[spi].spilled_ptr,
state->stack[spi].spilled_ptr.parent);
}
return update_stack_depth(env, state, off);
}
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
switch (reg->type) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
return check_packet_access(env, regno, reg->off, access_size,
zero_size_allowed);
case PTR_TO_MAP_VALUE:
return check_map_access(env, regno, reg->off, access_size,
zero_size_allowed);
default: /* scalar_value|ptr_to_stack or invalid ptr */
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
}
}
static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_MEM ||
type == ARG_PTR_TO_MEM_OR_NULL ||
type == ARG_PTR_TO_UNINIT_MEM;
}
static bool arg_type_is_mem_size(enum bpf_arg_type type)
{
return type == ARG_CONST_SIZE ||
type == ARG_CONST_SIZE_OR_ZERO;
}
static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
int err = 0;
if (arg_type == ARG_DONTCARE)
return 0;
err = check_reg_arg(env, regno, SRC_OP);
if (err)
return err;
if (arg_type == ARG_ANYTHING) {
if (is_pointer_value(env, regno)) {
verbose(env, "R%d leaks addr into helper function\n",
regno);
return -EACCES;
}
return 0;
}
if (type_is_pkt_pointer(type) &&
!may_access_direct_pkt_data(env, meta, BPF_READ)) {
verbose(env, "helper access to the packet is not allowed\n");
return -EACCES;
}
if (arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE ||
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
expected_type = PTR_TO_STACK;
if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_SIZE ||
arg_type == ARG_CONST_SIZE_OR_ZERO) {
expected_type = SCALAR_VALUE;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP;
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_CTX) {
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
} else if (arg_type == ARG_PTR_TO_SOCKET) {
expected_type = PTR_TO_SOCKET;
if (type != expected_type)
goto err_type;
if (meta->ptr_id || !reg->id) {
verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
meta->ptr_id, reg->id);
return -EFAULT;
}
meta->ptr_id = reg->id;
} else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
		/* One exception here. In case the function allows NULL to be
		 * passed in as the argument, it's a SCALAR_VALUE type. The final test
* happens during stack boundary checking.
*/
if (register_is_null(reg) &&
arg_type == ARG_PTR_TO_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
} else {
verbose(env, "unsupported arg_type %d\n", arg_type);
return -EFAULT;
}
if (arg_type == ARG_CONST_MAP_PTR) {
/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
meta->map_ptr = reg->map_ptr;
} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
/* bpf_map_xxx(..., map_ptr, ..., key) call:
* check that [key, key + map->key_size) are within
* stack limits and initialized
*/
if (!meta->map_ptr) {
/* in function declaration map_ptr must come before
* map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that the kernel subsystem misconfigured the verifier
*/
verbose(env, "invalid map_ptr to access map->key\n");
return -EACCES;
}
err = check_helper_mem_access(env, regno,
meta->map_ptr->key_size, false,
NULL);
} else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
*/
if (!meta->map_ptr) {
/* kernel subsystem misconfigured verifier */
verbose(env, "invalid map_ptr to access map->value\n");
return -EACCES;
}
meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false,
meta);
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
/* remember the mem_size which may be used later
* to refine return values.
*/
meta->msize_smax_value = reg->smax_value;
meta->msize_umax_value = reg->umax_value;
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
*/
if (!tnum_is_const(reg->var_off))
/* For unprivileged variable accesses, disable raw
* mode so that the program is required to
* initialize all the memory that the helper could
* just partially fill up.
*/
meta = NULL;
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
regno);
return -EACCES;
}
if (reg->umin_value == 0) {
err = check_helper_mem_access(env, regno - 1, 0,
zero_size_allowed,
meta);
if (err)
return err;
}
if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
regno);
return -EACCES;
}
err = check_helper_mem_access(env, regno - 1,
reg->umax_value,
zero_size_allowed, meta);
}
return err;
err_type:
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[type], reg_type_str[expected_type]);
return -EACCES;
}
static int check_map_func_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map, int func_id)
{
if (!map)
return 0;
	/* We need a two-way check, first is from the map perspective ... */
switch (map->map_type) {
case BPF_MAP_TYPE_PROG_ARRAY:
if (func_id != BPF_FUNC_tail_call)
goto error;
break;
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
if (func_id != BPF_FUNC_perf_event_read &&
func_id != BPF_FUNC_perf_event_output &&
func_id != BPF_FUNC_perf_event_read_value)
goto error;
break;
case BPF_MAP_TYPE_STACK_TRACE:
if (func_id != BPF_FUNC_get_stackid)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_ARRAY:
if (func_id != BPF_FUNC_skb_under_cgroup &&
func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
case BPF_MAP_TYPE_CGROUP_STORAGE:
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
if (func_id != BPF_FUNC_get_local_storage)
goto error;
break;
/* devmap returns a pointer to a live net_device ifindex that we cannot
	 * allow to be modified from the bpf side. So do not allow lookup of
	 * elements for now.
*/
case BPF_MAP_TYPE_DEVMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
/* Restrict bpf side of cpumap and xskmap, open when use-cases
* appear.
*/
case BPF_MAP_TYPE_CPUMAP:
case BPF_MAP_TYPE_XSKMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
if (func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map)
goto error;
break;
case BPF_MAP_TYPE_SOCKHASH:
if (func_id != BPF_FUNC_sk_redirect_hash &&
func_id != BPF_FUNC_sock_hash_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash)
goto error;
break;
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
if (func_id != BPF_FUNC_sk_select_reuseport)
goto error;
break;
case BPF_MAP_TYPE_QUEUE:
case BPF_MAP_TYPE_STACK:
if (func_id != BPF_FUNC_map_peek_elem &&
func_id != BPF_FUNC_map_pop_elem &&
func_id != BPF_FUNC_map_push_elem)
goto error;
break;
default:
break;
}
/* ... and second from the function itself. */
switch (func_id) {
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
if (env->subprog_cnt > 1) {
verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
return -EINVAL;
}
break;
case BPF_FUNC_perf_event_read:
case BPF_FUNC_perf_event_output:
case BPF_FUNC_perf_event_read_value:
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
goto error;
break;
case BPF_FUNC_get_stackid:
if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
goto error;
break;
case BPF_FUNC_current_task_under_cgroup:
case BPF_FUNC_skb_under_cgroup:
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
break;
case BPF_FUNC_redirect_map:
if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
map->map_type != BPF_MAP_TYPE_CPUMAP &&
map->map_type != BPF_MAP_TYPE_XSKMAP)
goto error;
break;
case BPF_FUNC_sk_redirect_map:
case BPF_FUNC_msg_redirect_map:
case BPF_FUNC_sock_map_update:
if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
goto error;
break;
case BPF_FUNC_sk_redirect_hash:
case BPF_FUNC_msg_redirect_hash:
case BPF_FUNC_sock_hash_update:
if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
goto error;
break;
case BPF_FUNC_get_local_storage:
if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
goto error;
break;
case BPF_FUNC_sk_select_reuseport:
if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
goto error;
break;
case BPF_FUNC_map_peek_elem:
case BPF_FUNC_map_pop_elem:
case BPF_FUNC_map_push_elem:
if (map->map_type != BPF_MAP_TYPE_QUEUE &&
map->map_type != BPF_MAP_TYPE_STACK)
goto error;
break;
default:
break;
}
return 0;
error:
verbose(env, "cannot pass map_type %d into func %s#%d\n",
map->map_type, func_id_name(func_id), func_id);
return -EINVAL;
}
static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
int count = 0;
if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
count++;
if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
count++;
/* We only support one arg being in raw mode at the moment,
* which is sufficient for the helper functions we have
* right now.
*/
return count <= 1;
}
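/* Illustrative example (helper protos may differ across kernel versions):
 * bpf_get_current_comm() is declared with
 *     arg1_type = ARG_PTR_TO_UNINIT_MEM
 *     arg2_type = ARG_CONST_SIZE
 * i.e. exactly one raw-mode argument, which check_raw_mode_ok() above
 * accepts, and a (buf, len) pair, which check_arg_pair_ok() below expects.
 */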
static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
enum bpf_arg_type arg_next)
{
return (arg_type_is_mem_ptr(arg_curr) &&
!arg_type_is_mem_size(arg_next)) ||
(!arg_type_is_mem_ptr(arg_curr) &&
arg_type_is_mem_size(arg_next));
}
static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{
/* bpf_xxx(..., buf, len) call will access 'len'
* bytes from memory 'buf'. Both arg types need
* to be paired, so make sure there's no buggy
* helper function specification.
*/
if (arg_type_is_mem_size(fn->arg1_type) ||
arg_type_is_mem_ptr(fn->arg5_type) ||
check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
return false;
return true;
}
static bool check_refcount_ok(const struct bpf_func_proto *fn)
{
int count = 0;
if (arg_type_is_refcounted(fn->arg1_type))
count++;
if (arg_type_is_refcounted(fn->arg2_type))
count++;
if (arg_type_is_refcounted(fn->arg3_type))
count++;
if (arg_type_is_refcounted(fn->arg4_type))
count++;
if (arg_type_is_refcounted(fn->arg5_type))
count++;
/* We only support one arg being refcounted at the moment,
 * which is sufficient for the helper functions we have right now.
 */
return count <= 1;
}
static int check_func_proto(const struct bpf_func_proto *fn)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
check_refcount_ok(fn) ? 0 : -EINVAL;
}
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
* are now invalid, so turn them into unknown SCALAR_VALUE.
*/
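/* Sketch of the situation this guards against (illustrative only):
 *
 *   r2 = *(u32 *)(r1 + off)  ; r2 = pkt pointer derived from ctx
 *   call bpf_skb_pull_data   ; helper may reallocate skb data
 *   r3 = *(u8 *)(r2 + 0)     ; rejected: r2 is now SCALAR_VALUE
 *
 * After any helper with bpf_helper_changes_pkt_data() == true, packet
 * pointers must be re-derived from the context.
 */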
static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
struct bpf_func_state *state)
{
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
if (reg_is_pkt_pointer_any(&regs[i]))
mark_reg_unknown(env, regs, i);
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg_is_pkt_pointer_any(reg))
__mark_reg_unknown(reg);
}
}
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *vstate = env->cur_state;
int i;
for (i = 0; i <= vstate->curframe; i++)
__clear_all_pkt_pointers(env, vstate->frame[i]);
}
static void release_reg_references(struct bpf_verifier_env *env,
struct bpf_func_state *state, int id)
{
struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].id == id)
mark_reg_unknown(env, regs, i);
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg_is_refcounted(reg) && reg->id == id)
__mark_reg_unknown(reg);
}
}
/* The pointer with the specified id has released its reference to kernel
* resources. Identify all copies of the same pointer and clear the reference.
*/
static int release_reference(struct bpf_verifier_env *env,
struct bpf_call_arg_meta *meta)
{
struct bpf_verifier_state *vstate = env->cur_state;
int i;
for (i = 0; i <= vstate->curframe; i++)
release_reg_references(env, vstate->frame[i], meta->ptr_id);
return release_reference_state(env, meta->ptr_id);
}
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *caller, *callee;
int i, err, subprog, target_insn;
if (state->curframe + 1 >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep\n",
state->curframe + 2);
return -E2BIG;
}
target_insn = *insn_idx + insn->imm;
subprog = find_subprog(env, target_insn + 1);
if (subprog < 0) {
verbose(env, "verifier bug. No program starts at insn %d\n",
target_insn + 1);
return -EFAULT;
}
caller = state->frame[state->curframe];
if (state->frame[state->curframe + 1]) {
verbose(env, "verifier bug. Frame %d already allocated\n",
state->curframe + 1);
return -EFAULT;
}
callee = kzalloc(sizeof(*callee), GFP_KERNEL);
if (!callee)
return -ENOMEM;
state->frame[state->curframe + 1] = callee;
/* The callee cannot read r0 or r6 - r9 from the caller and has
 * to write into its own stack before reading from it.
 * It can, however, read/write the caller's stack via pointers
 * passed in r1 - r5.
 */
init_func_state(env, callee,
/* remember the callsite, it will be used by bpf_exit */
*insn_idx /* callsite */,
state->curframe + 1 /* frameno within this callchain */,
subprog /* subprog number within this prog */);
/* Transfer references to the callee */
err = transfer_reference_state(callee, caller);
if (err)
return err;
/* copy r1 - r5 args that callee can access. The copy includes parent
* pointers, which connects us up to the liveness chain
*/
for (i = BPF_REG_1; i <= BPF_REG_5; i++)
callee->regs[i] = caller->regs[i];
/* after the call registers r0 - r5 were scratched */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, caller->regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* only increment it after check_reg_arg() finished */
state->curframe++;
/* and go analyze first insn of the callee */
*insn_idx = target_insn;
if (env->log.level) {
verbose(env, "caller:\n");
print_verifier_state(env, caller);
verbose(env, "callee:\n");
print_verifier_state(env, callee);
}
return 0;
}
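/* A BPF_EXIT in a callee undoes the frame push above: r0 flows back to
 * the caller, acquired references transfer back, and verification
 * resumes at callsite + 1. Rough shape (illustrative):
 *
 *   call pc+N   ; check_func_call(): push frame, copy r1 - r5
 *   ...         ; callee body verified in the new frame
 *   exit        ; prepare_func_exit(): pop frame, return r0
 */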
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *caller, *callee;
struct bpf_reg_state *r0;
int err;
callee = state->frame[state->curframe];
r0 = &callee->regs[BPF_REG_0];
if (r0->type == PTR_TO_STACK) {
/* Technically it's ok to return the caller's stack pointer
 * (or the caller's caller's pointer) back to the caller,
 * since those pointers remain valid. Only the current
 * frame's stack pointer becomes invalid once the function
 * exits, but let's be conservative and reject all of them.
 */
verbose(env, "cannot return stack pointer to the caller\n");
return -EINVAL;
}
state->curframe--;
caller = state->frame[state->curframe];
/* return to the caller whatever r0 had in the callee */
caller->regs[BPF_REG_0] = *r0;
/* Transfer references to the caller */
err = transfer_reference_state(caller, callee);
if (err)
return err;
*insn_idx = callee->callsite + 1;
if (env->log.level) {
verbose(env, "returning from callee:\n");
print_verifier_state(env, callee);
verbose(env, "to caller at %d:\n", *insn_idx);
print_verifier_state(env, caller);
}
/* clear everything in the callee */
free_func_state(callee);
state->frame[state->curframe + 1] = NULL;
return 0;
}
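/* Example of the refinement below (illustrative): for
 *     r0 = bpf_get_stack(ctx, buf, 64, 0);
 * the size argument bounds how much the helper can write, so r0's
 * smax/umax can be clamped to 64, letting a later bounds check on r0
 * be proven without an extra explicit branch.
 */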
static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
int func_id,
struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
if (ret_type != RET_INTEGER ||
(func_id != BPF_FUNC_get_stack &&
func_id != BPF_FUNC_probe_read_str))
return;
ret_reg->smax_value = meta->msize_smax_value;
ret_reg->umax_value = meta->msize_umax_value;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
}
static int
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
int func_id, int insn_idx)
{
struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
if (func_id != BPF_FUNC_tail_call &&
func_id != BPF_FUNC_map_lookup_elem &&
func_id != BPF_FUNC_map_update_elem &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_map_push_elem &&
func_id != BPF_FUNC_map_pop_elem &&
func_id != BPF_FUNC_map_peek_elem)
return 0;
if (meta->map_ptr == NULL) {
verbose(env, "kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
if (!BPF_MAP_PTR(aux->map_state))
bpf_map_ptr_store(aux, meta->map_ptr,
meta->map_ptr->unpriv_array);
else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
meta->map_ptr->unpriv_array);
return 0;
}
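/* Illustrative leak that the check below reports (helper names assume
 * socket-lookup support, e.g. bpf_sk_lookup_tcp/bpf_sk_release):
 *
 *   sk = bpf_sk_lookup_tcp(...)  ; acquires a reference, sk gets an id
 *   if (!sk) return 0;
 *   return 0;                    ; missing bpf_sk_release(sk)
 *
 * -> "Unreleased reference id=N alloc_insn=M"
 */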
static int check_reference_leak(struct bpf_verifier_env *env)
{
struct bpf_func_state *state = cur_func(env);
int i;
for (i = 0; i < state->acquired_refs; i++) {
verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
state->refs[i].id, state->refs[i].insn_idx);
}
return state->acquired_refs ? -EINVAL : 0;
}
static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs;
struct bpf_call_arg_meta meta;
bool changes_data;
int i, err;
/* find function prototype */
if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
func_id);
return -EINVAL;
}
if (env->ops->get_func_proto)
fn = env->ops->get_func_proto(func_id, env->prog);
if (!fn) {
verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
func_id);
return -EINVAL;
}
/* eBPF programs must be GPL compatible to use GPL-ed functions */
if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
return -EINVAL;
}
/* With LD_ABS/IND some JITs save/restore skb from r1. */
changes_data = bpf_helper_changes_pkt_data(fn->func);
if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
func_id_name(func_id), func_id);
return -EINVAL;
}
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
err = check_func_proto(fn);
if (err) {
verbose(env, "kernel subsystem misconfigured func %s#%d\n",
func_id_name(func_id), func_id);
return err;
}
/* check args */
err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
if (err)
return err;
err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
if (err)
return err;
err = record_func_map(env, &meta, func_id, insn_idx);
if (err)
return err;
/* Mark slots with STACK_MISC in case of raw mode, stack offset
* is inferred from register state.
*/
for (i = 0; i < meta.access_size; i++) {
err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
BPF_WRITE, -1, false);
if (err)
return err;
}
if (func_id == BPF_FUNC_tail_call) {
err = check_reference_leak(env);
if (err) {
verbose(env, "tail_call would lead to reference leak\n");
return err;
}
} else if (is_release_function(func_id)) {
err = release_reference(env, &meta);
if (err)
return err;
}
regs = cur_regs(env);
/* check that flags argument in get_local_storage(map, flags) is 0,
* this is required because get_local_storage() can't return an error.
*/
if (func_id == BPF_FUNC_get_local_storage &&
!register_is_null(&regs[BPF_REG_2])) {
verbose(env, "get_local_storage() doesn't support non-zero flags\n");
return -EINVAL;
}
/* reset caller saved regs */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* update return register (already marked as written above) */
if (fn->ret_type == RET_INTEGER) {
/* sets type to SCALAR_VALUE */
mark_reg_unknown(env, regs, BPF_REG_0);
} else if (fn->ret_type == RET_VOID) {
regs[BPF_REG_0].type = NOT_INIT;
} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
fn->ret_type == RET_PTR_TO_MAP_VALUE) {
/* There is no offset yet applied, variable or fixed */
mark_reg_known_zero(env, regs, BPF_REG_0);
/* remember map_ptr, so that check_map_access()
* can check 'value_size' boundary of memory access
* to map element returned from bpf_map_lookup_elem()
*/
if (meta.map_ptr == NULL) {
verbose(env,
"kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
regs[BPF_REG_0].map_ptr = meta.map_ptr;
if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
} else {
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
regs[BPF_REG_0].id = ++env->id_gen;
}
} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
int id = acquire_reference_state(env, insn_idx);
if (id < 0)
return id;
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
regs[BPF_REG_0].id = id;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
return -EINVAL;
}
do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
if (err)
return err;
if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
const char *err_str;
#ifdef CONFIG_PERF_EVENTS
err = get_callchain_buffers(sysctl_perf_event_max_stack);
err_str = "cannot get callchain buffer for func %s#%d\n";
#else
err = -ENOTSUPP;
err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
#endif
if (err) {
verbose(env, err_str, func_id_name(func_id), func_id);
return err;
}
env->prog->has_callchain_buf = true;
}
if (changes_data)
clear_all_pkt_pointers(env);
return 0;
}
static bool signed_add_overflows(s64 a, s64 b)
{
/* Do the add in u64, where overflow is well-defined */
s64 res = (s64)((u64)a + (u64)b);
if (b < 0)
return res > a;
return res < a;
}
static bool signed_sub_overflows(s64 a, s64 b)
{
/* Do the sub in u64, where overflow is well-defined */
s64 res = (s64)((u64)a - (u64)b);
if (b < 0)
return res < a;
return res > a;
}
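/* Worked example for the predicates above: a = S64_MAX, b = 1 gives
 * res = (s64)((u64)S64_MAX + 1) = S64_MIN; since b >= 0 and res < a,
 * signed_add_overflows() correctly reports overflow. The unsigned cast
 * keeps the wraparound well-defined (signed overflow is UB in C).
 */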
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
enum bpf_reg_type type)
{
bool known = tnum_is_const(reg->var_off);
s64 val = reg->var_off.value;
s64 smin = reg->smin_value;
if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
verbose(env, "math between %s pointer and %lld is not allowed\n",
reg_type_str[type], val);
return false;
}
if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
verbose(env, "%s pointer offset %d is not allowed\n",
reg_type_str[type], reg->off);
return false;
}
if (smin == S64_MIN) {
verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
reg_type_str[type]);
return false;
}
if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
verbose(env, "value %lld makes %s pointer be out of bounds\n",
smin, reg_type_str[type]);
return false;
}
return true;
}
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
* Caller should also handle BPF_MOV case separately.
* If we return -EACCES, caller may want to try again treating pointer as a
* scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
*/
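/* Illustrative bound tracking (values made up):
 *
 *   r1 = map_value_ptr  ; PTR_TO_MAP_VALUE, off = 0
 *   r2 = unknown scalar, verifier knows r2 in [0, 16]
 *   r1 += r2            ; dst stays PTR_TO_MAP_VALUE,
 *                       ; smin/umin += 0, smax/umax += 16,
 *                       ; var_off widened via tnum_add()
 *
 * A later *(u8 *)(r1 + 0) is then checked against map->value_size
 * using these bounds.
 */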
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
u32 dst = insn->dst_reg, src = insn->src_reg;
u8 opcode = BPF_OP(insn->code);
dst_reg = &regs[dst];
if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
smin_val > smax_val || umin_val > umax_val) {
/* Taint dst register if offset had invalid bounds derived from
* e.g. dead branches.
*/
__mark_reg_unknown(dst_reg);
return 0;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops on pointers produce (meaningless) scalars */
verbose(env,
"R%d 32-bit pointer arithmetic prohibited\n",
dst);
return -EACCES;
}
switch (ptr_reg->type) {
case PTR_TO_MAP_VALUE_OR_NULL:
verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
case CONST_PTR_TO_MAP:
case PTR_TO_PACKET_END:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
case PTR_TO_MAP_VALUE:
if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
off_reg == dst_reg ? dst : src);
return -EACCES;
}
/* fall-through */
default:
break;
}
/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
* The id may be overwritten later if we create a new variable offset.
*/
dst_reg->type = ptr_reg->type;
dst_reg->id = ptr_reg->id;
if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
!check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
return -EINVAL;
switch (opcode) {
case BPF_ADD:
/* We can take a fixed offset as long as it doesn't overflow
* the s32 'off' field
*/
if (known && (ptr_reg->off + smin_val ==
(s64)(s32)(ptr_reg->off + smin_val))) {
/* pointer += K. Accumulate it into fixed offset */
dst_reg->smin_value = smin_ptr;
dst_reg->smax_value = smax_ptr;
dst_reg->umin_value = umin_ptr;
dst_reg->umax_value = umax_ptr;
dst_reg->var_off = ptr_reg->var_off;
dst_reg->off = ptr_reg->off + smin_val;
dst_reg->raw = ptr_reg->raw;
break;
}
/* A new variable offset is created. Note that off_reg->off
 * == 0, since it's a scalar.
 * dst_reg gets the pointer type, and if it's a PTR_TO_PACKET
 * it also gets a new 'id', since adding a (possibly unknown)
 * scalar creates a new 'base' pointer.
 * off_reg's (variable) bounds get added into the variable
 * offset, and we copy the fixed offset from ptr_reg.
 */
if (signed_add_overflows(smin_ptr, smin_val) ||
signed_add_overflows(smax_ptr, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = smin_ptr + smin_val;
dst_reg->smax_value = smax_ptr + smax_val;
}
if (umin_ptr + umin_val < umin_ptr ||
umax_ptr + umax_val < umax_ptr) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value = umin_ptr + umin_val;
dst_reg->umax_value = umax_ptr + umax_val;
}
dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
dst_reg->raw = ptr_reg->raw;
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* something was added to pkt_ptr, set range to zero */
dst_reg->raw = 0;
}
break;
case BPF_SUB:
if (dst_reg == off_reg) {
/* scalar -= pointer. Creates an unknown scalar */
verbose(env, "R%d tried to subtract pointer from scalar\n",
dst);
return -EACCES;
}
/* We don't allow subtraction from FP, because (according to
* test_verifier.c test "invalid fp arithmetic", JITs might not
* be able to deal with it.
*/
if (ptr_reg->type == PTR_TO_STACK) {
verbose(env, "R%d subtraction from stack pointer prohibited\n",
dst);
return -EACCES;
}
if (known && (ptr_reg->off - smin_val ==
(s64)(s32)(ptr_reg->off - smin_val))) {
/* pointer -= K. Subtract it from fixed offset */
dst_reg->smin_value = smin_ptr;
dst_reg->smax_value = smax_ptr;
dst_reg->umin_value = umin_ptr;
dst_reg->umax_value = umax_ptr;
dst_reg->var_off = ptr_reg->var_off;
dst_reg->id = ptr_reg->id;
dst_reg->off = ptr_reg->off - smin_val;
dst_reg->raw = ptr_reg->raw;
break;
}
/* A new variable offset is created. If the subtrahend is known
* nonnegative, then any reg->range we had before is still good.
*/
if (signed_sub_overflows(smin_ptr, smax_val) ||
signed_sub_overflows(smax_ptr, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = smin_ptr - smax_val;
dst_reg->smax_value = smax_ptr - smin_val;
}
if (umin_ptr < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value = umin_ptr - umax_val;
dst_reg->umax_value = umax_ptr - umin_val;
}
dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
dst_reg->off = ptr_reg->off;
dst_reg->raw = ptr_reg->raw;
if (reg_is_pkt_pointer(ptr_reg)) {
dst_reg->id = ++env->id_gen;
/* if the subtrahend may be negative, the pointer may have
 * moved forward past the verified range, so clear it
 */
if (smin_val < 0)
dst_reg->raw = 0;
}
break;
case BPF_AND:
case BPF_OR:
case BPF_XOR:
/* bitwise ops on pointers are troublesome, prohibit. */
verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
default:
/* other operators (e.g. MUL,LSH) produce non-pointer results */
verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
}
if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
return -EINVAL;
__update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
/* For unprivileged we require that resulting offset must be in bounds
* in order to be able to sanitize access later on.
*/
if (!env->allow_ptr_leaks) {
if (dst_reg->type == PTR_TO_MAP_VALUE &&
check_map_access(env, dst, dst_reg->off, 1, false)) {
verbose(env, "R%d pointer arithmetic of map value goes out of range, "
"prohibited for !root\n", dst);
return -EACCES;
} else if (dst_reg->type == PTR_TO_STACK &&
check_stack_access(env, dst_reg, dst_reg->off +
dst_reg->var_off.value, 1)) {
verbose(env, "R%d stack pointer arithmetic goes out of range, "
"prohibited for !root\n", dst);
return -EACCES;
}
}
return 0;
}
/* WARNING: This function does calculations on 64-bit values, but the actual
* execution may occur on 32-bit values. Therefore, things like bitshifts
* need extra checks in the 32-bit case.
*/
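/* Example of why 32-bit RSH needs the coercion below (illustrative):
 * if dst is known to be 0x100000000 (bit 32 set), a 32-bit ">> 1"
 * must yield 0, but right-shifting the full 64-bit value would leak
 * bit 32 down into bit 31 and yield 0x80000000. Hence both operands
 * are truncated to 32 bits before the bounds arithmetic.
 */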
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state src_reg)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
if (insn_bitness == 32) {
/* Relevant for 32-bit RSH: Information can propagate towards
* LSB, so it isn't sufficient to only truncate the output to
* 32 bits.
*/
coerce_reg_to_size(dst_reg, 4);
coerce_reg_to_size(&src_reg, 4);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
umax_val = src_reg.umax_value;
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
smin_val > smax_val || umin_val > umax_val) {
/* Taint dst register if offset had invalid bounds derived from
* e.g. dead branches.
*/
__mark_reg_unknown(dst_reg);
return 0;
}
if (!src_known &&
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
__mark_reg_unknown(dst_reg);
return 0;
}
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value += smin_val;
dst_reg->smax_value += smax_val;
}
if (dst_reg->umin_value + umin_val < umin_val ||
dst_reg->umax_value + umax_val < umax_val) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value += umin_val;
dst_reg->umax_value += umax_val;
}
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value -= smax_val;
dst_reg->smax_value -= smin_val;
}
if (dst_reg->umin_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value -= umax_val;
dst_reg->umax_value -= umin_val;
}
dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
break;
case BPF_MUL:
dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
if (smin_val < 0 || dst_reg->smin_value < 0) {
/* Ain't nobody got time to multiply that sign */
__mark_reg_unbounded(dst_reg);
__update_reg_bounds(dst_reg);
break;
}
/* Both values are positive, so we can work with unsigned and
* copy the result to signed (unless it exceeds S64_MAX).
*/
if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
/* Potential overflow, we know nothing */
__mark_reg_unbounded(dst_reg);
/* (except what we can learn from the var_off) */
__update_reg_bounds(dst_reg);
break;
}
dst_reg->umin_value *= umin_val;
dst_reg->umax_value *= umax_val;
if (dst_reg->umax_value > S64_MAX) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
break;
case BPF_AND:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value &
src_reg.var_off.value);
break;
}
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = dst_reg->var_off.value;
dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ANDing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ANDing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_OR:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value |
src_reg.var_off.value);
break;
}
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
dst_reg->umax_value = dst_reg->var_off.value |
dst_reg->var_off.mask;
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ORing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ORing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* We lose all sign bit information (except what we can pick
* up from var_off)
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
/* If we might shift our top bit out, then we know nothing */
if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value <<= umin_val;
dst_reg->umax_value <<= umax_val;
}
dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
* be negative, then either:
* 1) src_reg might be zero, so the sign bit of the result is
* unknown, so we lose our signed bounds
* 2) it's known negative, thus the unsigned bounds capture the
* signed bounds
* 3) the signed bounds cross zero, so they tell us nothing
* about the result
* If the value in dst_reg is known nonnegative, then again the
* unsigned bounds capture the signed bounds.
* Thus, in all cases it suffices to blow away our signed bounds
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
dst_reg->umin_value >>= umax_val;
dst_reg->umax_value >>= umin_val;
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_ARSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* Upon reaching here, src_known is true and
* umax_val is equal to umin_val.
*/
dst_reg->smin_value >>= umin_val;
dst_reg->smax_value >>= umin_val;
dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
/* blow away the dst_reg umin_value/umax_value and rely on
* dst_reg var_off to refine the result.
*/
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
__update_reg_bounds(dst_reg);
break;
default:
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->32 */
coerce_reg_to_size(dst_reg, 4);
}
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
}
/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
* and var_off.
*/
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
dst_reg = &regs[insn->dst_reg];
src_reg = NULL;
if (dst_reg->type != SCALAR_VALUE)
ptr_reg = dst_reg;
if (BPF_SRC(insn->code) == BPF_X) {
src_reg = &regs[insn->src_reg];
if (src_reg->type != SCALAR_VALUE) {
if (dst_reg->type != SCALAR_VALUE) {
/* Combining two pointers by any ALU op yields
* an arbitrary scalar. Disallow all math except
* pointer subtraction
*/
if (opcode == BPF_SUB && env->allow_ptr_leaks) {
mark_reg_unknown(env, regs, insn->dst_reg);
return 0;
}
verbose(env, "R%d pointer %s pointer prohibited\n",
insn->dst_reg,
bpf_alu_string[opcode >> 4]);
return -EACCES;
} else {
/* scalar += pointer
* This is legal, but we have to reverse our
* src/dest handling in computing the range
*/
return adjust_ptr_min_max_vals(env, insn,
src_reg, dst_reg);
}
} else if (ptr_reg) {
/* pointer += scalar */
return adjust_ptr_min_max_vals(env, insn,
dst_reg, src_reg);
}
} else {
/* Pretend the src is a reg with a known value, since we only
* need to be able to read from this state.
*/
off_reg.type = SCALAR_VALUE;
__mark_reg_known(&off_reg, insn->imm);
src_reg = &off_reg;
if (ptr_reg) /* pointer += K */
return adjust_ptr_min_max_vals(env, insn,
ptr_reg, src_reg);
}
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
print_verifier_state(env, state);
verbose(env, "verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
print_verifier_state(env, state);
verbose(env, "verifier internal error: no src_reg\n");
return -EINVAL;
}
return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
}
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode == BPF_END || opcode == BPF_NEG) {
if (opcode == BPF_NEG) {
if (BPF_SRC(insn->code) != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->off != 0 || insn->imm != 0) {
verbose(env, "BPF_NEG uses reserved fields\n");
return -EINVAL;
}
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
(insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
BPF_CLASS(insn->code) == BPF_ALU64) {
verbose(env, "BPF_END uses reserved fields\n");
return -EINVAL;
}
}
/* check src operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer arithmetic prohibited\n",
insn->dst_reg);
return -EACCES;
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
} else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
}
/* check dest operand, mark as required later */
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
if (BPF_SRC(insn->code) == BPF_X) {
struct bpf_reg_state *src_reg = regs + insn->src_reg;
struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
if (BPF_CLASS(insn->code) == BPF_ALU64) {
/* case: R1 = R2
* copy register state to dest reg
*/
*dst_reg = *src_reg;
dst_reg->live |= REG_LIVE_WRITTEN;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
verbose(env,
"R%d partial copy of pointer\n",
insn->src_reg);
return -EACCES;
} else if (src_reg->type == SCALAR_VALUE) {
*dst_reg = *src_reg;
dst_reg->live |= REG_LIVE_WRITTEN;
} else {
mark_reg_unknown(env, regs,
insn->dst_reg);
}
coerce_reg_to_size(dst_reg, 4);
}
} else {
/* case: R = imm
* remember the value we stored into this reg
*/
/* clear any state __mark_reg_known doesn't set */
mark_reg_unknown(env, regs, insn->dst_reg);
regs[insn->dst_reg].type = SCALAR_VALUE;
if (BPF_CLASS(insn->code) == BPF_ALU64) {
__mark_reg_known(regs + insn->dst_reg,
insn->imm);
} else {
__mark_reg_known(regs + insn->dst_reg,
(u32)insn->imm);
}
}
} else if (opcode > BPF_END) {
verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
return -EINVAL;
} else { /* all other ALU ops: and, sub, xor, add, ... */
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0 || insn->off != 0) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
verbose(env, "div by zero\n");
return -EINVAL;
}
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
if (insn->imm < 0 || insn->imm >= size) {
verbose(env, "invalid shift %d\n", insn->imm);
return -EINVAL;
}
}
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
return adjust_reg_min_max_vals(env, insn);
}
return 0;
}
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type,
bool range_right_open)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *reg;
u16 new_range;
int i, j;
if (dst_reg->off < 0 ||
(dst_reg->off == 0 && range_right_open))
/* This doesn't give us any range */
return;
if (dst_reg->umax_value > MAX_PACKET_OFF ||
dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
/* Risk of overflow. For instance, ptr + (1<<63) may be less
* than pkt_end, but that's because it's also less than pkt.
*/
return;
new_range = dst_reg->off;
if (range_right_open)
new_range--;
/* Examples for register markings:
*
* pkt_data in dst register:
*
* r2 = r3;
* r2 += 8;
* if (r2 > pkt_end) goto <handle exception>
* <access okay>
*
* r2 = r3;
* r2 += 8;
* if (r2 < pkt_end) goto <access okay>
* <handle exception>
*
* Where:
* r2 == dst_reg, pkt_end == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* pkt_data in src register:
*
* r2 = r3;
* r2 += 8;
* if (pkt_end >= r2) goto <access okay>
* <handle exception>
*
* r2 = r3;
* r2 += 8;
* if (pkt_end <= r2) goto <handle exception>
* <access okay>
*
* Where:
* pkt_end == dst_reg, r2 == src_reg
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
* Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
* or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
* and [r3, r3 + 8-1) respectively is safe to access depending on
* the check.
*/
/* If our ids match, then we must have the same max_value. And we
* don't care about the other reg's fixed offset, since if it's too big
* the range won't allow anything.
* dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
*/
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == type && regs[i].id == dst_reg->id)
/* keep the maximum range already checked */
regs[i].range = max(regs[i].range, new_range);
for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
if (reg->type == type && reg->id == dst_reg->id)
reg->range = max(reg->range, new_range);
}
}
}
/* compute branch direction of the expression "if (reg opcode val) goto target;"
* and return:
* 1 - branch will be taken and "goto target" will be executed
* 0 - branch will not be taken and fall-through to next insn
* -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
*/
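/* JSET example (illustrative): with var_off = (value=0x1, mask=0x2),
 * i.e. bit 0 known set and bit 1 unknown, "if (reg & 0x1)" is known
 * taken (a known-set bit overlaps val), while "if (reg & 0x4)" is
 * known not taken (neither known nor unknown bits overlap val).
 */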
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
{
if (__is_pointer_value(false, reg))
return -1;
switch (opcode) {
case BPF_JEQ:
if (tnum_is_const(reg->var_off))
return !!tnum_equals_const(reg->var_off, val);
break;
case BPF_JNE:
if (tnum_is_const(reg->var_off))
return !tnum_equals_const(reg->var_off, val);
break;
case BPF_JSET:
if ((~reg->var_off.mask & reg->var_off.value) & val)
return 1;
if (!((reg->var_off.mask | reg->var_off.value) & val))
return 0;
break;
case BPF_JGT:
if (reg->umin_value > val)
return 1;
else if (reg->umax_value <= val)
return 0;
break;
case BPF_JSGT:
if (reg->smin_value > (s64)val)
return 1;
else if (reg->smax_value < (s64)val)
return 0;
break;
case BPF_JLT:
if (reg->umax_value < val)
return 1;
else if (reg->umin_value >= val)
return 0;
break;
case BPF_JSLT:
if (reg->smax_value < (s64)val)
return 1;
else if (reg->smin_value >= (s64)val)
return 0;
break;
case BPF_JGE:
if (reg->umin_value >= val)
return 1;
else if (reg->umax_value < val)
return 0;
break;
case BPF_JSGE:
if (reg->smin_value >= (s64)val)
return 1;
else if (reg->smax_value < (s64)val)
return 0;
break;
case BPF_JLE:
if (reg->umax_value <= val)
return 1;
else if (reg->umin_value > val)
return 0;
break;
case BPF_JSLE:
if (reg->smax_value <= (s64)val)
return 1;
else if (reg->smin_value > (s64)val)
return 0;
break;
}
return -1;
}
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
* In JEQ/JNE cases we also adjust the var_off values.
*/
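/* Worked example (illustrative): for "if (r1 > 10)" with r1 in [0, 100],
 * the BPF_JGT case below gives the true branch umin = max(0, 11) = 11
 * and the false branch umax = min(100, 10) = 10, so each branch
 * continues with a tightened range for r1.
 */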
static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
/* If the dst_reg is a pointer, we can't learn anything about its
* variable offset from the compare (unless src_reg were a pointer into
* the same object, but we don't bother with that).
* Since false_reg and true_reg have the same type by construction, we
* only need to check one of them for pointerness.
*/
if (__is_pointer_value(false, false_reg))
return;
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
__mark_reg_known(true_reg, val);
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure;
*/
__mark_reg_known(false_reg, val);
break;
case BPF_JSET:
false_reg->var_off = tnum_and(false_reg->var_off,
tnum_const(~val));
if (is_power_of_2(val))
true_reg->var_off = tnum_or(true_reg->var_off,
tnum_const(val));
break;
case BPF_JGT:
false_reg->umax_value = min(false_reg->umax_value, val);
true_reg->umin_value = max(true_reg->umin_value, val + 1);
break;
case BPF_JSGT:
false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
break;
case BPF_JLT:
false_reg->umin_value = max(false_reg->umin_value, val);
true_reg->umax_value = min(true_reg->umax_value, val - 1);
break;
case BPF_JSLT:
false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
break;
case BPF_JGE:
false_reg->umax_value = min(false_reg->umax_value, val - 1);
true_reg->umin_value = max(true_reg->umin_value, val);
break;
case BPF_JSGE:
false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
break;
case BPF_JLE:
false_reg->umin_value = max(false_reg->umin_value, val + 1);
true_reg->umax_value = min(true_reg->umax_value, val);
break;
case BPF_JSLE:
false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
break;
default:
break;
}
__reg_deduce_bounds(false_reg);
__reg_deduce_bounds(true_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(false_reg);
__reg_bound_offset(true_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(false_reg);
__update_reg_bounds(true_reg);
}
/* Same as above, but for the case that dst_reg holds a constant and src_reg is
* the variable reg.
*/
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val,
u8 opcode)
{
if (__is_pointer_value(false, false_reg))
return;
switch (opcode) {
case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure.
*/
__mark_reg_known(true_reg, val);
break;
case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure;
*/
__mark_reg_known(false_reg, val);
break;
case BPF_JSET:
false_reg->var_off = tnum_and(false_reg->var_off,
tnum_const(~val));
if (is_power_of_2(val))
true_reg->var_off = tnum_or(true_reg->var_off,
tnum_const(val));
break;
case BPF_JGT:
true_reg->umax_value = min(true_reg->umax_value, val - 1);
false_reg->umin_value = max(false_reg->umin_value, val);
break;
case BPF_JSGT:
true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
break;
case BPF_JLT:
true_reg->umin_value = max(true_reg->umin_value, val + 1);
false_reg->umax_value = min(false_reg->umax_value, val);
break;
case BPF_JSLT:
true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
break;
case BPF_JGE:
true_reg->umax_value = min(true_reg->umax_value, val);
false_reg->umin_value = max(false_reg->umin_value, val + 1);
break;
case BPF_JSGE:
true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
break;
case BPF_JLE:
true_reg->umin_value = max(true_reg->umin_value, val);
false_reg->umax_value = min(false_reg->umax_value, val - 1);
break;
case BPF_JSLE:
true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
break;
default:
break;
}
__reg_deduce_bounds(false_reg);
__reg_deduce_bounds(true_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(false_reg);
__reg_bound_offset(true_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(false_reg);
__update_reg_bounds(true_reg);
}
/* Regs are known to be equal, so intersect their min/max/var_off */
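/* Example (illustrative): after "if (r1 == r2)" with r1 in [0, 10] and
 * r2 in [5, 20], both regs continue in the true branch with [5, 10]:
 * umin = max(0, 5), umax = min(10, 20), and var_off intersected.
 */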
static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
struct bpf_reg_state *dst_reg)
{
src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
dst_reg->umin_value);
src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
dst_reg->umax_value);
src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
dst_reg->smin_value);
src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
dst_reg->smax_value);
src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
dst_reg->var_off);
/* We might have learned new bounds from the var_off. */
__update_reg_bounds(src_reg);
__update_reg_bounds(dst_reg);
/* We might have learned something about the sign bit. */
__reg_deduce_bounds(src_reg);
__reg_deduce_bounds(dst_reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(src_reg);
__reg_bound_offset(dst_reg);
/* Intersecting with the old var_off might have improved our bounds
* slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
* then new var_off is (0; 0x7f...fc) which improves our umax.
*/
__update_reg_bounds(src_reg);
__update_reg_bounds(dst_reg);
}
static void reg_combine_min_max(struct bpf_reg_state *true_src,
struct bpf_reg_state *true_dst,
struct bpf_reg_state *false_src,
struct bpf_reg_state *false_dst,
u8 opcode)
{
switch (opcode) {
case BPF_JEQ:
__reg_combine_min_max(true_src, true_dst);
break;
case BPF_JNE:
__reg_combine_min_max(false_src, false_dst);
break;
}
}
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
struct bpf_reg_state *reg, u32 id,
bool is_null)
{
if (reg_type_may_be_null(reg->type) && reg->id == id) {
/* Old offset (both fixed and variable parts) should
* have been known-zero, because we don't allow pointer
* arithmetic on pointers that might be NULL.
*/
if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
!tnum_equals_const(reg->var_off, 0) ||
reg->off)) {
__mark_reg_known_zero(reg);
reg->off = 0;
}
if (is_null) {
reg->type = SCALAR_VALUE;
} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
if (reg->map_ptr->inner_map_meta) {
reg->type = CONST_PTR_TO_MAP;
reg->map_ptr = reg->map_ptr->inner_map_meta;
} else {
reg->type = PTR_TO_MAP_VALUE;
}
} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
reg->type = PTR_TO_SOCKET;
}
if (is_null || !reg_is_refcounted(reg)) {
/* We don't need id from this point onwards anymore,
* thus we should better reset it, so that state
* pruning has chances to take effect.
*/
reg->id = 0;
}
}
}
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
bool is_null)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *reg, *regs = state->regs;
u32 id = regs[regno].id;
int i, j;
if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
__release_reference_state(state, id);
for (i = 0; i < MAX_BPF_REG; i++)
mark_ptr_or_null_reg(state, &regs[i], id, is_null);
for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
bpf_for_each_spilled_reg(i, state, reg) {
if (!reg)
continue;
mark_ptr_or_null_reg(state, reg, id, is_null);
}
}
}
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg,
struct bpf_verifier_state *this_branch,
struct bpf_verifier_state *other_branch)
{
if (BPF_SRC(insn->code) != BPF_X)
return false;
switch (BPF_OP(insn->code)) {
case BPF_JGT:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
find_good_pkt_pointers(this_branch, dst_reg,
dst_reg->type, false);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end > pkt_data', pkt_data > pkt_meta' */
find_good_pkt_pointers(other_branch, src_reg,
src_reg->type, true);
} else {
return false;
}
break;
case BPF_JLT:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
find_good_pkt_pointers(other_branch, dst_reg,
dst_reg->type, true);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end < pkt_data', pkt_data < pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg,
src_reg->type, false);
} else {
return false;
}
break;
case BPF_JGE:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
find_good_pkt_pointers(this_branch, dst_reg,
dst_reg->type, true);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
find_good_pkt_pointers(other_branch, src_reg,
src_reg->type, false);
} else {
return false;
}
break;
case BPF_JLE:
if ((dst_reg->type == PTR_TO_PACKET &&
src_reg->type == PTR_TO_PACKET_END) ||
(dst_reg->type == PTR_TO_PACKET_META &&
reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
find_good_pkt_pointers(other_branch, dst_reg,
dst_reg->type, false);
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
src_reg->type == PTR_TO_PACKET) ||
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
src_reg->type == PTR_TO_PACKET_META)) {
/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
find_good_pkt_pointers(this_branch, src_reg,
src_reg->type, true);
} else {
return false;
}
break;
default:
return false;
}
return true;
}
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
struct bpf_verifier_state *this_branch = env->cur_state;
struct bpf_verifier_state *other_branch;
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode > BPF_JSLE) {
verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->src_reg);
return -EACCES;
}
} else {
if (insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg = &regs[insn->dst_reg];
if (BPF_SRC(insn->code) == BPF_K) {
int pred = is_branch_taken(dst_reg, insn->imm, opcode);
if (pred == 1) {
/* only follow the goto, ignore fall-through */
*insn_idx += insn->off;
return 0;
} else if (pred == 0) {
/* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
}
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
* this is only legit if both are scalars (or pointers to the same
* object, I suppose, but we don't support that right now), because
* otherwise the different base pointers mean the offsets aren't
* comparable.
*/
if (BPF_SRC(insn->code) == BPF_X) {
if (dst_reg->type == SCALAR_VALUE &&
regs[insn->src_reg].type == SCALAR_VALUE) {
if (tnum_is_const(regs[insn->src_reg].var_off))
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].var_off.value,
opcode);
else if (tnum_is_const(dst_reg->var_off))
reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
&regs[insn->src_reg],
dst_reg->var_off.value, opcode);
else if (opcode == BPF_JEQ || opcode == BPF_JNE)
/* Comparing for equality, we can combine knowledge */
reg_combine_min_max(&other_branch_regs[insn->src_reg],
&other_branch_regs[insn->dst_reg],
&regs[insn->src_reg],
&regs[insn->dst_reg], opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
reg_type_may_be_null(dst_reg->type)) {
/* Mark all identical registers in each branch as either
* safe or unknown, depending on the R == 0 or R != 0 conditional.
*/
mark_ptr_or_null_regs(this_branch, insn->dst_reg,
opcode == BPF_JNE);
mark_ptr_or_null_regs(other_branch, insn->dst_reg,
opcode == BPF_JEQ);
} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
this_branch, other_branch) &&
is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->dst_reg);
return -EACCES;
}
if (env->log.level)
print_verifier_state(env, this_branch->frame[this_branch->curframe]);
return 0;
}
/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
return (struct bpf_map *) (unsigned long) imm64;
}
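/* Illustrative sketch (a loader-side helper assumed for this example, not
* part of the verifier): the inverse of ld_imm64_to_map_ptr() above. A
* 64-bit immediate is split across the imm fields of the two instruction
* words that make up a BPF_LD_IMM64.
*/
static inline void example_split_imm64(struct bpf_insn insn[2], u64 imm64)
{
insn[0].imm = (u32)imm64; /* low 32 bits in the first word */
insn[1].imm = (u32)(imm64 >> 32); /* high 32 bits in the second word */
}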
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
verbose(env, "invalid BPF_LD_IMM insn\n");
return -EINVAL;
}
if (insn->off != 0) {
verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
return -EINVAL;
}
err = check_reg_arg(env, insn->dst_reg, DST_OP);
if (err)
return err;
if (insn->src_reg == 0) {
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
regs[insn->dst_reg].type = SCALAR_VALUE;
__mark_reg_known(®s[insn->dst_reg], imm);
return 0;
}
/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
return 0;
}
static bool may_access_skb(enum bpf_prog_type type)
{
switch (type) {
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
return true;
default:
return false;
}
}
/* verify safety of LD_ABS|LD_IND instructions:
* - they can only appear in the programs where ctx == skb
* - since they are wrappers of function calls, they scratch R1-R5 registers,
* preserve R6-R9, and store return value into R0
*
* Implicit input:
* ctx == skb == R6 == CTX
*
* Explicit input:
* SRC == any register
* IMM == 32-bit immediate
*
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 mode = BPF_MODE(insn->code);
int i, err;
if (!may_access_skb(env->prog->type)) {
verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
return -EINVAL;
}
if (!env->ops->gen_ld_abs) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
if (env->subprog_cnt > 1) {
/* when the program has an LD_ABS insn, JITs and the interpreter
* assume that r1 == ctx == skb, which is not the case for callees
* that can have arbitrary arguments. It's problematic
* for main prog as well since JITs would need to analyze
* all functions in order to make proper register save/restore
* decisions in the main prog. Hence disallow LD_ABS with calls
*/
verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
return -EINVAL;
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
return -EINVAL;
}
/* check whether implicit source operand (register R6) is readable */
err = check_reg_arg(env, BPF_REG_6, SRC_OP);
if (err)
return err;
/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
* gen_ld_abs() may terminate the program at runtime, leading to
* reference leak.
*/
err = check_reference_leak(env);
if (err) {
verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
return err;
}
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
}
if (mode == BPF_IND) {
/* check explicit source operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
}
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
}
/* mark destination R0 register as readable, since it contains
* the value fetched from the packet.
* Already marked as written above.
*/
mark_reg_unknown(env, regs, BPF_REG_0);
return 0;
}
static int check_return_code(struct bpf_verifier_env *env)
{
struct bpf_reg_state *reg;
struct tnum range = tnum_range(0, 1);
switch (env->prog->type) {
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_CGROUP_DEVICE:
break;
default:
return 0;
}
reg = cur_regs(env) + BPF_REG_0;
if (reg->type != SCALAR_VALUE) {
verbose(env, "At program exit the register R0 is not a known value (%s)\n",
reg_type_str[reg->type]);
return -EINVAL;
}
if (!tnum_in(range, reg->var_off)) {
verbose(env, "At program exit the register R0 ");
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "has value %s", tn_buf);
} else {
verbose(env, "has unknown scalar value");
}
verbose(env, " should have been 0 or 1\n");
return -EINVAL;
}
return 0;
}
/* non-recursive DFS pseudo code
* 1 procedure DFS-iterative(G,v):
* 2 label v as discovered
* 3 let S be a stack
* 4 S.push(v)
* 5 while S is not empty
* 6 t <- S.pop()
* 7 if t is what we're looking for:
* 8 return t
* 9 for all edges e in G.adjacentEdges(t) do
* 10 if edge e is already labelled
* 11 continue with the next edge
* 12 w <- G.adjacentVertex(t,e)
* 13 if vertex w is not discovered and not explored
* 14 label e as tree-edge
* 15 label w as discovered
* 16 S.push(w)
* 17 continue at 5
* 18 else if vertex w is discovered
* 19 label e as back-edge
* 20 else
* 21 // vertex w is explored
* 22 label e as forward- or cross-edge
* 23 label t as explored
* 24 S.pop()
*
* convention:
* 0x10 - discovered
* 0x11 - discovered and fall-through edge labelled
* 0x12 - discovered and fall-through and branch edges labelled
* 0x20 - explored
*/
enum {
DISCOVERED = 0x10,
EXPLORED = 0x20,
FALLTHROUGH = 1,
BRANCH = 2,
};
#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
static int *insn_stack; /* stack of insns to process */
static int cur_stack; /* current stack index */
static int *insn_state;
/* t, w, e - match pseudo-code above:
* t - index of current instruction
* w - next instruction
* e - edge
*/
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
return 0;
if (w < 0 || w >= env->prog->len) {
verbose_linfo(env, t, "%d: ", t);
verbose(env, "jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
if (e == BRANCH)
/* mark branch target for state pruning */
env->explored_states[w] = STATE_LIST_MARK;
if (insn_state[w] == 0) {
/* tree-edge */
insn_state[t] = DISCOVERED | e;
insn_state[w] = DISCOVERED;
if (cur_stack >= env->prog->len)
return -E2BIG;
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
verbose_linfo(env, t, "%d: ", t);
verbose_linfo(env, w, "%d: ", w);
verbose(env, "back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
/* forward- or cross-edge */
insn_state[t] = DISCOVERED | e;
} else {
verbose(env, "insn state internal bug\n");
return -EFAULT;
}
return 0;
}
/* non-recursive depth-first-search to detect loops in BPF program
* loop == back-edge in directed graph
*/
static int check_cfg(struct bpf_verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
int ret = 0;
int i, t;
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_stack) {
kfree(insn_state);
return -ENOMEM;
}
insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
insn_stack[0] = 0; /* 0 is the first instruction */
cur_stack = 1;
peek_stack:
if (cur_stack == 0)
goto check_state;
t = insn_stack[cur_stack - 1];
if (BPF_CLASS(insns[t].code) == BPF_JMP) {
u8 opcode = BPF_OP(insns[t].code);
if (opcode == BPF_EXIT) {
goto mark_explored;
} else if (opcode == BPF_CALL) {
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
if (insns[t].src_reg == BPF_PSEUDO_CALL) {
env->explored_states[t] = STATE_LIST_MARK;
ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
goto err_free;
}
/* unconditional jump with single edge */
ret = push_insn(t, t + insns[t].off + 1,
FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
/* tell verifier to check for equivalent states
* after every call and jump
*/
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
} else {
/* conditional jump with two edges */
env->explored_states[t] = STATE_LIST_MARK;
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
} else {
/* all other non-branch instructions with single
* fall-through edge
*/
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret == 1)
goto peek_stack;
else if (ret < 0)
goto err_free;
}
mark_explored:
insn_state[t] = EXPLORED;
if (cur_stack-- <= 0) {
verbose(env, "pop stack internal bug\n");
ret = -EFAULT;
goto err_free;
}
goto peek_stack;
check_state:
for (i = 0; i < insn_cnt; i++) {
if (insn_state[i] != EXPLORED) {
verbose(env, "unreachable insn %d\n", i);
ret = -EINVAL;
goto err_free;
}
}
ret = 0; /* cfg looks good */
err_free:
kfree(insn_state);
kfree(insn_stack);
return ret;
}
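/* Illustrative example (not part of the verifier): a minimal program whose
* CFG contains a back-edge, assuming the insn macros from <linux/filter.h>.
* The BPF_JA target is t + off + 1 = 1 - 2 + 1 = 0, so the DFS above
* rejects it with "back-edge from insn 1 to 0".
*/
static const struct bpf_insn example_back_edge_prog[] = {
BPF_MOV64_IMM(BPF_REG_0, 0), /* 0: r0 = 0 */
BPF_JMP_IMM(BPF_JA, 0, 0, -2), /* 1: goto insn 0 (back-edge) */
BPF_EXIT_INSN(), /* 2: exit (unreachable) */
};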
/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE 8
#define MAX_FUNCINFO_REC_SIZE 252
static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
u32 i, nfuncs, urec_size, min_size, prev_offset;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info *krecord;
const struct btf_type *type;
struct bpf_prog *prog;
const struct btf *btf;
void __user *urecord;
int ret = 0;
nfuncs = attr->func_info_cnt;
if (!nfuncs)
return 0;
if (nfuncs != env->subprog_cnt) {
verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
return -EINVAL;
}
urec_size = attr->func_info_rec_size;
if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
urec_size > MAX_FUNCINFO_REC_SIZE ||
urec_size % sizeof(u32)) {
verbose(env, "invalid func info rec size %u\n", urec_size);
return -EINVAL;
}
prog = env->prog;
btf = prog->aux->btf;
urecord = u64_to_user_ptr(attr->func_info);
min_size = min_t(u32, krec_size, urec_size);
krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
if (!krecord)
return -ENOMEM;
for (i = 0; i < nfuncs; i++) {
ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
if (ret) {
if (ret == -E2BIG) {
verbose(env, "nonzero tailing record in func info");
/* set the size kernel expects so loader can zero
* out the rest of the record.
*/
if (put_user(min_size, &uattr->func_info_rec_size))
ret = -EFAULT;
}
goto err_free;
}
if (copy_from_user(&krecord[i], urecord, min_size)) {
ret = -EFAULT;
goto err_free;
}
/* check insn_off */
if (i == 0) {
if (krecord[i].insn_off) {
verbose(env,
"nonzero insn_off %u for the first func info record",
krecord[i].insn_off);
ret = -EINVAL;
goto err_free;
}
} else if (krecord[i].insn_off <= prev_offset) {
verbose(env,
"same or smaller insn offset (%u) than previous func info record (%u)",
krecord[i].insn_off, prev_offset);
ret = -EINVAL;
goto err_free;
}
if (env->subprog_info[i].start != krecord[i].insn_off) {
verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
ret = -EINVAL;
goto err_free;
}
/* check type_id */
type = btf_type_by_id(btf, krecord[i].type_id);
if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
verbose(env, "invalid type id %d in func info",
krecord[i].type_id);
ret = -EINVAL;
goto err_free;
}
prev_offset = krecord[i].insn_off;
urecord += urec_size;
}
prog->aux->func_info = krecord;
prog->aux->func_info_cnt = nfuncs;
return 0;
err_free:
kvfree(krecord);
return ret;
}
static void adjust_btf_func(struct bpf_verifier_env *env)
{
int i;
if (!env->prog->aux->func_info)
return;
for (i = 0; i < env->subprog_cnt; i++)
env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
}
#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
sizeof(((struct bpf_line_info *)(0))->line_col))
#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
static int check_btf_line(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
struct bpf_subprog_info *sub;
struct bpf_line_info *linfo;
struct bpf_prog *prog;
const struct btf *btf;
void __user *ulinfo;
int err;
nr_linfo = attr->line_info_cnt;
if (!nr_linfo)
return 0;
rec_size = attr->line_info_rec_size;
if (rec_size < MIN_BPF_LINEINFO_SIZE ||
rec_size > MAX_LINEINFO_REC_SIZE ||
rec_size & (sizeof(u32) - 1))
return -EINVAL;
/* Need to zero it in case userspace passes in a smaller
* bpf_line_info object.
*/
linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
GFP_KERNEL | __GFP_NOWARN);
if (!linfo)
return -ENOMEM;
prog = env->prog;
btf = prog->aux->btf;
s = 0;
sub = env->subprog_info;
ulinfo = u64_to_user_ptr(attr->line_info);
expected_size = sizeof(struct bpf_line_info);
ncopy = min_t(u32, expected_size, rec_size);
for (i = 0; i < nr_linfo; i++) {
err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
if (err) {
if (err == -E2BIG) {
verbose(env, "nonzero tailing record in line_info");
if (put_user(expected_size,
&uattr->line_info_rec_size))
err = -EFAULT;
}
goto err_free;
}
if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
err = -EFAULT;
goto err_free;
}
/*
* Check insn_off to ensure
* 1) strictly increasing AND
* 2) bounded by prog->len
*
* The linfo[0].insn_off == 0 check logically falls into
* the later "missing bpf_line_info for func..." case
* because the first linfo[0].insn_off must belong to the
* first subprog, and the first subprog must have
* subprog_info[0].start == 0.
*/
if ((i && linfo[i].insn_off <= prev_offset) ||
linfo[i].insn_off >= prog->len) {
verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
i, linfo[i].insn_off, prev_offset,
prog->len);
err = -EINVAL;
goto err_free;
}
if (!prog->insnsi[linfo[i].insn_off].code) {
verbose(env,
"Invalid insn code at line_info[%u].insn_off\n",
i);
err = -EINVAL;
goto err_free;
}
if (!btf_name_by_offset(btf, linfo[i].line_off) ||
!btf_name_by_offset(btf, linfo[i].file_name_off)) {
verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
err = -EINVAL;
goto err_free;
}
if (s != env->subprog_cnt) {
if (linfo[i].insn_off == sub[s].start) {
sub[s].linfo_idx = i;
s++;
} else if (sub[s].start < linfo[i].insn_off) {
verbose(env, "missing bpf_line_info for func#%u\n", s);
err = -EINVAL;
goto err_free;
}
}
prev_offset = linfo[i].insn_off;
ulinfo += rec_size;
}
if (s != env->subprog_cnt) {
verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
env->subprog_cnt - s, s);
err = -EINVAL;
goto err_free;
}
prog->aux->linfo = linfo;
prog->aux->nr_linfo = nr_linfo;
return 0;
err_free:
kvfree(linfo);
return err;
}
static int check_btf_info(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
struct btf *btf;
int err;
if (!attr->func_info_cnt && !attr->line_info_cnt)
return 0;
btf = btf_get_by_fd(attr->prog_btf_fd);
if (IS_ERR(btf))
return PTR_ERR(btf);
env->prog->aux->btf = btf;
err = check_btf_func(env, attr, uattr);
if (err)
return err;
err = check_btf_line(env, attr, uattr);
if (err)
return err;
return 0;
}
/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
struct bpf_reg_state *cur)
{
return old->umin_value <= cur->umin_value &&
old->umax_value >= cur->umax_value &&
old->smin_value <= cur->smin_value &&
old->smax_value >= cur->smax_value;
}
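/* Worked example for range_within() above (illustrative values): an old
* state with umin = 0, umax = 100 and smin = 0, smax = 100 is satisfied
* by a current register with umin = 10, umax = 50, smin = 10, smax = 50,
* since every value the current register can take was already proven safe
* when the old state passed verification. The reverse does not hold.
*/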
/* Maximum number of register states that can exist at once */
#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
struct idpair {
u32 old;
u32 cur;
};
/* If in the old state two registers had the same id, then they need to have
* the same id in the new state as well. But that id could be different from
* the old state, so we need to track the mapping from old to new ids.
* Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
* regs with old id 5 must also have new id 9 for the new state to be safe. But
* regs with a different old id could still have new id 9, we don't care about
* that.
* So we look through our idmap to see if this old id has been seen before. If
* so, we require the new id to match; otherwise, we add the id pair to the map.
*/
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
{
unsigned int i;
for (i = 0; i < ID_MAP_SIZE; i++) {
if (!idmap[i].old) {
/* Reached an empty slot; haven't seen this id before */
idmap[i].old = old_id;
idmap[i].cur = cur_id;
return true;
}
if (idmap[i].old == old_id)
return idmap[i].cur == cur_id;
}
/* We ran out of idmap slots, which should be impossible */
WARN_ON_ONCE(1);
return false;
}
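/* Illustrative sketch of the check_ids() contract (hypothetical caller,
* not kernel code). Assumes idmap was zero-initialized, as the kcalloc()
* in func_states_equal() below guarantees.
*/
static inline bool example_idmap_contract(struct idpair *idmap)
{
bool ok;
ok = check_ids(5, 9, idmap); /* first sighting records 5 -> 9 */
ok &= check_ids(5, 9, idmap); /* consistent mapping, still true */
ok &= !check_ids(5, 7, idmap); /* 5 must map to 9, so this is false */
return ok;
}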
static void clean_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *st)
{
enum bpf_reg_liveness live;
int i, j;
for (i = 0; i < BPF_REG_FP; i++) {
live = st->regs[i].live;
/* liveness must not touch this register anymore */
st->regs[i].live |= REG_LIVE_DONE;
if (!(live & REG_LIVE_READ))
/* since the register is unused, clear its state
* to make further comparison simpler
*/
__mark_reg_not_init(&st->regs[i]);
}
for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
live = st->stack[i].spilled_ptr.live;
/* liveness must not touch this stack slot anymore */
st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
if (!(live & REG_LIVE_READ)) {
__mark_reg_not_init(&st->stack[i].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
st->stack[i].slot_type[j] = STACK_INVALID;
}
}
}
static void clean_verifier_state(struct bpf_verifier_env *env,
struct bpf_verifier_state *st)
{
int i;
if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
/* all regs in this state in all frames were already marked */
return;
for (i = 0; i <= st->curframe; i++)
clean_func_state(env, st->frame[i]);
}
/* the parentage chains form a tree.
* the verifier states are added to state lists at given insn and
* pushed into state stack for future exploration.
* when the verifier reaches bpf_exit insn some of the verifier states
* stored in the state lists have their final liveness state already,
* but a lot of states will get revised from liveness point of view when
* the verifier explores other branches.
* Example:
* 1: r0 = 1
* 2: if r1 == 100 goto pc+1
* 3: r0 = 2
* 4: exit
* when the verifier reaches exit insn the register r0 in the state list of
* insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
* of insn 2 and goes exploring further. At the insn 4 it will walk the
* parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
*
* Since the verifier pushes the branch states as it sees them while exploring
* the program, the condition of walking the branch instruction for the second
* time means that all states below this branch were already explored and
* their final liveness marks are already propagated.
* Hence when the verifier completes the search of the state list in is_state_visited()
* we can call this clean_live_states() function to mark all liveness states
* as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
* will not be used.
* This function also clears the registers and stack slots that are !READ
* to simplify state merging.
*
* An important note here is that walking the same branch instruction in the
* callee doesn't mean that the states are DONE. The verifier has to compare
* the callsites
*/
static void clean_live_states(struct bpf_verifier_env *env, int insn,
struct bpf_verifier_state *cur)
{
struct bpf_verifier_state_list *sl;
int i;
sl = env->explored_states[insn];
if (!sl)
return;
while (sl != STATE_LIST_MARK) {
if (sl->state.curframe != cur->curframe)
goto next;
for (i = 0; i <= cur->curframe; i++)
if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
goto next;
clean_verifier_state(env, &sl->state);
next:
sl = sl->next;
}
}
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
{
bool equal;
if (!(rold->live & REG_LIVE_READ))
/* explored state didn't use this */
return true;
equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
if (rold->type == PTR_TO_STACK)
/* two stack pointers are equal only if they're pointing to
* the same stack frame, since fp-8 in foo != fp-8 in bar
*/
return equal && rold->frameno == rcur->frameno;
if (equal)
return true;
if (rold->type == NOT_INIT)
/* explored state can't have used this */
return true;
if (rcur->type == NOT_INIT)
return false;
switch (rold->type) {
case SCALAR_VALUE:
if (rcur->type == SCALAR_VALUE) {
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
/* We're trying to use a pointer in place of a scalar.
* Even if the scalar was unbounded, this could lead to
* pointer leaks because scalars are allowed to leak
* while pointers are not. We could make this safe in
* special cases if root is calling us, but it's
* probably not worth the hassle.
*/
return false;
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and
* everything else matches, we are OK.
* We don't care about the 'id' value, because nothing
* uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
*/
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_MAP_VALUE_OR_NULL:
/* a PTR_TO_MAP_VALUE could be safe to use as a
* PTR_TO_MAP_VALUE_OR_NULL into the same map.
* However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
* checked, doing so could have affected others with the same
* id, and we can't check for that because we lost the id when
* we converted to a PTR_TO_MAP_VALUE.
*/
if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
return false;
if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
return false;
/* Check our ids match any regs they're supposed to */
return check_ids(rold->id, rcur->id, idmap);
case PTR_TO_PACKET_META:
case PTR_TO_PACKET:
if (rcur->type != rold->type)
return false;
/* We must have at least as much range as the old ptr
* did, so that any accesses which were safe before are
* still safe. This is true even if old range < old off,
* since someone could have accessed through (ptr - k), or
* even done ptr -= k in a register, to get a safe access.
*/
if (rold->range > rcur->range)
return false;
/* If the offsets don't match, we can't trust our alignment;
* nor can we be sure that we won't fall out of range.
*/
if (rold->off != rcur->off)
return false;
/* id relations must be preserved */
if (rold->id && !check_ids(rold->id, rcur->id, idmap))
return false;
/* new val must satisfy old val knowledge */
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
case PTR_TO_PACKET_END:
case PTR_TO_FLOW_KEYS:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
/* Only valid matches are exact, which memcmp() above
* would have accepted
*/
default:
/* Don't know what's going on, just say it's not safe */
return false;
}
/* Shouldn't get here; if we do, say it's not safe */
WARN_ON_ONCE(1);
return false;
}
static bool stacksafe(struct bpf_func_state *old,
struct bpf_func_state *cur,
struct idpair *idmap)
{
int i, spi;
/* walk slots of the explored stack and ignore any additional
* slots in the current stack, since explored(safe) state
* didn't use them
*/
for (i = 0; i < old->allocated_stack; i++) {
spi = i / BPF_REG_SIZE;
if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
i += BPF_REG_SIZE - 1;
/* explored state didn't use this */
continue;
}
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
/* explored stack has more populated slots than current stack
* and these slots were used
*/
if (i >= cur->allocated_stack)
return false;
/* if old state was safe with misc data in the stack
* it will be safe with zero-initialized stack.
* The opposite is not true
*/
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
continue;
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
/* Ex: old explored (safe) state has STACK_SPILL in
* this stack slot, but current has STACK_MISC ->
* these verifier states are not equivalent,
* return false to continue verification of this path
*/
return false;
if (i % BPF_REG_SIZE)
continue;
if (old->stack[spi].slot_type[0] != STACK_SPILL)
continue;
if (!regsafe(&old->stack[spi].spilled_ptr,
&cur->stack[spi].spilled_ptr,
idmap))
/* when explored and current stack slot are both storing
* spilled registers, check that stored pointer types
* are the same as well.
* Ex: explored safe path could have stored
* (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
* but current path has stored:
* (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
* such verifier states are not equivalent.
* return false to continue verification of this path
*/
return false;
}
return true;
}
static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
{
if (old->acquired_refs != cur->acquired_refs)
return false;
return !memcmp(old->refs, cur->refs,
sizeof(*old->refs) * old->acquired_refs);
}
/* compare two verifier states
*
* all states stored in state_list are known to be valid, since
* verifier reached 'bpf_exit' instruction through them
*
* this function is called when verifier exploring different branches of
* execution popped from the state stack. If it sees an old state that has
* more strict register state and more strict stack state then this execution
* branch doesn't need to be explored further, since verifier already
* concluded that more strict state leads to valid finish.
*
* Therefore two states are equivalent if register state is more conservative
* and explored stack state is more conservative than the current one.
* Example:
* explored current
* (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
* (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
*
* In other words, if the current stack state (the one being explored) has more
* valid slots than the old one that already passed validation, it means
* the verifier can stop exploring and conclude that the current state is valid too
*
* Similarly with registers. If explored state has register type as invalid
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
static bool func_states_equal(struct bpf_func_state *old,
struct bpf_func_state *cur)
{
struct idpair *idmap;
bool ret = false;
int i;
idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
/* If we failed to allocate the idmap, just say it's not safe */
if (!idmap)
return false;
for (i = 0; i < MAX_BPF_REG; i++) {
if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
goto out_free;
}
if (!stacksafe(old, cur, idmap))
goto out_free;
if (!refsafe(old, cur))
goto out_free;
ret = true;
out_free:
kfree(idmap);
return ret;
}
static bool states_equal(struct bpf_verifier_env *env,
struct bpf_verifier_state *old,
struct bpf_verifier_state *cur)
{
int i;
if (old->curframe != cur->curframe)
return false;
/* for states to be equal callsites have to be the same
* and all frame states need to be equivalent
*/
for (i = 0; i <= old->curframe; i++) {
if (old->frame[i]->callsite != cur->frame[i]->callsite)
return false;
if (!func_states_equal(old->frame[i], cur->frame[i]))
return false;
}
return true;
}
/* A write screens off any subsequent reads; but write marks come from the
* straight-line code between a state and its parent. When we arrive at an
* equivalent state (jump target or such) we didn't arrive by the straight-line
* code, so read marks in the state must propagate to the parent regardless
* of the state's write marks. That's what 'parent == state->parent' comparison
* in mark_reg_read() is for.
*/
static int propagate_liveness(struct bpf_verifier_env *env,
const struct bpf_verifier_state *vstate,
struct bpf_verifier_state *vparent)
{
int i, frame, err = 0;
struct bpf_func_state *state, *parent;
if (vparent->curframe != vstate->curframe) {
WARN(1, "propagate_live: parent frame %d current frame %d\n",
vparent->curframe, vstate->curframe);
return -EFAULT;
}
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */
for (i = 0; i < BPF_REG_FP; i++) {
if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
continue;
if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
&vparent->frame[vstate->curframe]->regs[i]);
if (err)
return err;
}
}
/* ... and stack slots */
for (frame = 0; frame <= vstate->curframe; frame++) {
state = vstate->frame[frame];
parent = vparent->frame[frame];
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
i < parent->allocated_stack / BPF_REG_SIZE; i++) {
if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
continue;
if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
mark_reg_read(env, &state->stack[i].spilled_ptr,
&parent->stack[i].spilled_ptr);
}
}
return err;
}
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state, *new;
int i, j, err, states_cnt = 0;
sl = env->explored_states[insn_idx];
if (!sl)
/* this 'insn_idx' instruction wasn't marked, so we will not
* be doing state search here
*/
return 0;
clean_live_states(env, insn_idx, cur);
while (sl != STATE_LIST_MARK) {
if (states_equal(env, &sl->state, cur)) {
/* reached equivalent register/stack state,
* prune the search.
* Registers read by the continuation are read by us.
* If we have any write marks in env->cur_state, they
* will prevent corresponding reads in the continuation
* from reaching our parent (an explored_state). Our
* own state will get the read marks recorded, but
* they'll be immediately forgotten as we're pruning
* this state and will pop a new one.
*/
err = propagate_liveness(env, &sl->state, cur);
if (err)
return err;
return 1;
}
sl = sl->next;
states_cnt++;
}
if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
return 0;
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
* but it will either reach the outermost bpf_exit (which means it's safe)
* or it will be rejected. Since there are no loops, we won't be
* seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
* again on the way to bpf_exit
*/
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
if (!new_sl)
return -ENOMEM;
/* add new state to the head of linked list */
new = &new_sl->state;
err = copy_verifier_state(new, cur);
if (err) {
free_verifier_state(new, false);
kfree(new_sl);
return err;
}
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
/* connect new state to parentage chain. Current frame needs all
* registers connected. Only r6 - r9 of the callers are alive (pushed
* to the stack implicitly by JITs) so in callers' frames connect just
* r6 - r9 as an optimization. Callers will have r1 - r5 connected to
* the state of the call instruction (with WRITTEN set), and r0 comes
* from callee with its full parentage chain, anyway.
*/
for (j = 0; j <= cur->curframe; j++)
for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
/* clear write marks in current state: the writes we did are not writes
* our child did, so they don't screen off its reads from us.
* (There are no read marks in current state, because reads always mark
* their parent and current state never has children yet. Only
* explored_states can get read marks.)
*/
for (i = 0; i < BPF_REG_FP; i++)
cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
/* all stack frames are accessible from callee, clear them all */
for (j = 0; j <= cur->curframe; j++) {
struct bpf_func_state *frame = cur->frame[j];
struct bpf_func_state *newframe = new->frame[j];
for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
frame->stack[i].spilled_ptr.parent =
&newframe->stack[i].spilled_ptr;
}
}
return 0;
}
/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{
switch (type) {
case PTR_TO_CTX:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
return false;
default:
return true;
}
}
/* If an instruction was previously used with particular pointer types, then we
* need to be careful to avoid cases such as the below, where it may be ok
* for one branch to access the pointer, but not ok for the other branch:
*
* R1 = sock_ptr
* goto X;
* ...
* R1 = some_other_valid_ptr;
* goto X;
* ...
* R2 = *(u32 *)(R1 + 0);
*/
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{
return src != prev && (!reg_type_mismatch_ok(src) ||
!reg_type_mismatch_ok(prev));
}
static int do_check(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
int insn_cnt = env->prog->len, i;
int insn_processed = 0;
bool do_print_state = false;
env->prev_linfo = NULL;
state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
state->curframe = 0;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
if (!state->frame[0]) {
kfree(state);
return -ENOMEM;
}
env->cur_state = state;
init_func_state(env, state->frame[0],
BPF_MAIN_FUNC /* callsite */,
0 /* frameno */,
0 /* subprogno, zero == main subprog */);
for (;;) {
struct bpf_insn *insn;
u8 class;
int err;
if (env->insn_idx >= insn_cnt) {
verbose(env, "invalid insn idx %d insn_cnt %d\n",
env->insn_idx, insn_cnt);
return -EFAULT;
}
insn = &insns[env->insn_idx];
class = BPF_CLASS(insn->code);
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
verbose(env,
"BPF program is too large. Processed %d insn\n",
insn_processed);
return -E2BIG;
}
err = is_state_visited(env, env->insn_idx);
if (err < 0)
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
if (env->log.level) {
if (do_print_state)
verbose(env, "\nfrom %d to %d: safe\n",
env->prev_insn_idx, env->insn_idx);
else
verbose(env, "%d: safe\n", env->insn_idx);
}
goto process_bpf_exit;
}
if (signal_pending(current))
return -EAGAIN;
if (need_resched())
cond_resched();
if (env->log.level > 1 || (env->log.level && do_print_state)) {
if (env->log.level > 1)
verbose(env, "%d:", env->insn_idx);
else
verbose(env, "\nfrom %d to %d:",
env->prev_insn_idx, env->insn_idx);
print_verifier_state(env, state->frame[state->curframe]);
do_print_state = false;
}
if (env->log.level) {
const struct bpf_insn_cbs cbs = {
.cb_print = verbose,
.private_data = env,
};
verbose_linfo(env, env->insn_idx, "; ");
verbose(env, "%d: ", env->insn_idx);
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
if (bpf_prog_is_dev_bound(env->prog->aux)) {
err = bpf_prog_offload_verify_insn(env, env->insn_idx,
env->prev_insn_idx);
if (err)
return err;
}
regs = cur_regs(env);
env->insn_aux_data[env->insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
} else if (class == BPF_LDX) {
enum bpf_reg_type *prev_src_type, src_reg_type;
/* check for reserved fields is already done */
/* check src operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
if (err)
return err;
src_reg_type = regs[insn->src_reg].type;
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
err = check_mem_access(env, env->insn_idx, insn->src_reg,
insn->off, BPF_SIZE(insn->code),
BPF_READ, insn->dst_reg, false);
if (err)
return err;
prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
if (*prev_src_type == NOT_INIT) {
/* saw a valid insn
* dst_reg = *(u32 *)(src_reg + off)
* save type to validate intersecting paths
*/
*prev_src_type = src_reg_type;
} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
/* A buggy or malicious program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
* src_reg == ctx in one branch and
* src_reg == stack|map in some other branch.
* Reject it.
*/
verbose(env, "same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_STX) {
enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, env->insn_idx, insn);
if (err)
return err;
env->insn_idx++;
continue;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
insn->off, BPF_SIZE(insn->code),
BPF_WRITE, insn->src_reg, false);
if (err)
return err;
prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
if (*prev_dst_type == NOT_INIT) {
*prev_dst_type = dst_reg_type;
} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
verbose(env, "same insn cannot be used with different pointers\n");
return -EINVAL;
}
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM ||
insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_ST uses reserved fields\n");
return -EINVAL;
}
/* check src operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (is_ctx_reg(env, insn->dst_reg)) {
verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
insn->dst_reg,
reg_type_str[reg_state(env, insn->dst_reg)->type]);
return -EACCES;
}
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
insn->off, BPF_SIZE(insn->code),
BPF_WRITE, -1, false);
if (err)
return err;
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
(insn->src_reg != BPF_REG_0 &&
insn->src_reg != BPF_PSEUDO_CALL) ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_CALL uses reserved fields\n");
return -EINVAL;
}
if (insn->src_reg == BPF_PSEUDO_CALL)
err = check_func_call(env, insn, &env->insn_idx);
else
err = check_helper_call(env, insn->imm, env->insn_idx);
if (err)
return err;
} else if (opcode == BPF_JA) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_JA uses reserved fields\n");
return -EINVAL;
}
env->insn_idx += insn->off + 1;
continue;
} else if (opcode == BPF_EXIT) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0) {
verbose(env, "BPF_EXIT uses reserved fields\n");
return -EINVAL;
}
if (state->curframe) {
/* exit from nested function */
env->prev_insn_idx = env->insn_idx;
err = prepare_func_exit(env, &env->insn_idx);
if (err)
return err;
do_print_state = true;
continue;
}
err = check_reference_leak(env);
if (err)
return err;
/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
* of bpf_exit, which means that program wrote
* something into it earlier
*/
err = check_reg_arg(env, BPF_REG_0, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, BPF_REG_0)) {
verbose(env, "R0 leaks addr as return value\n");
return -EACCES;
}
err = check_return_code(env);
if (err)
return err;
process_bpf_exit:
err = pop_stack(env, &env->prev_insn_idx,
&env->insn_idx);
if (err < 0) {
if (err != -ENOENT)
return err;
break;
} else {
do_print_state = true;
continue;
}
} else {
err = check_cond_jmp_op(env, insn, &env->insn_idx);
if (err)
return err;
}
} else if (class == BPF_LD) {
u8 mode = BPF_MODE(insn->code);
if (mode == BPF_ABS || mode == BPF_IND) {
err = check_ld_abs(env, insn);
if (err)
return err;
} else if (mode == BPF_IMM) {
err = check_ld_imm(env, insn);
if (err)
return err;
env->insn_idx++;
env->insn_aux_data[env->insn_idx].seen = true;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
}
} else {
verbose(env, "unknown insn class %d\n", class);
return -EINVAL;
}
env->insn_idx++;
}
verbose(env, "processed %d insns (limit %d), stack depth ",
insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
for (i = 0; i < env->subprog_cnt; i++) {
u32 depth = env->subprog_info[i].stack_depth;
verbose(env, "%d", depth);
if (i + 1 < env->subprog_cnt)
verbose(env, "+");
}
verbose(env, "\n");
env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
return 0;
}
static int check_map_prealloc(struct bpf_map *map)
{
return (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
!(map->map_flags & BPF_F_NO_PREALLOC);
}
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
{
/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
* preallocated hash maps, since doing memory allocation
* in overflow_handler can crash depending on where nmi got
* triggered.
*/
if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
if (!check_map_prealloc(map)) {
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
if (map->inner_map_meta &&
!check_map_prealloc(map->inner_map_meta)) {
verbose(env, "perf_event programs can only use preallocated inner hash map\n");
return -EINVAL;
}
}
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
!bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
return -EINVAL;
}
return 0;
}
static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{
return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}
/* look for pseudo eBPF instructions that access map FDs and
* replace them with actual map pointers
*/
static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, j, err;
err = bpf_prog_calc_tag(env->prog);
if (err)
return err;
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
verbose(env, "BPF_LDX uses reserved fields\n");
return -EINVAL;
}
if (BPF_CLASS(insn->code) == BPF_STX &&
((BPF_MODE(insn->code) != BPF_MEM &&
BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
verbose(env, "BPF_STX uses reserved fields\n");
return -EINVAL;
}
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
insn[1].off != 0) {
verbose(env, "invalid bpf_ld_imm64 insn\n");
return -EINVAL;
}
if (insn->src_reg == 0)
/* valid generic load 64-bit imm */
goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
verbose(env,
"unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
f = fdget(insn->imm);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n",
insn->imm);
return PTR_ERR(map);
}
err = check_map_prog_compatibility(env, map, env->prog);
if (err) {
fdput(f);
return err;
}
/* store map pointer inside BPF_LD_IMM64 instruction */
insn[0].imm = (u32) (unsigned long) map;
insn[1].imm = ((u64) (unsigned long) map) >> 32;
/* check whether we recorded this map already */
for (j = 0; j < env->used_map_cnt; j++)
if (env->used_maps[j] == map) {
fdput(f);
goto next_insn;
}
if (env->used_map_cnt >= MAX_USED_MAPS) {
fdput(f);
return -E2BIG;
}
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_used_maps()
*/
map = bpf_map_inc(map, false);
if (IS_ERR(map)) {
fdput(f);
return PTR_ERR(map);
}
env->used_maps[env->used_map_cnt++] = map;
if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog, map)) {
verbose(env, "only one cgroup storage of each type is allowed\n");
fdput(f);
return -EBUSY;
}
fdput(f);
next_insn:
insn++;
i++;
continue;
}
/* Basic sanity check before we invest more work here. */
if (!bpf_opcode_in_insntable(insn->code)) {
verbose(env, "unknown opcode %02x\n", insn->code);
return -EINVAL;
}
}
/* now all pseudo BPF_LD_IMM64 instructions load valid
* 'struct bpf_map *' into a register instead of user map_fd.
* These pointers will be used later by verifier to validate map access.
*/
return 0;
}
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
enum bpf_cgroup_storage_type stype;
int i;
for_each_cgroup_storage_type(stype) {
if (!env->prog->aux->cgroup_storage[stype])
continue;
bpf_cgroup_storage_release(env->prog,
env->prog->aux->cgroup_storage[stype]);
}
for (i = 0; i < env->used_map_cnt; i++)
bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++, insn++)
if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
insn->src_reg = 0;
}
/* single env->prog->insnsi[off] instruction was replaced with the range
* insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
* [0, off) and [off, end) to new locations, so the patched range stays zero
*/
static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
int i;
if (cnt == 1)
return 0;
new_data = vzalloc(array_size(prog_len,
sizeof(struct bpf_insn_aux_data)));
if (!new_data)
return -ENOMEM;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
for (i = off; i < off + cnt - 1; i++)
new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
}
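/* Worked example for adjust_insn_aux_data() above (illustrative numbers):
* patching insn 3 of a 10-insn program with a 4-insn sequence gives
* prog_len = 13. Entries [0, 3) are copied verbatim, the cnt - 1 = 3 newly
* inserted slots [3, 6) are marked seen, and the old entries [3, 10) land
* at [6, 13).
*/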
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
int i;
if (len == 1)
return;
/* NOTE: fake 'exit' subprog should be updated as well. */
for (i = 0; i <= env->subprog_cnt; i++) {
if (env->subprog_info[i].start <= off)
continue;
env->subprog_info[i].start += len - 1;
}
}
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
struct bpf_prog *new_prog;
new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
if (!new_prog)
return NULL;
if (adjust_insn_aux_data(env, new_prog->len, off, len))
return NULL;
adjust_subprog_starts(env, off, len);
return new_prog;
}
/* The verifier does more data flow analysis than llvm and will not
* explore branches that are dead at run time. Malicious programs can
* have dead code too. Therefore replace all dead at-run-time code
* with 'ja -1'.
*
* Just nops are not optimal, e.g. if they sat at the end of the
* program and through another bug we managed to jump there, then
* we'd execute beyond program memory. Returning exception
* code also wouldn't work since we can have subprogs where the dead
* code could be located.
*/
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++) {
if (aux_data[i].seen)
continue;
memcpy(insn + i, &trap, sizeof(trap));
}
}
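/* Note (illustrative): the 'ja -1' trap above jumps to itself, since the
* next insn is computed as pc + off + 1 = pc. A stray jump into dead code
* therefore spins in place instead of falling through into unrelated
* instructions or off the end of the program.
*/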
/* convert load instructions that access fields of a context type into a
* sequence of instructions that access fields of the underlying structure:
* struct __sk_buff -> struct sk_buff
* struct bpf_sock_ops -> struct sock
*/
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
const struct bpf_verifier_ops *ops = env->ops;
int i, cnt, size, ctx_field_size, delta = 0;
const int insn_cnt = env->prog->len;
struct bpf_insn insn_buf[16], *insn;
u32 target_size, size_default, off;
struct bpf_prog *new_prog;
enum bpf_access_type type;
bool is_narrower_load;
if (ops->gen_prologue || env->seen_direct_write) {
if (!ops->gen_prologue) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
if (cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
} else if (cnt) {
new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
env->prog = new_prog;
delta += cnt - 1;
}
}
if (bpf_prog_is_dev_bound(env->prog->aux))
return 0;
insn = env->prog->insnsi + delta;
for (i = 0; i < insn_cnt; i++, insn++) {
bpf_convert_ctx_access_t convert_ctx_access;
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
type = BPF_READ;
else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
insn->code == (BPF_STX | BPF_MEM | BPF_DW))
type = BPF_WRITE;
else
continue;
if (type == BPF_WRITE &&
env->insn_aux_data[i + delta].sanitize_stack_off) {
struct bpf_insn patch[] = {
/* Sanitize suspicious stack slot with zero.
* There are no memory dependencies for this store,
* since it's only using frame pointer and immediate
* constant of zero
*/
BPF_ST_MEM(BPF_DW, BPF_REG_FP,
env->insn_aux_data[i + delta].sanitize_stack_off,
0),
/* the original STX instruction will immediately
* overwrite the same stack slot with appropriate value
*/
*insn,
};
cnt = ARRAY_SIZE(patch);
new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
switch (env->insn_aux_data[i + delta].ptr_type) {
case PTR_TO_CTX:
if (!ops->convert_ctx_access)
continue;
convert_ctx_access = ops->convert_ctx_access;
break;
case PTR_TO_SOCKET:
convert_ctx_access = bpf_sock_convert_ctx_access;
break;
default:
continue;
}
ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
size = BPF_LDST_BYTES(insn);
/* If the read access is a narrower load of the field,
* convert to a 4/8-byte load, to minimize program type specific
* convert_ctx_access changes. If conversion is successful,
* we will apply proper mask to the result.
*/
is_narrower_load = size < ctx_field_size;
size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
off = insn->off;
if (is_narrower_load) {
u8 size_code;
if (type == BPF_WRITE) {
verbose(env, "bpf verifier narrow ctx access misconfigured\n");
return -EINVAL;
}
size_code = BPF_H;
if (ctx_field_size == 4)
size_code = BPF_W;
else if (ctx_field_size == 8)
size_code = BPF_DW;
insn->off = off & ~(size_default - 1);
insn->code = BPF_LDX | BPF_MEM | size_code;
}
target_size = 0;
cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
&target_size);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
(ctx_field_size && !target_size)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
if (is_narrower_load && size < target_size) {
u8 shift = (off & (size_default - 1)) * 8;
if (ctx_field_size <= 4) {
if (shift)
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
insn->dst_reg,
shift);
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
} else {
if (shift)
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
insn->dst_reg,
shift);
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
}
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = new_prog;
insn = new_prog->insnsi + i + delta;
}
return 0;
}
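/* Worked example for the narrow-load rewrite above (illustrative,
* little-endian): loading 1 byte at ctx offset 66 from a 4-byte field at
* offset 64 gives size_default = 4, so insn->off is rounded down to 64 and
* the load widened to BPF_W. With shift = (66 & 3) * 8 = 16, the patch
* appends "w >>= 16" and "w &= 0xff", leaving exactly the requested byte.
*/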
static int jit_subprogs(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog, **func, *tmp;
int i, j, subprog_start, subprog_end = 0, len, subprog;
struct bpf_insn *insn;
void *old_bpf_func;
int err;
if (env->subprog_cnt <= 1)
return 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
/* Upon error here we cannot fall back to interpreter but
* need a hard reject of the program. Thus -EFAULT is
* propagated in any case.
*/
subprog = find_subprog(env, i + insn->imm + 1);
if (subprog < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
i + insn->imm + 1);
return -EFAULT;
}
/* temporarily remember subprog id inside insn instead of
* aux_data, since next loop will split up all insns into funcs
*/
insn->off = subprog;
/* remember original imm in case JIT fails and fallback
* to interpreter will be needed
*/
env->insn_aux_data[i].call_imm = insn->imm;
/* point imm to __bpf_call_base+1 from the JIT's point of view */
insn->imm = 1;
}
err = bpf_prog_alloc_jited_linfo(prog);
if (err)
goto out_undo_insn;
err = -ENOMEM;
func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
if (!func)
goto out_undo_insn;
for (i = 0; i < env->subprog_cnt; i++) {
subprog_start = subprog_end;
subprog_end = env->subprog_info[i + 1].start;
len = subprog_end - subprog_start;
func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
if (!func[i])
goto out_free;
memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
len * sizeof(struct bpf_insn));
func[i]->type = prog->type;
func[i]->len = len;
if (bpf_prog_calc_tag(func[i]))
goto out_free;
func[i]->is_func = 1;
func[i]->aux->func_idx = i;
/* the btf and func_info will be freed only at prog->aux */
func[i]->aux->btf = prog->aux->btf;
func[i]->aux->func_info = prog->aux->func_info;
/* Use bpf_prog_F_tag to indicate functions in stack traces.
* Long term would need debug info to populate names
*/
func[i]->aux->name[0] = 'F';
func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
func[i]->jit_requested = 1;
func[i]->aux->linfo = prog->aux->linfo;
func[i]->aux->nr_linfo = prog->aux->nr_linfo;
func[i]->aux->jited_linfo = prog->aux->jited_linfo;
func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
goto out_free;
}
cond_resched();
}
/* at this point all bpf functions were successfully JITed.
* Now populate all bpf_calls with correct addresses and
* run last pass of JIT
*/
for (i = 0; i < env->subprog_cnt; i++) {
insn = func[i]->insnsi;
for (j = 0; j < func[i]->len; j++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
subprog = insn->off;
insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
func[subprog]->bpf_func -
__bpf_call_base;
}
/* we use the aux data to keep a list of the start addresses
* of the JITed images for each function in the program
*
* for some architectures, such as powerpc64, the imm field
* might not be large enough to hold the offset of the start
* address of the callee's JITed image from __bpf_call_base
*
* in such cases, we can lookup the start address of a callee
* by using its subprog id, available from the off field of
* the call instruction, as an index for this list
*/
func[i]->aux->func = func;
func[i]->aux->func_cnt = env->subprog_cnt;
}
for (i = 0; i < env->subprog_cnt; i++) {
old_bpf_func = func[i]->bpf_func;
tmp = bpf_int_jit_compile(func[i]);
if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
err = -ENOTSUPP;
goto out_free;
}
cond_resched();
}
/* finally lock prog and jit images for all functions and
* populate kallsyms
*/
for (i = 0; i < env->subprog_cnt; i++) {
bpf_prog_lock_ro(func[i]);
bpf_prog_kallsyms_add(func[i]);
}
/* Last step: make now unused interpreter insns from main
* prog consistent for later dump requests, so they can
* later look the same as if they were interpreted only.
*/
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
insn->off = env->insn_aux_data[i].call_imm;
subprog = find_subprog(env, i + insn->off + 1);
insn->imm = subprog;
}
prog->jited = 1;
prog->bpf_func = func[0]->bpf_func;
prog->aux->func = func;
prog->aux->func_cnt = env->subprog_cnt;
bpf_prog_free_unused_jited_linfo(prog);
return 0;
out_free:
for (i = 0; i < env->subprog_cnt; i++)
if (func[i])
bpf_jit_free(func[i]);
kfree(func);
out_undo_insn:
/* cleanup main prog to be interpreted */
prog->jit_requested = 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
insn->off = 0;
insn->imm = env->insn_aux_data[i].call_imm;
}
bpf_prog_free_jited_linfo(prog);
return err;
}
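/* resolve BPF_PSEUDO_CALL instructions: preferably by JITing all
* subprogs (see jit_subprogs() above), otherwise by patching each
* call with the callee's stack depth for the interpreter
*/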
static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
int i, depth;
#endif
int err = 0;
if (env->prog->jit_requested &&
!bpf_prog_is_dev_bound(env->prog->aux)) {
err = jit_subprogs(env);
if (err == 0)
return 0;
if (err == -EFAULT)
return err;
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
for (i = 0; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
depth = get_callee_stack_depth(env, insn, i);
if (depth < 0)
return depth;
bpf_patch_call_args(insn, depth);
}
err = 0;
#endif
return err;
}
/* fixup insn->imm field of bpf_call instructions
* and inline eligible helpers as an explicit sequence of BPF instructions
*
* this function is called after eBPF program passed verification
*/
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
const struct bpf_func_proto *fn;
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
struct bpf_insn mask_and_div[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx div 0 -> 0 */
BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
*insn,
};
struct bpf_insn mask_and_mod[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx mod 0 -> Rx */
BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
*insn,
};
struct bpf_insn *patchlet;
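/* the leading BPF_MOV32_REG truncates the divisor so that the
* 64-bit JNE/JEQ zero test only sees its low 32 bits; the 64-bit
* ALU forms skip that mov via the +1 offset below
*/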
if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
patchlet = mask_and_div + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
} else {
patchlet = mask_and_mod + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
}
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (BPF_CLASS(insn->code) == BPF_LD &&
(BPF_MODE(insn->code) == BPF_ABS ||
BPF_MODE(insn->code) == BPF_IND)) {
cnt = env->ops->gen_ld_abs(insn, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->src_reg == BPF_PSEUDO_CALL)
continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_override_return)
prog->kprobe_override = 1;
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
* be replaced dynamically during runtime in
* the program array.
*/
prog->cb_access = 1;
env->prog->aux->stack_depth = MAX_BPF_STACK;
env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
/* mark bpf_tail_call as different opcode to avoid
* conditional branch in the interpreter for every normal
* call and to prevent accidental JITing by JIT compiler
* that doesn't support bpf_tail_call yet
*/
insn->imm = 0;
insn->code = BPF_JMP | BPF_TAIL_CALL;
aux = &env->insn_aux_data[i + delta];
if (!bpf_map_ptr_unpriv(aux))
continue;
/* instead of changing every JIT dealing with tail_call
* emit two extra insns:
* if (index >= max_entries) goto out;
* index &= array->index_mask;
* to avoid out-of-bounds cpu speculation
*/
if (bpf_map_ptr_poisoned(aux)) {
verbose(env, "tail_call abusing map_ptr\n");
return -EINVAL;
}
map_ptr = BPF_MAP_PTR(aux->map_state);
insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
map_ptr->max_entries, 2);
insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
container_of(map_ptr,
struct bpf_array,
map)->index_mask);
insn_buf[2] = *insn;
cnt = 3;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* and other inlining handlers are currently limited to 64 bit
* only.
*/
if (prog->jit_requested && BITS_PER_LONG == 64 &&
(insn->imm == BPF_FUNC_map_lookup_elem ||
insn->imm == BPF_FUNC_map_update_elem ||
insn->imm == BPF_FUNC_map_delete_elem ||
insn->imm == BPF_FUNC_map_push_elem ||
insn->imm == BPF_FUNC_map_pop_elem ||
insn->imm == BPF_FUNC_map_peek_elem)) {
aux = &env->insn_aux_data[i + delta];
if (bpf_map_ptr_poisoned(aux))
goto patch_call_imm;
map_ptr = BPF_MAP_PTR(aux->map_state);
ops = map_ptr->ops;
if (insn->imm == BPF_FUNC_map_lookup_elem &&
ops->map_gen_lookup) {
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta,
insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
(int (*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_update_elem,
(int (*)(struct bpf_map *map, void *key, void *value,
u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_push_elem,
(int (*)(struct bpf_map *map, void *value,
u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
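/* patch the call to invoke the map's ops directly instead of
* going through the generic helper; the BUILD_BUG_ONs above pin
* the expected signatures
*/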
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_update_elem:
insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_delete_elem:
insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_push_elem:
insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_pop_elem:
insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_peek_elem:
insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
__bpf_call_base;
continue;
}
goto patch_call_imm;
}
patch_call_imm:
fn = env->ops->get_func_proto(insn->imm, env->prog);
/* all functions that have a prototype and that the verifier
* allowed programs to call must be real in-kernel functions
*/
if (!fn->func) {
verbose(env,
"kernel subsystem misconfigured func %s#%d\n",
func_id_name(insn->imm), insn->imm);
return -EFAULT;
}
insn->imm = fn->func - __bpf_call_base;
}
return 0;
}
static void free_states(struct bpf_verifier_env *env)
{
struct bpf_verifier_state_list *sl, *sln;
int i;
if (!env->explored_states)
return;
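/* STATE_LIST_MARK is a sentinel, not a real list node, so the
* walk below stops before trying to free it
*/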
for (i = 0; i < env->prog->len; i++) {
sl = env->explored_states[i];
if (sl)
while (sl != STATE_LIST_MARK) {
sln = sl->next;
free_verifier_state(&sl->state, false);
kfree(sl);
sl = sln;
}
}
kfree(env->explored_states);
}
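/* main entry point of the verifier: run all checks and the
* instruction rewrites on the program and report the verdict
* (and the log) back to user space
*/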
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
union bpf_attr __user *uattr)
{
struct bpf_verifier_env *env;
struct bpf_verifier_log *log;
int ret = -EINVAL;
/* no program is valid */
if (ARRAY_SIZE(bpf_verifier_ops) == 0)
return -EINVAL;
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
log = &env->log;
env->insn_aux_data =
vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
(*prog)->len));
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
if (attr->log_level || attr->log_buf || attr->log_size) {
/* user requested verbose verifier output
* and supplied buffer to store the verification trace
*/
log->level = attr->log_level;
log->ubuf = (char __user *) (unsigned long) attr->log_buf;
log->len_total = attr->log_size;
ret = -EINVAL;
/* log attributes have to be sane */
if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
!log->level || !log->ubuf)
goto err_unlock;
}
env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
env->strict_alignment = false;
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
if (bpf_prog_is_dev_bound(env->prog->aux)) {
ret = bpf_prog_offload_verifier_prep(env->prog);
if (ret)
goto skip_full_check;
}
env->explored_states = kcalloc(env->prog->len,
sizeof(struct bpf_verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
if (!env->explored_states)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = check_subprogs(env);
if (ret < 0)
goto skip_full_check;
ret = check_btf_info(env, attr, uattr);
if (ret < 0)
goto skip_full_check;
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
ret = do_check(env);
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
}
if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
ret = bpf_prog_offload_finalize(env);
skip_full_check:
while (!pop_stack(env, NULL, NULL));
free_states(env);
if (ret == 0)
ret = check_max_stack_depth(env);
/* instruction rewrites happen after this point */
if (ret == 0)
sanitize_dead_code(env);
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
if (ret == 0)
ret = fixup_bpf_calls(env);
if (ret == 0)
ret = fixup_call_args(env);
if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
if (log->level && !log->ubuf) {
ret = -EFAULT;
goto err_release_maps;
}
if (ret == 0 && env->used_map_cnt) {
/* if program passed verifier, update used_maps in bpf_prog_info */
env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
sizeof(env->used_maps[0]),
GFP_KERNEL);
if (!env->prog->aux->used_maps) {
ret = -ENOMEM;
goto err_release_maps;
}
memcpy(env->prog->aux->used_maps, env->used_maps,
sizeof(env->used_maps[0]) * env->used_map_cnt);
env->prog->aux->used_map_cnt = env->used_map_cnt;
/* program is valid. Convert pseudo bpf_ld_imm64 into generic
* bpf_ld_imm64 instructions
*/
convert_pseudo_ld_imm64(env);
}
if (ret == 0)
adjust_btf_func(env);
err_release_maps:
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_used_maps() will release them.
*/
release_maps(env);
*prog = env->prog;
err_unlock:
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
kfree(env);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_1411_1 |
crossvul-cpp_data_good_3478_0 | /*
* Performance events x86 architecture code
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
* For licencing details see kernel-base/COPYING
*/
#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#if 0
#undef wrmsrl
#define wrmsrl(msr, val) \
do { \
trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
(unsigned long)(val)); \
native_write_msr((msr), (u32)((u64)(val)), \
(u32)((u64)(val) >> 32)); \
} while (0)
#endif
/*
* best effort, GUP based copy_from_user() that assumes IRQ or NMI context
*/
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
unsigned long offset, addr = (unsigned long)from;
unsigned long size, len = 0;
struct page *page;
void *map;
int ret;
do {
ret = __get_user_pages_fast(addr, 1, 0, &page);
if (!ret)
break;
offset = addr & (PAGE_SIZE - 1);
size = min(PAGE_SIZE - offset, n - len);
map = kmap_atomic(page);
memcpy(to, map+offset, size);
kunmap_atomic(map);
put_page(page);
len += size;
to += size;
addr += size;
} while (len < n);
return len;
}
struct event_constraint {
union {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64;
};
u64 code;
u64 cmask;
int weight;
};
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
struct perf_event *owners[X86_PMC_IDX_MAX];
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
struct intel_percore;
#define MAX_LBR_ENTRIES 16
struct cpu_hw_events {
/*
* Generic x86 PMC bits
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events;
int n_added;
int n_txn;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
unsigned int group_flag;
/*
* Intel DebugStore bits
*/
struct debug_store *ds;
u64 pebs_enabled;
/*
* Intel LBR bits
*/
int lbr_users;
void *lbr_context;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
/*
* Intel percore register state.
* Coordinate shared resources between HT threads.
*/
int percore_used; /* Used by this CPU? */
struct intel_percore *per_core;
/*
* AMD specific bits
*/
struct amd_nb *amd_nb;
};
#define __EVENT_CONSTRAINT(c, n, m, w) {\
{ .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
}
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
/*
* Constraint on the Event code.
*/
#define INTEL_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
* Constraint on the Event code + UMask + fixed-mask
*
* filter mask to validate fixed counter events.
* the following filters disqualify for fixed counters:
* - inv
* - edge
* - cnt-mask
* The other filters are supported by fixed counters.
* The any-thread option is supported starting with v3.
*/
#define FIXED_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
/*
* Constraint on the Event code + UMask
*/
#define INTEL_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
#define for_each_event_constraint(e, c) \
for ((e) = (c); (e)->weight; (e)++)
/*
* Extra registers for specific events.
* Some events need large masks and require external MSRs.
* Define a mapping to these extra registers.
*/
struct extra_reg {
unsigned int event;
unsigned int msr;
u64 config_mask;
u64 valid_mask;
};
#define EVENT_EXTRA_REG(e, ms, m, vm) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \
EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
union perf_capabilities {
struct {
u64 lbr_format : 6;
u64 pebs_trap : 1;
u64 pebs_arch_reg : 1;
u64 pebs_format : 4;
u64 smm_freeze : 1;
};
u64 capabilities;
};
/*
* struct x86_pmu - generic x86 pmu
*/
struct x86_pmu {
/*
* Generic x86 PMC bits
*/
const char *name;
int version;
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(int added);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
int (*hw_config)(struct perf_event *event);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
int max_events;
int num_counters;
int num_counters_fixed;
int cntval_bits;
u64 cntval_mask;
int apic;
u64 max_period;
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
struct event_constraint *percore_constraints;
void (*quirks)(void);
int perfctr_second_write;
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
/*
* Intel Arch Perfmon v2+
*/
u64 intel_ctrl;
union perf_capabilities intel_cap;
/*
* Intel DebugStore bits
*/
int bts, pebs;
int bts_active, pebs_active;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
/*
* Intel LBR
*/
unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
int lbr_nr; /* hardware stack size */
/*
* Extra registers for events
*/
struct extra_reg *extra_regs;
};
static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
static int x86_perf_event_set_period(struct perf_event *event);
/*
* Generalized hw caching related hw_event table, filled
* in on a per model basis. A value of 0 means
* 'not supported', -1 means 'hw_event makes no sense on
* this CPU', any other value means the raw hw_event
* ID.
*/
#define C(x) PERF_COUNT_HW_CACHE_##x
static u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
static u64 __read_mostly hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
/*
* Propagate event elapsed time into the generic event.
* Can only be executed on the CPU where the event is active.
* Returns the delta events processed.
*/
static u64
x86_perf_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;
if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
/*
* Careful: an NMI might modify the previous event value.
*
* Our tactic to handle this is to first atomically read and
* exchange a new raw count - then add that new-prev delta
* count to the generic event atomically:
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
rdmsrl(hwc->event_base, new_raw_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
/*
* Now we have the new raw value and have updated the prev
* timestamp already. We can now calculate the elapsed delta
* (event-)time and add that to the generic event.
*
* Careful, not all hw sign-extends above the physical width
* of the count.
*/
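/* e.g. with 48-bit counters shift is 16: shifting both raw counts
* left by 16 and the difference arithmetically right by 16
* sign-extends the delta, so even a counter wrap still yields the
* correct small positive count of elapsed events
*/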
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */
static inline int x86_pmu_addr_offset(int index)
{
if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
return index << 1;
return index;
}
static inline unsigned int x86_pmu_config_addr(int index)
{
return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}
static inline unsigned int x86_pmu_event_addr(int index)
{
return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
/*
* Find and validate any extra registers to set up.
*/
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
struct extra_reg *er;
event->hw.extra_reg = 0;
event->hw.extra_config = 0;
if (!x86_pmu.extra_regs)
return 0;
for (er = x86_pmu.extra_regs; er->msr; er++) {
if (er->event != (config & er->config_mask))
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
event->hw.extra_reg = er->msr;
event->hw.extra_config = event->attr.config1;
break;
}
return 0;
}
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
#ifdef CONFIG_X86_LOCAL_APIC
static bool reserve_pmc_hardware(void)
{
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
goto perfctr_fail;
}
for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
goto eventsel_fail;
}
return true;
eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu_config_addr(i));
i = x86_pmu.num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
release_perfctr_nmi(x86_pmu_event_addr(i));
return false;
}
static void release_pmc_hardware(void)
{
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
release_perfctr_nmi(x86_pmu_event_addr(i));
release_evntsel_nmi(x86_pmu_config_addr(i));
}
}
#else
static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}
#endif
static bool check_hw_exists(void)
{
u64 val, val_new = 0;
int i, reg, ret = 0;
/*
* Check to see if the BIOS enabled any of the counters, if so
* complain and bail.
*/
for (i = 0; i < x86_pmu.num_counters; i++) {
reg = x86_pmu_config_addr(i);
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
goto bios_fail;
}
if (x86_pmu.num_counters_fixed) {
reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
if (val & (0x03 << i*4))
goto bios_fail;
}
}
/*
* Now write a value and read it back to see if it matches;
* this is needed to detect certain hardware emulators (qemu/kvm)
* that don't trap on the MSR access and always return 0s.
*/
val = 0xabcdUL;
ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
if (ret || val != val_new)
goto msr_fail;
return true;
bios_fail:
printk(KERN_CONT "Broken BIOS detected, using software events only.\n");
printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
return false;
msr_fail:
printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
return false;
}
static void reserve_ds_buffers(void);
static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
release_pmc_hardware();
release_ds_buffers();
mutex_unlock(&pmc_reserve_mutex);
}
}
static inline int x86_pmu_initialized(void)
{
return x86_pmu.handle_irq != NULL;
}
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
unsigned int cache_type, cache_op, cache_result;
u64 config, val;
config = attr->config;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
val = hw_cache_event_ids[cache_type][cache_op][cache_result];
if (val == 0)
return -ENOENT;
if (val == -1)
return -EINVAL;
hwc->config |= val;
attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
return x86_pmu_extra_regs(val, event);
}
static int x86_setup_perfctr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
u64 config;
if (!is_sampling_event(event)) {
hwc->sample_period = x86_pmu.max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
} else {
/*
* If we have a PMU initialized but no APIC
* interrupts, we cannot sample hardware
* events (user-space has to fall back and
* sample via a hrtimer based software event):
*/
if (!x86_pmu.apic)
return -EOPNOTSUPP;
}
if (attr->type == PERF_TYPE_RAW)
return x86_pmu_extra_regs(event->attr.config, event);
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, event);
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
/*
* The generic map:
*/
config = x86_pmu.event_map(attr->config);
if (config == 0)
return -ENOENT;
if (config == -1LL)
return -EINVAL;
/*
* Branch tracing:
*/
if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
(hwc->sample_period == 1)) {
/* BTS is not supported by this architecture. */
if (!x86_pmu.bts_active)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
if (!attr->exclude_kernel)
return -EOPNOTSUPP;
}
hwc->config |= config;
return 0;
}
static int x86_pmu_hw_config(struct perf_event *event)
{
if (event->attr.precise_ip) {
int precise = 0;
/* Support for constant skid */
if (x86_pmu.pebs_active) {
precise++;
/* Support for IP fixup */
if (x86_pmu.lbr_nr)
precise++;
}
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
}
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
*/
event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
/*
* Count user and OS events unless requested not to
*/
if (!event->attr.exclude_user)
event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
if (!event->attr.exclude_kernel)
event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
if (event->attr.type == PERF_TYPE_RAW)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
return x86_setup_perfctr(event);
}
/*
* Setup the hardware configuration for a given attr_type
*/
static int __x86_pmu_event_init(struct perf_event *event)
{
int err;
if (!x86_pmu_initialized())
return -ENODEV;
err = 0;
if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
else
reserve_ds_buffers();
}
if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmc_reserve_mutex);
}
if (err)
return err;
event->destroy = hw_perf_event_destroy;
event->hw.idx = -1;
event->hw.last_cpu = -1;
event->hw.last_tag = ~0ULL;
return x86_pmu.hw_config(event);
}
static void x86_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
rdmsrl(x86_pmu_config_addr(idx), val);
if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
continue;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(x86_pmu_config_addr(idx), val);
}
}
static void x86_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!x86_pmu_initialized())
return;
if (!cpuc->enabled)
return;
cpuc->n_added = 0;
cpuc->enabled = 0;
barrier();
x86_pmu.disable_all();
}
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 enable_mask)
{
if (hwc->extra_reg)
wrmsrl(hwc->extra_reg, hwc->extra_config);
wrmsrl(hwc->config_base, hwc->config | enable_mask);
}
static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask))
continue;
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
static struct pmu pmu;
static inline int is_x86_event(struct perf_event *event)
{
return event->pmu == &pmu;
}
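/*
* Assign events to counters.
*
* Fastpath: try to reuse each event's previous counter, provided
* its constraint still allows it and nothing else claimed it.
*
* Slowpath: assign from scratch in order of increasing constraint
* weight (the number of usable counters), so that the most
* constrained events get first pick.
*/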
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int i, j, w, wmax, num = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
for (i = 0; i < n; i++) {
c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
constraints[i] = c;
}
/*
* fastpath, try to reuse previous register
*/
for (i = 0; i < n; i++) {
hwc = &cpuc->event_list[i]->hw;
c = constraints[i];
/* never assigned */
if (hwc->idx == -1)
break;
/* constraint still honored */
if (!test_bit(hwc->idx, c->idxmsk))
break;
/* not already used */
if (test_bit(hwc->idx, used_mask))
break;
__set_bit(hwc->idx, used_mask);
if (assign)
assign[i] = hwc->idx;
}
if (i == n)
goto done;
/*
* begin slow path
*/
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
/*
* weight = number of possible counters
*
* 1 = most constrained, only works on one counter
* wmax = least constrained, works on any counter
*
* assign events to counters starting with most
* constrained events.
*/
wmax = x86_pmu.num_counters;
/*
* when fixed event counters are present,
* wmax is incremented by 1 to account
* for one more choice
*/
if (x86_pmu.num_counters_fixed)
wmax++;
for (w = 1, num = n; num && w <= wmax; w++) {
/* for each event */
for (i = 0; num && i < n; i++) {
c = constraints[i];
hwc = &cpuc->event_list[i]->hw;
if (c->weight != w)
continue;
for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
if (!test_bit(j, used_mask))
break;
}
if (j == X86_PMC_IDX_MAX)
break;
__set_bit(j, used_mask);
if (assign)
assign[i] = j;
num--;
}
}
done:
/*
* scheduling failed or is just a simulation,
* free resources if necessary
*/
if (!assign || num) {
for (i = 0; i < n; i++) {
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
}
}
return num ? -ENOSPC : 0;
}
/*
* dogrp: true if we must collect sibling events (group)
* returns total number of events and error code
*/
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
struct perf_event *event;
int n, max_count;
max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
/* current number of events already accepted */
n = cpuc->n_events;
if (is_x86_event(leader)) {
if (n >= max_count)
return -ENOSPC;
cpuc->event_list[n] = leader;
n++;
}
if (!dogrp)
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
if (!is_x86_event(event) ||
event->state <= PERF_EVENT_STATE_OFF)
continue;
if (n >= max_count)
return -ENOSPC;
cpuc->event_list[n] = event;
n++;
}
return n;
}
static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
}
static inline int match_prev_assignment(struct hw_perf_event *hwc,
struct cpu_hw_events *cpuc,
int i)
{
return hwc->idx == cpuc->assign[i] &&
hwc->last_cpu == smp_processor_id() &&
hwc->last_tag == cpuc->tags[i];
}
static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);
static void x86_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
int i, added = cpuc->n_added;
if (!x86_pmu_initialized())
return;
if (cpuc->enabled)
return;
if (cpuc->n_added) {
int n_running = cpuc->n_events - cpuc->n_added;
/*
* apply assignment obtained either from
* hw_perf_group_sched_in() or x86_pmu_enable()
*
* step1: save events moving to new counters
* step2: reprogram moved events into new counters
*/
for (i = 0; i < n_running; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
/*
* we can avoid reprogramming counter if:
* - assigned same counter as last time
* - running on same CPU as last time
* - no other event has used the counter since
*/
if (hwc->idx == -1 ||
match_prev_assignment(hwc, cpuc, i))
continue;
/*
* Ensure we don't accidentally enable a stopped
* counter simply because we rescheduled.
*/
if (hwc->state & PERF_HES_STOPPED)
hwc->state |= PERF_HES_ARCH;
x86_pmu_stop(event, PERF_EF_UPDATE);
}
for (i = 0; i < cpuc->n_events; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
if (!match_prev_assignment(hwc, cpuc, i))
x86_assign_hw_event(event, cpuc, i);
else if (i < n_running)
continue;
if (hwc->state & PERF_HES_ARCH)
continue;
x86_pmu_start(event, PERF_EF_RELOAD);
}
cpuc->n_added = 0;
perf_events_lapic_init();
}
cpuc->enabled = 1;
barrier();
x86_pmu.enable_all(added);
}
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
wrmsrl(hwc->config_base, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
/*
* Set the next IRQ period, based on the hwc->period_left value.
* To be called with the event disabled in hw:
*/
static int
x86_perf_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0, idx = hwc->idx;
if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
/*
* If we are way outside a reasonable range then just skip forward:
*/
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Quirk: certain CPUs don't like it if just 1 hw_event is left:
*/
if (unlikely(left < 2))
left = 2;
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
* The hw event starts counting from this event offset,
* mark it to be able to extract future deltas:
*/
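/* e.g. for left == 1000 on a 48-bit counter this programs
* 0xFFFFFFFFFC18, so the counter overflows and raises the PMI
* after 1000 more increments
*/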
local64_set(&hwc->prev_count, (u64)-left);
wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
/*
* Due to an erratum on certain CPUs we need
* a second write to be sure the register
* is updated properly
*/
if (x86_pmu.perfctr_second_write) {
wrmsrl(hwc->event_base,
(u64)(-left) & x86_pmu.cntval_mask);
}
perf_event_update_userpage(event);
return ret;
}
static void x86_pmu_enable_event(struct perf_event *event)
{
if (__this_cpu_read(cpu_hw_events.enabled))
__x86_pmu_enable_event(&event->hw,
ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
* Add a single event to the PMU.
*
* The event is added to the group of enabled events
* but only if it can be scheduled with existing events.
*/
static int x86_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc;
int assign[X86_PMC_IDX_MAX];
int n, n0, ret;
hwc = &event->hw;
perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
ret = n = collect_events(cpuc, event, false);
if (ret < 0)
goto out;
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (!(flags & PERF_EF_START))
hwc->state |= PERF_HES_ARCH;
/*
* If a group events scheduling transaction was started,
* skip the schedulability test here; it will be performed
* at commit time (->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
goto done_collect;
ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
goto out;
/*
* copy the new assignment; now that we know it is possible
* it will be used by hw_perf_enable()
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
done_collect:
cpuc->n_events = n;
cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;
ret = 0;
out:
perf_pmu_enable(event->pmu);
return ret;
}
static void x86_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = event->hw.idx;
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
return;
if (WARN_ON_ONCE(idx == -1))
return;
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
x86_perf_event_set_period(event);
}
event->hw.state = 0;
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
__set_bit(idx, cpuc->running);
x86_pmu.enable(event);
perf_event_update_userpage(event);
}
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
u64 pebs;
struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_events, cpu);
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("\n");
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
pr_info("CPU#%d: status: %016llx\n", cpu, status);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
rdmsrl(x86_pmu_event_addr(idx), pmc_count);
prev_left = per_cpu(pmc_prev_left[idx], cpu);
pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
cpu, idx, pmc_ctrl);
pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
}
local_irq_restore(flags);
}
static void x86_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
x86_pmu.disable(event);
cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
/*
* Drain the remaining delta count out of an event
* that we are disabling:
*/
x86_perf_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
}
static void x86_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int i;
/*
* If we're called during a txn, we don't need to do anything.
* The events never got scheduled and ->cancel_txn will truncate
* the event_list.
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
return;
x86_pmu_stop(event, PERF_EF_UPDATE);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) {
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event);
while (++i < cpuc->n_events)
cpuc->event_list[i-1] = cpuc->event_list[i];
--cpuc->n_events;
break;
}
}
perf_event_update_userpage(event);
}
static int x86_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct perf_event *event;
int idx, handled = 0;
u64 val;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask)) {
/*
* Though we deactivated the counter, some CPUs
* might still deliver spurious interrupts that
* were in flight. Catch them:
*/
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue;
}
event = cpuc->events[idx];
val = x86_perf_event_update(event);
if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;
/*
* event overflow
*/
handled++;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, 1, &data, regs))
x86_pmu_stop(event, 0);
}
if (handled)
inc_irq_stat(apic_perf_irqs);
return handled;
}
void perf_events_lapic_init(void)
{
if (!x86_pmu.apic || !x86_pmu_initialized())
return;
/*
* Always use NMI for PMU
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
struct pmu_nmi_state {
unsigned int marked;
int handled;
};
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
unsigned int this_nmi;
int handled;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
/* let the kernel handle the unknown nmi */
return NOTIFY_DONE;
/*
* This one is a PMU back-to-back nmi. Two events
* trigger 'simultaneously' raising two back-to-back
* NMIs. If the first NMI handles both, the latter
* will be empty and daze the CPU. So, we drop it to
* avoid false-positive 'unknown nmi' messages.
*/
return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}
apic_write(APIC_LVTPC, APIC_DM_NMI);
handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;
this_nmi = percpu_read(irq_stat.__nmi_count);
if ((handled > 1) ||
/* the next nmi could be a back-to-back nmi */
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
(__this_cpu_read(pmu_nmi.handled) > 1))) {
/*
* We could have two subsequent back-to-back nmis: The
* first handles more than one counter, the 2nd
* handles only one counter and the 3rd handles no
* counter.
*
* This is the 2nd nmi because the previous was
* handling more than one counter. We will mark the
* next (3rd) and then drop it if unhandled.
*/
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
__this_cpu_write(pmu_nmi.handled, handled);
}
return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};
static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;
static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct event_constraint *c;
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}
return &unconstrained;
}
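/*
* Vendor and model specific PMU implementations are compiled
* directly into this translation unit:
*/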
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
int ret = NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
if (x86_pmu.cpu_prepare)
ret = x86_pmu.cpu_prepare(cpu);
break;
case CPU_STARTING:
if (x86_pmu.cpu_starting)
x86_pmu.cpu_starting(cpu);
break;
case CPU_DYING:
if (x86_pmu.cpu_dying)
x86_pmu.cpu_dying(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
if (x86_pmu.cpu_dead)
x86_pmu.cpu_dead(cpu);
break;
default:
break;
}
return ret;
}
static void __init pmu_check_apic(void)
{
if (cpu_has_apic)
return;
x86_pmu.apic = 0;
pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
pr_info("no hardware sampling interrupt available.\n");
}
static int __init init_hw_perf_events(void)
{
struct event_constraint *c;
int err;
pr_info("Performance Events: ");
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
err = intel_pmu_init();
break;
case X86_VENDOR_AMD:
err = amd_pmu_init();
break;
default:
return 0;
}
if (err != 0) {
pr_cont("no PMU driver, software events only.\n");
return 0;
}
pmu_check_apic();
/* sanity check that the hardware exists or is emulated */
if (!check_hw_exists())
return 0;
pr_cont("%s PMU driver.\n", x86_pmu.name);
if (x86_pmu.quirks)
x86_pmu.quirks();
if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}
x86_pmu.intel_ctrl |=
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters);
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if (c->cmask != X86_RAW_EVENT_MASK)
continue;
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
c->weight += x86_pmu.num_counters;
}
}
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(x86_pmu_notifier);
return 0;
}
early_initcall(init_hw_perf_events);
static inline void x86_pmu_read(struct perf_event *event)
{
x86_perf_event_update(event);
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test; it will be performed at commit time
*/
static void x86_pmu_start_txn(struct pmu *pmu)
{
perf_pmu_disable(pmu);
__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
__this_cpu_write(cpu_hw_events.n_txn, 0);
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
/*
* Truncate the collected events.
*/
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
static int x86_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int assign[X86_PMC_IDX_MAX];
int n, ret;
n = cpuc->n_events;
if (!x86_pmu_initialized())
return -EAGAIN;
ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
return ret;
/*
* copy the new assignment; now that we know it is possible
* it will be used by hw_perf_enable()
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
cpuc->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
/*
* validate that we can schedule this event
*/
static int validate_event(struct perf_event *event)
{
struct cpu_hw_events *fake_cpuc;
struct event_constraint *c;
int ret = 0;
fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
if (!fake_cpuc)
return -ENOMEM;
c = x86_pmu.get_event_constraints(fake_cpuc, event);
if (!c || !c->weight)
ret = -ENOSPC;
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(fake_cpuc, event);
kfree(fake_cpuc);
return ret;
}
/*
* validate a single event group
*
* validation includes:
* - check events are compatible with each other
* - events do not compete for the same counter
* - number of events <= number of counters
*
* validation ensures the group can be loaded onto the
* PMU if it was the only group available.
*/
static int validate_group(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
struct cpu_hw_events *fake_cpuc;
int ret, n;
ret = -ENOMEM;
fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
if (!fake_cpuc)
goto out;
/*
* the event is not yet connected with its
* siblings, therefore we must first collect
* existing siblings, then add the new event
* before we can simulate the scheduling
*/
ret = -ENOSPC;
n = collect_events(fake_cpuc, leader, true);
if (n < 0)
goto out_free;
fake_cpuc->n_events = n;
n = collect_events(fake_cpuc, event, false);
if (n < 0)
goto out_free;
fake_cpuc->n_events = n;
ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out_free:
kfree(fake_cpuc);
out:
return ret;
}
static int x86_pmu_event_init(struct perf_event *event)
{
struct pmu *tmp;
int err;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
err = __x86_pmu_event_init(event);
if (!err) {
/*
* we temporarily connect the event to its pmu
* such that validate_group() can classify
* it as an x86 event using is_x86_event()
*/
tmp = event->pmu;
event->pmu = &pmu;
if (event->group_leader != event)
err = validate_group(event);
else
err = validate_event(event);
event->pmu = tmp;
}
if (err) {
if (event->destroy)
event->destroy(event);
}
return err;
}
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
.event_init = x86_pmu_event_init,
.add = x86_pmu_add,
.del = x86_pmu_del,
.start = x86_pmu_start,
.stop = x86_pmu_stop,
.read = x86_pmu_read,
.start_txn = x86_pmu_start_txn,
.cancel_txn = x86_pmu_cancel_txn,
.commit_txn = x86_pmu_commit_txn,
};
/*
* callchain support
*/
static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
/* Ignore warnings */
}
static void backtrace_warning(void *data, char *msg)
{
/* Ignore warnings */
}
static int backtrace_stack(void *data, char *name)
{
return 0;
}
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry *entry = data;
perf_callchain_store(entry, addr);
}
static const struct stacktrace_ops backtrace_ops = {
.warning = backtrace_warning,
.warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
.address = backtrace_address,
.walk_stack = print_context_stack_bp,
};
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->ip);
dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
}
#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
/* 32-bit process in 64-bit kernel. */
struct stack_frame_ia32 frame;
const void __user *fp;
if (!test_thread_flag(TIF_IA32))
return 0;
fp = compat_ptr(regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = 0;
frame.return_address = 0;
bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
break;
if (fp < compat_ptr(regs->sp))
break;
perf_callchain_store(entry, frame.return_address);
fp = compat_ptr(frame.next_frame);
}
return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
return 0;
}
#endif
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct stack_frame frame;
const void __user *fp;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
return;
}
fp = (void __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
if (perf_callchain_user32(regs, entry))
return;
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;
bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
break;
if ((unsigned long)fp < regs->sp)
break;
perf_callchain_store(entry, frame.return_address);
fp = frame.next_frame;
}
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
ip = perf_guest_cbs->get_guest_ip();
else
ip = instruction_pointer(regs);
return ip;
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
if (perf_guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
}
if (regs->flags & PERF_EFLAGS_EXACT)
misc |= PERF_RECORD_MISC_EXACT_IP;
return misc;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3478_0 |
crossvul-cpp_data_bad_5798_0 | #include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "decl.h"
#include "cmd.h"
#include "debugfs.h"
static struct dentry *lbs_dir;
static char *szStates[] = {
"Connected",
"Disconnected"
};
#ifdef PROC_DEBUG
static void lbs_debug_init(struct lbs_private *priv);
#endif
static ssize_t write_file_dummy(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return -EINVAL;
}
static const size_t len = PAGE_SIZE;
static ssize_t lbs_dev_info(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
size_t pos = 0;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
ssize_t res;
if (!buf)
return -ENOMEM;
pos += snprintf(buf+pos, len-pos, "state = %s\n",
szStates[priv->connect_status]);
pos += snprintf(buf+pos, len-pos, "region_code = %02x\n",
(u32) priv->regioncode);
res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
free_page(addr);
return res;
}
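/* expects six integers, in order: sp_error sp_offset
* sp_stabletime sp_calcontrol sp_extsleepclk sp_reserved
*/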
static ssize_t lbs_sleepparams_write(struct file *file,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t buf_size, ret;
struct sleep_params sp;
int p1, p2, p3, p4, p5, p6;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, user_buf, buf_size)) {
ret = -EFAULT;
goto out_unlock;
}
ret = sscanf(buf, "%d %d %d %d %d %d", &p1, &p2, &p3, &p4, &p5, &p6);
if (ret != 6) {
ret = -EINVAL;
goto out_unlock;
}
sp.sp_error = p1;
sp.sp_offset = p2;
sp.sp_stabletime = p3;
sp.sp_calcontrol = p4;
sp.sp_extsleepclk = p5;
sp.sp_reserved = p6;
ret = lbs_cmd_802_11_sleep_params(priv, CMD_ACT_SET, &sp);
if (!ret)
ret = count;
else if (ret > 0)
ret = -EINVAL;
out_unlock:
free_page(addr);
return ret;
}
static ssize_t lbs_sleepparams_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t ret;
size_t pos = 0;
struct sleep_params sp;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
ret = lbs_cmd_802_11_sleep_params(priv, CMD_ACT_GET, &sp);
if (ret)
goto out_unlock;
pos += snprintf(buf, len, "%d %d %d %d %d %d\n", sp.sp_error,
sp.sp_offset, sp.sp_stabletime,
sp.sp_calcontrol, sp.sp_extsleepclk,
sp.sp_reserved);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
out_unlock:
free_page(addr);
return ret;
}
static ssize_t lbs_host_sleep_write(struct file *file,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t buf_size, ret;
int host_sleep;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, user_buf, buf_size)) {
ret = -EFAULT;
goto out_unlock;
}
ret = sscanf(buf, "%d", &host_sleep);
if (ret != 1) {
ret = -EINVAL;
goto out_unlock;
}
if (host_sleep == 0)
ret = lbs_set_host_sleep(priv, 0);
else if (host_sleep == 1) {
if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
netdev_info(priv->dev,
"wake parameters not configured\n");
ret = -EINVAL;
goto out_unlock;
}
ret = lbs_set_host_sleep(priv, 1);
} else {
netdev_err(priv->dev, "invalid option\n");
ret = -EINVAL;
}
if (!ret)
ret = count;
out_unlock:
free_page(addr);
return ret;
}
static ssize_t lbs_host_sleep_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t ret;
size_t pos = 0;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
pos += snprintf(buf, len, "%d\n", priv->is_host_sleep_activated);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
free_page(addr);
return ret;
}
/*
* When calling CMD_802_11_SUBSCRIBE_EVENT with CMD_ACT_GET, we might
* get a bunch of vendor-specific TLVs (a.k.a. IEs) back from the
* firmware. Here's an example:
* 04 01 02 00 00 00 05 01 02 00 00 00 06 01 02 00
* 00 00 07 01 02 00 3c 00 00 00 00 00 00 00 03 03
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
* The 04 01 is the TLV type (here TLV_TYPE_RSSI_LOW), 02 00 is the length,
* 00 00 are the data bytes of this TLV. For this TLV, their meaning is
* defined in mrvlietypes_thresholds
*
* This function searches in this TLV data chunk for a given TLV type
 * and returns a pointer to the start of that TLV (its header; callers
 * cast it to the full structure), or NULL if the TLV wasn't found.
*/
static void *lbs_tlv_find(uint16_t tlv_type, const uint8_t *tlv, uint16_t size)
{
struct mrvl_ie_header *tlv_h;
uint16_t length;
ssize_t pos = 0;
while (pos < size) {
tlv_h = (struct mrvl_ie_header *) tlv;
if (!tlv_h->len)
return NULL;
if (tlv_h->type == cpu_to_le16(tlv_type))
return tlv_h;
length = le16_to_cpu(tlv_h->len) + sizeof(*tlv_h);
pos += length;
tlv += length;
}
return NULL;
}
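/*
 * Worked example (illustrative, using the dump quoted above): a lookup
 * of TLV_TYPE_RSSI_LOW (0x0104, stored little-endian as "04 01")
 * matches the very first header, whose length field reads 0x0002.  On
 * a non-match the cursor advances by le16_to_cpu(len) +
 * sizeof(struct mrvl_ie_header) = 2 + 4 = 6 bytes, landing on the next
 * header ("05 01 02 00 ...") and so on until the chunk is exhausted.
 */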
static ssize_t lbs_threshold_read(uint16_t tlv_type, uint16_t event_mask,
struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct cmd_ds_802_11_subscribe_event *subscribed;
struct mrvl_ie_thresholds *got;
struct lbs_private *priv = file->private_data;
ssize_t ret = 0;
size_t pos = 0;
char *buf;
u8 value;
u8 freq;
int events = 0;
buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
subscribed = kzalloc(sizeof(*subscribed), GFP_KERNEL);
if (!subscribed) {
ret = -ENOMEM;
goto out_page;
}
subscribed->hdr.size = cpu_to_le16(sizeof(*subscribed));
subscribed->action = cpu_to_le16(CMD_ACT_GET);
ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, subscribed);
if (ret)
goto out_cmd;
got = lbs_tlv_find(tlv_type, subscribed->tlv, sizeof(subscribed->tlv));
if (got) {
value = got->value;
freq = got->freq;
events = le16_to_cpu(subscribed->events);
pos += snprintf(buf, len, "%d %d %d\n", value, freq,
!!(events & event_mask));
}
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
out_cmd:
kfree(subscribed);
out_page:
free_page((unsigned long)buf);
return ret;
}
static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask,
struct file *file,
const char __user *userbuf, size_t count,
loff_t *ppos)
{
struct cmd_ds_802_11_subscribe_event *events;
struct mrvl_ie_thresholds *tlv;
struct lbs_private *priv = file->private_data;
ssize_t buf_size;
int value, freq, new_mask;
uint16_t curr_mask;
char *buf;
int ret;
buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
ret = -EFAULT;
goto out_page;
}
ret = sscanf(buf, "%d %d %d", &value, &freq, &new_mask);
if (ret != 3) {
ret = -EINVAL;
goto out_page;
}
events = kzalloc(sizeof(*events), GFP_KERNEL);
if (!events) {
ret = -ENOMEM;
goto out_page;
}
events->hdr.size = cpu_to_le16(sizeof(*events));
events->action = cpu_to_le16(CMD_ACT_GET);
ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events);
if (ret)
goto out_events;
curr_mask = le16_to_cpu(events->events);
if (new_mask)
new_mask = curr_mask | event_mask;
else
new_mask = curr_mask & ~event_mask;
/* Now everything is set and we can send stuff down to the firmware */
tlv = (void *)events->tlv;
events->action = cpu_to_le16(CMD_ACT_SET);
events->events = cpu_to_le16(new_mask);
tlv->header.type = cpu_to_le16(tlv_type);
tlv->header.len = cpu_to_le16(sizeof(*tlv) - sizeof(tlv->header));
tlv->value = value;
if (tlv_type != TLV_TYPE_BCNMISS)
tlv->freq = freq;
/* The command header, the action, the event mask, and one TLV */
events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv));
ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events);
if (!ret)
ret = count;
out_events:
kfree(events);
out_page:
free_page((unsigned long)buf);
return ret;
}
static ssize_t lbs_lowrssi_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_RSSI_LOW, CMD_SUBSCRIBE_RSSI_LOW,
file, userbuf, count, ppos);
}
static ssize_t lbs_lowrssi_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_RSSI_LOW, CMD_SUBSCRIBE_RSSI_LOW,
file, userbuf, count, ppos);
}
static ssize_t lbs_lowsnr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_SNR_LOW, CMD_SUBSCRIBE_SNR_LOW,
file, userbuf, count, ppos);
}
static ssize_t lbs_lowsnr_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_SNR_LOW, CMD_SUBSCRIBE_SNR_LOW,
file, userbuf, count, ppos);
}
static ssize_t lbs_failcount_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_FAILCOUNT, CMD_SUBSCRIBE_FAILCOUNT,
file, userbuf, count, ppos);
}
static ssize_t lbs_failcount_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_FAILCOUNT, CMD_SUBSCRIBE_FAILCOUNT,
file, userbuf, count, ppos);
}
static ssize_t lbs_highrssi_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_RSSI_HIGH, CMD_SUBSCRIBE_RSSI_HIGH,
file, userbuf, count, ppos);
}
static ssize_t lbs_highrssi_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_RSSI_HIGH, CMD_SUBSCRIBE_RSSI_HIGH,
file, userbuf, count, ppos);
}
static ssize_t lbs_highsnr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_SNR_HIGH, CMD_SUBSCRIBE_SNR_HIGH,
file, userbuf, count, ppos);
}
static ssize_t lbs_highsnr_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_SNR_HIGH, CMD_SUBSCRIBE_SNR_HIGH,
file, userbuf, count, ppos);
}
static ssize_t lbs_bcnmiss_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_BCNMISS, CMD_SUBSCRIBE_BCNMISS,
file, userbuf, count, ppos);
}
static ssize_t lbs_bcnmiss_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_BCNMISS, CMD_SUBSCRIBE_BCNMISS,
file, userbuf, count, ppos);
}
static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t pos = 0;
int ret;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
u32 val = 0;
if (!buf)
return -ENOMEM;
ret = lbs_get_reg(priv, CMD_MAC_REG_ACCESS, priv->mac_offset, &val);
mdelay(10);
if (!ret) {
pos = snprintf(buf, len, "MAC[0x%x] = 0x%08x\n",
priv->mac_offset, val);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
}
free_page(addr);
return ret;
}
static ssize_t lbs_rdmac_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
priv->mac_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_wrmac_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
u32 offset, value;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
res = sscanf(buf, "%x %x", &offset, &value);
if (res != 2) {
res = -EFAULT;
goto out_unlock;
}
res = lbs_set_reg(priv, CMD_MAC_REG_ACCESS, offset, value);
mdelay(10);
if (!res)
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t pos = 0;
int ret;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
u32 val;
if (!buf)
return -ENOMEM;
ret = lbs_get_reg(priv, CMD_BBP_REG_ACCESS, priv->bbp_offset, &val);
mdelay(10);
if (!ret) {
pos = snprintf(buf, len, "BBP[0x%x] = 0x%08x\n",
priv->bbp_offset, val);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
}
free_page(addr);
return ret;
}
static ssize_t lbs_rdbbp_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
priv->bbp_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_wrbbp_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
u32 offset, value;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
res = sscanf(buf, "%x %x", &offset, &value);
if (res != 2) {
res = -EFAULT;
goto out_unlock;
}
res = lbs_set_reg(priv, CMD_BBP_REG_ACCESS, offset, value);
mdelay(10);
if (!res)
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t pos = 0;
int ret;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
u32 val;
if (!buf)
return -ENOMEM;
ret = lbs_get_reg(priv, CMD_RF_REG_ACCESS, priv->rf_offset, &val);
mdelay(10);
if (!ret) {
pos = snprintf(buf, len, "RF[0x%x] = 0x%08x\n",
priv->rf_offset, val);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
}
free_page(addr);
return ret;
}
static ssize_t lbs_rdrf_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
priv->rf_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_wrrf_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
u32 offset, value;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
res = sscanf(buf, "%x %x", &offset, &value);
if (res != 2) {
res = -EFAULT;
goto out_unlock;
}
res = lbs_set_reg(priv, CMD_RF_REG_ACCESS, offset, value);
mdelay(10);
if (!res)
res = count;
out_unlock:
free_page(addr);
return res;
}
#define FOPS(fread, fwrite) { \
.owner = THIS_MODULE, \
.open = simple_open, \
.read = (fread), \
.write = (fwrite), \
.llseek = generic_file_llseek, \
}
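/*
 * For reference, FOPS(lbs_sleepparams_read, lbs_sleepparams_write)
 * expands to the file_operations initializer
 *
 *	{
 *		.owner	= THIS_MODULE,
 *		.open	= simple_open,
 *		.read	= lbs_sleepparams_read,
 *		.write	= lbs_sleepparams_write,
 *		.llseek	= generic_file_llseek,
 *	}
 *
 * so each table entry below carries its own fully populated fops.
 */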
struct lbs_debugfs_files {
const char *name;
umode_t perm;
struct file_operations fops;
};
static const struct lbs_debugfs_files debugfs_files[] = {
{ "info", 0444, FOPS(lbs_dev_info, write_file_dummy), },
{ "sleepparams", 0644, FOPS(lbs_sleepparams_read,
lbs_sleepparams_write), },
{ "hostsleep", 0644, FOPS(lbs_host_sleep_read,
lbs_host_sleep_write), },
};
static const struct lbs_debugfs_files debugfs_events_files[] = {
{"low_rssi", 0644, FOPS(lbs_lowrssi_read,
lbs_lowrssi_write), },
{"low_snr", 0644, FOPS(lbs_lowsnr_read,
lbs_lowsnr_write), },
{"failure_count", 0644, FOPS(lbs_failcount_read,
lbs_failcount_write), },
{"beacon_missed", 0644, FOPS(lbs_bcnmiss_read,
lbs_bcnmiss_write), },
{"high_rssi", 0644, FOPS(lbs_highrssi_read,
lbs_highrssi_write), },
{"high_snr", 0644, FOPS(lbs_highsnr_read,
lbs_highsnr_write), },
};
static const struct lbs_debugfs_files debugfs_regs_files[] = {
{"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), },
{"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), },
{"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), },
{"wrbbp", 0600, FOPS(NULL, lbs_wrbbp_write), },
{"rdrf", 0644, FOPS(lbs_rdrf_read, lbs_rdrf_write), },
{"wrrf", 0600, FOPS(NULL, lbs_wrrf_write), },
};
void lbs_debugfs_init(void)
{
if (!lbs_dir)
lbs_dir = debugfs_create_dir("lbs_wireless", NULL);
}
void lbs_debugfs_remove(void)
{
if (lbs_dir)
debugfs_remove(lbs_dir);
}
void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
{
int i;
const struct lbs_debugfs_files *files;
if (!lbs_dir)
goto exit;
priv->debugfs_dir = debugfs_create_dir(dev->name, lbs_dir);
if (!priv->debugfs_dir)
goto exit;
	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) {
files = &debugfs_files[i];
priv->debugfs_files[i] = debugfs_create_file(files->name,
files->perm,
priv->debugfs_dir,
priv,
&files->fops);
}
priv->events_dir = debugfs_create_dir("subscribed_events", priv->debugfs_dir);
if (!priv->events_dir)
goto exit;
	for (i = 0; i < ARRAY_SIZE(debugfs_events_files); i++) {
files = &debugfs_events_files[i];
priv->debugfs_events_files[i] = debugfs_create_file(files->name,
files->perm,
priv->events_dir,
priv,
&files->fops);
}
priv->regs_dir = debugfs_create_dir("registers", priv->debugfs_dir);
if (!priv->regs_dir)
goto exit;
	for (i = 0; i < ARRAY_SIZE(debugfs_regs_files); i++) {
files = &debugfs_regs_files[i];
priv->debugfs_regs_files[i] = debugfs_create_file(files->name,
files->perm,
priv->regs_dir,
priv,
&files->fops);
}
#ifdef PROC_DEBUG
lbs_debug_init(priv);
#endif
exit:
return;
}
void lbs_debugfs_remove_one(struct lbs_private *priv)
{
int i;
	for (i = 0; i < ARRAY_SIZE(debugfs_regs_files); i++)
debugfs_remove(priv->debugfs_regs_files[i]);
debugfs_remove(priv->regs_dir);
	for (i = 0; i < ARRAY_SIZE(debugfs_events_files); i++)
debugfs_remove(priv->debugfs_events_files[i]);
debugfs_remove(priv->events_dir);
#ifdef PROC_DEBUG
debugfs_remove(priv->debugfs_debug);
#endif
	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
debugfs_remove(priv->debugfs_files[i]);
debugfs_remove(priv->debugfs_dir);
}
/* debug entry */
#ifdef PROC_DEBUG
#define item_size(n) (FIELD_SIZEOF(struct lbs_private, n))
#define item_addr(n) (offsetof(struct lbs_private, n))
struct debug_data {
char name[32];
u32 size;
size_t addr;
};
/* To debug any member of struct lbs_private, simply add one line here.
*/
static struct debug_data items[] = {
{"psmode", item_size(psmode), item_addr(psmode)},
{"psstate", item_size(psstate), item_addr(psstate)},
};
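/*
 * Each entry records the size and offset of one struct lbs_private
 * member.  lbs_debug_init() below rebases .addr by the priv pointer, so
 * a read of "psmode" ends up dereferencing
 * (char *)priv + item_addr(psmode), with the access width dispatched at
 * run time from the recorded item_size() in lbs_debugfs_read().
 */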
static int num_of_items = ARRAY_SIZE(items);
/**
* lbs_debugfs_read - proc read function
*
* @file: file to read
* @userbuf: pointer to buffer
* @count: number of bytes to read
* @ppos: read data starting position
*
* returns: amount of data read or negative error code
*/
static ssize_t lbs_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
int val = 0;
size_t pos = 0;
ssize_t res;
char *p;
int i;
struct debug_data *d;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
p = buf;
d = file->private_data;
for (i = 0; i < num_of_items; i++) {
if (d[i].size == 1)
val = *((u8 *) d[i].addr);
else if (d[i].size == 2)
val = *((u16 *) d[i].addr);
else if (d[i].size == 4)
val = *((u32 *) d[i].addr);
else if (d[i].size == 8)
val = *((u64 *) d[i].addr);
pos += sprintf(p + pos, "%s=%d\n", d[i].name, val);
}
res = simple_read_from_buffer(userbuf, count, ppos, p, pos);
free_page(addr);
return res;
}
/**
* lbs_debugfs_write - proc write function
*
* @f: file pointer
* @buf: pointer to data buffer
* @cnt: data number to write
* @ppos: file position
*
* returns: amount of data written
*/
static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
size_t cnt, loff_t *ppos)
{
int r, i;
char *pdata;
char *p;
char *p0;
char *p1;
char *p2;
	struct debug_data *d = f->private_data;
	if (cnt == 0)
		return 0;
	/* allocate one extra byte so the strstr()/strchr() parsing below
	 * always operates on a NUL-terminated buffer */
	pdata = kmalloc(cnt + 1, GFP_KERNEL);
	if (pdata == NULL)
		return 0;
	if (copy_from_user(pdata, buf, cnt)) {
		lbs_deb_debugfs("Copy from user failed\n");
		kfree(pdata);
		return 0;
	}
	pdata[cnt] = '\0';
p0 = pdata;
for (i = 0; i < num_of_items; i++) {
do {
p = strstr(p0, d[i].name);
if (p == NULL)
break;
p1 = strchr(p, '\n');
if (p1 == NULL)
break;
			p0 = p1 + 1;
p2 = strchr(p, '=');
if (!p2)
break;
p2++;
r = simple_strtoul(p2, NULL, 0);
if (d[i].size == 1)
*((u8 *) d[i].addr) = (u8) r;
else if (d[i].size == 2)
*((u16 *) d[i].addr) = (u16) r;
else if (d[i].size == 4)
*((u32 *) d[i].addr) = (u32) r;
else if (d[i].size == 8)
*((u64 *) d[i].addr) = (u64) r;
break;
} while (1);
}
kfree(pdata);
return (ssize_t)cnt;
}
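/*
 * Usage sketch (hypothetical session): the parser above looks for
 * "<name>=<value>" pairs terminated by a newline, with names taken from
 * items[], so something like
 *
 *	echo "psmode=1" > /sys/kernel/debug/lbs_wireless/<dev>/debug
 *
 * stores 1 into priv->psmode via the size-dispatched assignment (echo's
 * trailing newline satisfies the strchr(p, '\n') check).
 */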
static const struct file_operations lbs_debug_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = lbs_debugfs_write,
.read = lbs_debugfs_read,
.llseek = default_llseek,
};
/**
* lbs_debug_init - create debug proc file
*
* @priv: pointer to &struct lbs_private
*
* returns: N/A
*/
static void lbs_debug_init(struct lbs_private *priv)
{
int i;
if (!priv->debugfs_dir)
return;
for (i = 0; i < num_of_items; i++)
items[i].addr += (size_t) priv;
priv->debugfs_debug = debugfs_create_file("debug", 0644,
priv->debugfs_dir, &items[0],
&lbs_debug_fops);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5798_0 |
crossvul-cpp_data_good_2319_0 | /*
* Copyright (c) 2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Copyright (C) 2006-2008 Intel Corporation
* Copyright IBM Corporation, 2008
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Author: Allen M. Kay <allen.m.kay@intel.com>
* Author: Weidong Han <weidong.han@intel.com>
* Author: Ben-Ami Yassour <benami@il.ibm.com>
*/
#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
"Enable device assignment on platforms without interrupt remapping support.");
static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t base_gfn, unsigned long npages);
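/*
 * Pin a run of guest pages: each successful gfn_to_pfn_memslot() call
 * takes a page reference, which is what lets kvm_unpin_pages() release
 * the range in 4kb steps later.  Returns the pfn of the first page, or
 * an error pfn if that first translation fails (in which case nothing
 * is pinned).
 */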
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
unsigned long npages)
{
gfn_t end_gfn;
pfn_t pfn;
pfn = gfn_to_pfn_memslot(slot, gfn);
end_gfn = gfn + npages;
gfn += 1;
if (is_error_noslot_pfn(pfn))
return pfn;
while (gfn < end_gfn)
gfn_to_pfn_memslot(slot, gfn++);
return pfn;
}
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
unsigned long i;
for (i = 0; i < npages; ++i)
kvm_release_pfn_clean(pfn + i);
}
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
gfn_t gfn, end_gfn;
pfn_t pfn;
int r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int flags;
	/* check if the iommu exists and is in use */
if (!domain)
return 0;
gfn = slot->base_gfn;
end_gfn = gfn + slot->npages;
flags = IOMMU_READ;
if (!(slot->flags & KVM_MEM_READONLY))
flags |= IOMMU_WRITE;
if (!kvm->arch.iommu_noncoherent)
flags |= IOMMU_CACHE;
while (gfn < end_gfn) {
unsigned long page_size;
/* Check if already mapped */
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
gfn += 1;
continue;
}
/* Get the page size we could use to map */
page_size = kvm_host_page_size(kvm, gfn);
/* Make sure the page_size does not exceed the memslot */
while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
page_size >>= 1;
/* Make sure gfn is aligned to the page size we want to map */
while ((gfn << PAGE_SHIFT) & (page_size - 1))
page_size >>= 1;
/* Make sure hva is aligned to the page size we want to map */
while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
page_size >>= 1;
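		/*
		 * Example (illustrative): with 2MB host pages, a gfn or hva
		 * that is not 2MB-aligned fails the masks above, so page_size
		 * is halved (2MB -> 1MB -> ... -> 4KB) until gpa and hva are
		 * both multiples of page_size and the run still fits inside
		 * the memslot.
		 */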
/*
* Pin all pages we are about to map in memory. This is
* important because we unmap and unpin in 4kb steps later.
*/
pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
if (is_error_noslot_pfn(pfn)) {
gfn += 1;
continue;
}
/* Map into IO address space */
r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
page_size, flags);
if (r) {
printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%llx\n", pfn);
kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
goto unmap_pages;
}
gfn += page_size >> PAGE_SHIFT;
}
return 0;
unmap_pages:
kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
return r;
}
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
int idx, r = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
if (kvm->arch.iommu_noncoherent)
kvm_arch_register_noncoherent_dma(kvm);
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
r = kvm_iommu_map_pages(kvm, memslot);
if (r)
break;
}
srcu_read_unlock(&kvm->srcu, idx);
return r;
}
int kvm_assign_device(struct kvm *kvm,
struct kvm_assigned_dev_kernel *assigned_dev)
{
struct pci_dev *pdev = NULL;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int r;
bool noncoherent;
	/* check if the iommu exists and is in use */
if (!domain)
return 0;
pdev = assigned_dev->dev;
if (pdev == NULL)
return -ENODEV;
r = iommu_attach_device(domain, &pdev->dev);
if (r) {
dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
return r;
}
noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);
/* Check if need to update IOMMU page table for guest memory */
if (noncoherent != kvm->arch.iommu_noncoherent) {
kvm_iommu_unmap_memslots(kvm);
kvm->arch.iommu_noncoherent = noncoherent;
r = kvm_iommu_map_memslots(kvm);
if (r)
goto out_unmap;
}
pci_set_dev_assigned(pdev);
dev_info(&pdev->dev, "kvm assign device\n");
return 0;
out_unmap:
kvm_iommu_unmap_memslots(kvm);
return r;
}
int kvm_deassign_device(struct kvm *kvm,
struct kvm_assigned_dev_kernel *assigned_dev)
{
struct iommu_domain *domain = kvm->arch.iommu_domain;
struct pci_dev *pdev = NULL;
	/* check if the iommu exists and is in use */
if (!domain)
return 0;
pdev = assigned_dev->dev;
if (pdev == NULL)
return -ENODEV;
iommu_detach_device(domain, &pdev->dev);
pci_clear_dev_assigned(pdev);
dev_info(&pdev->dev, "kvm deassign device\n");
return 0;
}
int kvm_iommu_map_guest(struct kvm *kvm)
{
int r;
if (!iommu_present(&pci_bus_type)) {
printk(KERN_ERR "%s: iommu not found\n", __func__);
return -ENODEV;
}
mutex_lock(&kvm->slots_lock);
kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
if (!kvm->arch.iommu_domain) {
r = -ENOMEM;
goto out_unlock;
}
if (!allow_unsafe_assigned_interrupts &&
!iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
printk(KERN_WARNING "%s: No interrupt remapping support,"
" disallowing device assignment."
" Re-enble with \"allow_unsafe_assigned_interrupts=1\""
" module option.\n", __func__);
iommu_domain_free(kvm->arch.iommu_domain);
kvm->arch.iommu_domain = NULL;
r = -EPERM;
goto out_unlock;
}
r = kvm_iommu_map_memslots(kvm);
if (r)
kvm_iommu_unmap_memslots(kvm);
out_unlock:
mutex_unlock(&kvm->slots_lock);
return r;
}
static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t base_gfn, unsigned long npages)
{
struct iommu_domain *domain;
gfn_t end_gfn, gfn;
pfn_t pfn;
u64 phys;
domain = kvm->arch.iommu_domain;
end_gfn = base_gfn + npages;
gfn = base_gfn;
	/* check if the iommu exists and is in use */
if (!domain)
return;
while (gfn < end_gfn) {
unsigned long unmap_pages;
size_t size;
/* Get physical address */
phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
if (!phys) {
gfn++;
continue;
}
pfn = phys >> PAGE_SHIFT;
/* Unmap address from IO address space */
size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
unmap_pages = 1ULL << get_order(size);
/* Unpin all pages we just unmapped to not leak any memory */
kvm_unpin_pages(kvm, pfn, unmap_pages);
gfn += unmap_pages;
}
}
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int idx;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots)
kvm_iommu_unmap_pages(kvm, memslot);
srcu_read_unlock(&kvm->srcu, idx);
if (kvm->arch.iommu_noncoherent)
kvm_arch_unregister_noncoherent_dma(kvm);
return 0;
}
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
struct iommu_domain *domain = kvm->arch.iommu_domain;
	/* check if the iommu exists and is in use */
if (!domain)
return 0;
mutex_lock(&kvm->slots_lock);
kvm_iommu_unmap_memslots(kvm);
kvm->arch.iommu_domain = NULL;
kvm->arch.iommu_noncoherent = false;
mutex_unlock(&kvm->slots_lock);
iommu_domain_free(domain);
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2319_0 |
crossvul-cpp_data_bad_2020_6 | /*
* in/out function for ltree and lquery
* Teodor Sigaev <teodor@stack.net>
* contrib/ltree/ltree_io.c
*/
#include "postgres.h"
#include <ctype.h>
#include "ltree.h"
#include "crc32.h"
PG_FUNCTION_INFO_V1(ltree_in);
Datum ltree_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_out);
Datum ltree_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(lquery_in);
Datum lquery_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(lquery_out);
Datum lquery_out(PG_FUNCTION_ARGS);
#define UNCHAR ereport(ERROR, \
(errcode(ERRCODE_SYNTAX_ERROR), \
errmsg("syntax error at position %d", \
pos)));
typedef struct
{
char *start;
int len; /* length in bytes */
int flag;
int wlen; /* length in characters */
} nodeitem;
#define LTPRS_WAITNAME 0
#define LTPRS_WAITDELIM 1
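/*
 * Illustrative trace (not from the original comments): parsing
 * "Top.Science" starts in LTPRS_WAITNAME, records lptr->start at 'T'
 * and switches to LTPRS_WAITDELIM; the '.' then finalizes the first
 * nodeitem (len = 3 bytes, wlen = 3 characters) and drops back to
 * LTPRS_WAITNAME for "Science".
 */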
Datum
ltree_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
char *ptr;
nodeitem *list,
*lptr;
int num = 0,
totallen = 0;
int state = LTPRS_WAITNAME;
ltree *result;
ltree_level *curlevel;
int charlen;
int pos = 0;
ptr = buf;
while (*ptr)
{
charlen = pg_mblen(ptr);
if (charlen == 1 && t_iseq(ptr, '.'))
num++;
ptr += charlen;
}
	if (num + 1 > MaxAllocSize / sizeof(nodeitem))
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("number of levels (%d) exceeds the maximum allowed (%d)",
						num + 1, (int) (MaxAllocSize / sizeof(nodeitem)))));
	list = lptr = (nodeitem *) palloc(sizeof(nodeitem) * (num + 1));
ptr = buf;
while (*ptr)
{
charlen = pg_mblen(ptr);
if (state == LTPRS_WAITNAME)
{
if (ISALNUM(ptr))
{
lptr->start = ptr;
lptr->wlen = 0;
state = LTPRS_WAITDELIM;
}
else
UNCHAR;
}
else if (state == LTPRS_WAITDELIM)
{
if (charlen == 1 && t_iseq(ptr, '.'))
{
lptr->len = ptr - lptr->start;
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
totallen += MAXALIGN(lptr->len + LEVEL_HDRSIZE);
lptr++;
state = LTPRS_WAITNAME;
}
else if (!ISALNUM(ptr))
UNCHAR;
}
else
/* internal error */
elog(ERROR, "internal error in parser");
ptr += charlen;
lptr->wlen++;
pos++;
}
if (state == LTPRS_WAITDELIM)
{
lptr->len = ptr - lptr->start;
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
totallen += MAXALIGN(lptr->len + LEVEL_HDRSIZE);
lptr++;
}
else if (!(state == LTPRS_WAITNAME && lptr == list))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Unexpected end of line.")));
result = (ltree *) palloc0(LTREE_HDRSIZE + totallen);
SET_VARSIZE(result, LTREE_HDRSIZE + totallen);
result->numlevel = lptr - list;
curlevel = LTREE_FIRST(result);
lptr = list;
while (lptr - list < result->numlevel)
{
curlevel->len = (uint16) lptr->len;
memcpy(curlevel->name, lptr->start, lptr->len);
curlevel = LEVEL_NEXT(curlevel);
lptr++;
}
pfree(list);
PG_RETURN_POINTER(result);
}
Datum
ltree_out(PG_FUNCTION_ARGS)
{
ltree *in = PG_GETARG_LTREE(0);
char *buf,
*ptr;
int i;
ltree_level *curlevel;
ptr = buf = (char *) palloc(VARSIZE(in));
curlevel = LTREE_FIRST(in);
for (i = 0; i < in->numlevel; i++)
{
if (i != 0)
{
*ptr = '.';
ptr++;
}
memcpy(ptr, curlevel->name, curlevel->len);
ptr += curlevel->len;
curlevel = LEVEL_NEXT(curlevel);
}
*ptr = '\0';
PG_FREE_IF_COPY(in, 0);
PG_RETURN_POINTER(buf);
}
#define LQPRS_WAITLEVEL 0
#define LQPRS_WAITDELIM 1
#define LQPRS_WAITOPEN 2
#define LQPRS_WAITFNUM 3
#define LQPRS_WAITSNUM 4
#define LQPRS_WAITND 5
#define LQPRS_WAITCLOSE 6
#define LQPRS_WAITEND 7
#define LQPRS_WAITVAR 8
#define GETVAR(x) ( *((nodeitem**)LQL_FIRST(x)) )
#define ITEMSIZE MAXALIGN(LQL_HDRSIZE+sizeof(nodeitem*))
#define NEXTLEV(x) ( (lquery_level*)( ((char*)(x)) + ITEMSIZE) )
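/*
 * The state machine below accepts levels of the form
 * "[!]name[@%*][|name...]" plus the star quantifiers "*", "*{n}",
 * "*{n,}", "*{,m}" and "*{n,m}", joined by '.'.  An illustrative query
 * such as "Top.*{0,2}.!Astronomy|Astrology@" thus parses into three
 * lquery_levels, the last carrying LQL_NOT and an LVAR_INCASE variant.
 */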
Datum
lquery_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
char *ptr;
int num = 0,
totallen = 0,
numOR = 0;
int state = LQPRS_WAITLEVEL;
lquery *result;
nodeitem *lptr = NULL;
lquery_level *cur,
*curqlevel,
*tmpql;
lquery_variant *lrptr = NULL;
bool hasnot = false;
bool wasbad = false;
int charlen;
int pos = 0;
ptr = buf;
while (*ptr)
{
charlen = pg_mblen(ptr);
if (charlen == 1)
{
if (t_iseq(ptr, '.'))
num++;
else if (t_iseq(ptr, '|'))
numOR++;
}
ptr += charlen;
}
num++;
	if (num > MaxAllocSize / ITEMSIZE)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("number of levels (%d) exceeds the maximum allowed (%d)",
						num, (int) (MaxAllocSize / ITEMSIZE))));
	curqlevel = tmpql = (lquery_level *) palloc0(ITEMSIZE * num);
ptr = buf;
while (*ptr)
{
charlen = pg_mblen(ptr);
if (state == LQPRS_WAITLEVEL)
{
if (ISALNUM(ptr))
{
GETVAR(curqlevel) = lptr = (nodeitem *) palloc0(sizeof(nodeitem) * (numOR + 1));
lptr->start = ptr;
state = LQPRS_WAITDELIM;
curqlevel->numvar = 1;
}
else if (charlen == 1 && t_iseq(ptr, '!'))
{
GETVAR(curqlevel) = lptr = (nodeitem *) palloc0(sizeof(nodeitem) * (numOR + 1));
lptr->start = ptr + 1;
state = LQPRS_WAITDELIM;
curqlevel->numvar = 1;
curqlevel->flag |= LQL_NOT;
hasnot = true;
}
else if (charlen == 1 && t_iseq(ptr, '*'))
state = LQPRS_WAITOPEN;
else
UNCHAR;
}
else if (state == LQPRS_WAITVAR)
{
if (ISALNUM(ptr))
{
lptr++;
lptr->start = ptr;
state = LQPRS_WAITDELIM;
curqlevel->numvar++;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITDELIM)
{
if (charlen == 1 && t_iseq(ptr, '@'))
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_INCASE;
curqlevel->flag |= LVAR_INCASE;
}
else if (charlen == 1 && t_iseq(ptr, '*'))
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_ANYEND;
curqlevel->flag |= LVAR_ANYEND;
}
else if (charlen == 1 && t_iseq(ptr, '%'))
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_SUBLEXEME;
curqlevel->flag |= LVAR_SUBLEXEME;
}
else if (charlen == 1 && t_iseq(ptr, '|'))
{
lptr->len = ptr - lptr->start -
((lptr->flag & LVAR_SUBLEXEME) ? 1 : 0) -
((lptr->flag & LVAR_INCASE) ? 1 : 0) -
((lptr->flag & LVAR_ANYEND) ? 1 : 0);
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
state = LQPRS_WAITVAR;
}
else if (charlen == 1 && t_iseq(ptr, '.'))
{
lptr->len = ptr - lptr->start -
((lptr->flag & LVAR_SUBLEXEME) ? 1 : 0) -
((lptr->flag & LVAR_INCASE) ? 1 : 0) -
((lptr->flag & LVAR_ANYEND) ? 1 : 0);
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
state = LQPRS_WAITLEVEL;
curqlevel = NEXTLEV(curqlevel);
}
else if (ISALNUM(ptr))
{
if (lptr->flag)
UNCHAR;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITOPEN)
{
if (charlen == 1 && t_iseq(ptr, '{'))
state = LQPRS_WAITFNUM;
else if (charlen == 1 && t_iseq(ptr, '.'))
{
curqlevel->low = 0;
curqlevel->high = 0xffff;
curqlevel = NEXTLEV(curqlevel);
state = LQPRS_WAITLEVEL;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITFNUM)
{
if (charlen == 1 && t_iseq(ptr, ','))
state = LQPRS_WAITSNUM;
else if (t_isdigit(ptr))
{
curqlevel->low = atoi(ptr);
state = LQPRS_WAITND;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITSNUM)
{
if (t_isdigit(ptr))
{
curqlevel->high = atoi(ptr);
state = LQPRS_WAITCLOSE;
}
else if (charlen == 1 && t_iseq(ptr, '}'))
{
curqlevel->high = 0xffff;
state = LQPRS_WAITEND;
}
else
UNCHAR;
}
else if (state == LQPRS_WAITCLOSE)
{
if (charlen == 1 && t_iseq(ptr, '}'))
state = LQPRS_WAITEND;
else if (!t_isdigit(ptr))
UNCHAR;
}
else if (state == LQPRS_WAITND)
{
if (charlen == 1 && t_iseq(ptr, '}'))
{
curqlevel->high = curqlevel->low;
state = LQPRS_WAITEND;
}
else if (charlen == 1 && t_iseq(ptr, ','))
state = LQPRS_WAITSNUM;
else if (!t_isdigit(ptr))
UNCHAR;
}
else if (state == LQPRS_WAITEND)
{
if (charlen == 1 && t_iseq(ptr, '.'))
{
state = LQPRS_WAITLEVEL;
curqlevel = NEXTLEV(curqlevel);
}
else
UNCHAR;
}
else
/* internal error */
elog(ERROR, "internal error in parser");
ptr += charlen;
if (state == LQPRS_WAITDELIM)
lptr->wlen++;
pos++;
}
if (state == LQPRS_WAITDELIM)
{
if (lptr->start == ptr)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Unexpected end of line.")));
lptr->len = ptr - lptr->start -
((lptr->flag & LVAR_SUBLEXEME) ? 1 : 0) -
((lptr->flag & LVAR_INCASE) ? 1 : 0) -
((lptr->flag & LVAR_ANYEND) ? 1 : 0);
if (lptr->len == 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Unexpected end of line.")));
if (lptr->wlen > 255)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("name of level is too long"),
errdetail("Name length is %d, must "
"be < 256, in position %d.",
lptr->wlen, pos)));
}
else if (state == LQPRS_WAITOPEN)
curqlevel->high = 0xffff;
else if (state != LQPRS_WAITEND)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Unexpected end of line.")));
curqlevel = tmpql;
totallen = LQUERY_HDRSIZE;
while ((char *) curqlevel - (char *) tmpql < num * ITEMSIZE)
{
totallen += LQL_HDRSIZE;
if (curqlevel->numvar)
{
lptr = GETVAR(curqlevel);
while (lptr - GETVAR(curqlevel) < curqlevel->numvar)
{
totallen += MAXALIGN(LVAR_HDRSIZE + lptr->len);
lptr++;
}
}
else if (curqlevel->low > curqlevel->high)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Low limit(%d) is greater than upper(%d).",
curqlevel->low, curqlevel->high)));
curqlevel = NEXTLEV(curqlevel);
}
result = (lquery *) palloc0(totallen);
SET_VARSIZE(result, totallen);
result->numlevel = num;
result->firstgood = 0;
result->flag = 0;
if (hasnot)
result->flag |= LQUERY_HASNOT;
cur = LQUERY_FIRST(result);
curqlevel = tmpql;
while ((char *) curqlevel - (char *) tmpql < num * ITEMSIZE)
{
memcpy(cur, curqlevel, LQL_HDRSIZE);
cur->totallen = LQL_HDRSIZE;
if (curqlevel->numvar)
{
lrptr = LQL_FIRST(cur);
lptr = GETVAR(curqlevel);
while (lptr - GETVAR(curqlevel) < curqlevel->numvar)
{
cur->totallen += MAXALIGN(LVAR_HDRSIZE + lptr->len);
lrptr->len = lptr->len;
lrptr->flag = lptr->flag;
lrptr->val = ltree_crc32_sz(lptr->start, lptr->len);
memcpy(lrptr->name, lptr->start, lptr->len);
lptr++;
lrptr = LVAR_NEXT(lrptr);
}
pfree(GETVAR(curqlevel));
if (cur->numvar > 1 || cur->flag != 0)
wasbad = true;
else if (wasbad == false)
(result->firstgood)++;
}
else
wasbad = true;
curqlevel = NEXTLEV(curqlevel);
cur = LQL_NEXT(cur);
}
pfree(tmpql);
PG_RETURN_POINTER(result);
}
Datum
lquery_out(PG_FUNCTION_ARGS)
{
lquery *in = PG_GETARG_LQUERY(0);
char *buf,
*ptr;
int i,
j,
totallen = 1;
lquery_level *curqlevel;
lquery_variant *curtlevel;
curqlevel = LQUERY_FIRST(in);
for (i = 0; i < in->numlevel; i++)
{
totallen++;
if (curqlevel->numvar)
totallen += 1 + (curqlevel->numvar * 4) + curqlevel->totallen;
else
totallen += 2 * 11 + 4;
curqlevel = LQL_NEXT(curqlevel);
}
ptr = buf = (char *) palloc(totallen);
curqlevel = LQUERY_FIRST(in);
for (i = 0; i < in->numlevel; i++)
{
if (i != 0)
{
*ptr = '.';
ptr++;
}
if (curqlevel->numvar)
{
if (curqlevel->flag & LQL_NOT)
{
*ptr = '!';
ptr++;
}
curtlevel = LQL_FIRST(curqlevel);
for (j = 0; j < curqlevel->numvar; j++)
{
if (j != 0)
{
*ptr = '|';
ptr++;
}
memcpy(ptr, curtlevel->name, curtlevel->len);
ptr += curtlevel->len;
if ((curtlevel->flag & LVAR_SUBLEXEME))
{
*ptr = '%';
ptr++;
}
if ((curtlevel->flag & LVAR_INCASE))
{
*ptr = '@';
ptr++;
}
if ((curtlevel->flag & LVAR_ANYEND))
{
*ptr = '*';
ptr++;
}
curtlevel = LVAR_NEXT(curtlevel);
}
}
else
{
if (curqlevel->low == curqlevel->high)
{
sprintf(ptr, "*{%d}", curqlevel->low);
}
else if (curqlevel->low == 0)
{
if (curqlevel->high == 0xffff)
{
*ptr = '*';
*(ptr + 1) = '\0';
}
else
sprintf(ptr, "*{,%d}", curqlevel->high);
}
else if (curqlevel->high == 0xffff)
{
sprintf(ptr, "*{%d,}", curqlevel->low);
}
else
sprintf(ptr, "*{%d,%d}", curqlevel->low, curqlevel->high);
ptr = strchr(ptr, '\0');
}
curqlevel = LQL_NEXT(curqlevel);
}
*ptr = '\0';
PG_FREE_IF_COPY(in, 0);
PG_RETURN_POINTER(buf);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2020_6 |
crossvul-cpp_data_bad_3521_0 | /*
* linux/mm/oom_kill.c
*
* Copyright (C) 1998,2000 Rik van Riel
* Thanks go out to Claus Fischer for some serious inspiration and
* for goading me into coding this file...
* Copyright (C) 2010 Google, Inc.
* Rewritten by David Rientjes
*
* The routines in this file are used to kill a process when
* we're seriously out of memory. This gets called from __alloc_pages()
* in mm/page_alloc.c when we really run out of memory.
*
* Since we won't call these routines often (on a well-configured
* machine) this file will double as a 'coding guide' and a signpost
* for newbie kernel hackers. It features several pointers to major
* kernel subsystems and hints as to where to find out what things do.
*/
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);
/**
* test_set_oom_score_adj() - set current's oom_score_adj and return old value
* @new_val: new oom_score_adj value
*
* Sets the oom_score_adj value for current to @new_val with proper
* synchronization and returns the old value. Usually used to temporarily
* set a value, save the old value in the caller, and then reinstate it later.
*/
int test_set_oom_score_adj(int new_val)
{
struct sighand_struct *sighand = current->sighand;
int old_val;
spin_lock_irq(&sighand->siglock);
old_val = current->signal->oom_score_adj;
if (new_val != old_val) {
if (new_val == OOM_SCORE_ADJ_MIN)
			atomic_inc(&current->mm->oom_disable_count);
		else if (old_val == OOM_SCORE_ADJ_MIN)
			atomic_dec(&current->mm->oom_disable_count);
current->signal->oom_score_adj = new_val;
}
spin_unlock_irq(&sighand->siglock);
return old_val;
}
#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
* @tsk: task struct of which task to consider
* @mask: nodemask passed to page allocator for mempolicy ooms
*
* Task eligibility is determined by whether or not a candidate task, @tsk,
* shares the same mempolicy nodes as current if it is bound by such a policy
* and whether or not it has the same set of allowed cpuset nodes.
*/
static bool has_intersects_mems_allowed(struct task_struct *tsk,
const nodemask_t *mask)
{
struct task_struct *start = tsk;
do {
if (mask) {
/*
* If this is a mempolicy constrained oom, tsk's
* cpuset is irrelevant. Only return true if its
* mempolicy intersects current, otherwise it may be
* needlessly killed.
*/
if (mempolicy_nodemask_intersects(tsk, mask))
return true;
} else {
/*
* This is not a mempolicy constrained oom, so only
* check the mems of tsk's cpuset.
*/
if (cpuset_mems_allowed_intersects(current, tsk))
return true;
}
} while_each_thread(start, tsk);
return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
const nodemask_t *mask)
{
return true;
}
#endif /* CONFIG_NUMA */
/*
* The process p may have detached its own ->mm while exiting or through
* use_mm(), but one or more of its subthreads may still have a valid
* pointer. Return p, or any of its subthreads with a valid ->mm, with
* task_lock() held.
*/
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
struct task_struct *t = p;
do {
task_lock(t);
if (likely(t->mm))
return t;
task_unlock(t);
} while_each_thread(p, t);
return NULL;
}
/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
if (is_global_init(p))
return true;
if (p->flags & PF_KTHREAD)
return true;
/* When mem_cgroup_out_of_memory() and p is not member of the group */
if (mem && !task_in_mem_cgroup(p, mem))
return true;
/* p may not have freeable memory in nodemask */
if (!has_intersects_mems_allowed(p, nodemask))
return true;
return false;
}
/**
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of which task we should calculate
* @totalpages: total present RAM allowed for page allocation
*
* The heuristic for determining which task to kill is made to be as simple and
* predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures.
*/
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;	/* long, so that "points *= 1000" below cannot overflow */
if (oom_unkillable_task(p, mem, nodemask))
return 0;
p = find_lock_task_mm(p);
if (!p)
return 0;
/*
* Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
* so the entire heuristic doesn't need to be executed for something
* that cannot be killed.
*/
if (atomic_read(&p->mm->oom_disable_count)) {
task_unlock(p);
return 0;
}
/*
* The memory controller may have a limit of 0 bytes, so avoid a divide
* by zero, if necessary.
*/
if (!totalpages)
totalpages = 1;
/*
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
points = get_mm_rss(p->mm) + p->mm->nr_ptes;
points += get_mm_counter(p->mm, MM_SWAPENTS);
points *= 1000;
points /= totalpages;
task_unlock(p);
/*
* Root processes get 3% bonus, just like the __vm_enough_memory()
* implementation used by LSMs.
*/
if (has_capability_noaudit(p, CAP_SYS_ADMIN))
points -= 30;
/*
* /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
* either completely disable oom killing or always prefer a certain
* task.
*/
points += p->signal->oom_score_adj;
/*
* Never return 0 for an eligible task that may be killed since it's
* possible that no single user task uses more than 0.1% of memory and
* no single admin tasks uses more than 3.0%.
*/
	return points > 0 ? points : 1;
}
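/*
 * Worked example (illustrative numbers): with totalpages = 1,000,000
 * and a task whose rss + nr_ptes + swap come to 250,000 pages, the
 * baseline is 250,000 * 1000 / 1,000,000 = 250 out of 1000.  A
 * root-owned task drops to 250 - 30 = 220, and oom_score_adj then
 * shifts the final score, which is clamped below at 1 for eligible
 * tasks.
 */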
/*
* Determine the type of allocation constraint.
*/
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
gfp_t gfp_mask, nodemask_t *nodemask,
unsigned long *totalpages)
{
struct zone *zone;
struct zoneref *z;
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
bool cpuset_limited = false;
int nid;
/* Default to all available memory */
*totalpages = totalram_pages + total_swap_pages;
if (!zonelist)
return CONSTRAINT_NONE;
/*
	 * Reach here only when __GFP_NOFAIL is used. So, we should avoid
	 * killing current; we have to fall back to a random task kill in
	 * this case. Hopefully this would be CONSTRAINT_THISNODE, but there
	 * is no way to handle it now.
*/
if (gfp_mask & __GFP_THISNODE)
return CONSTRAINT_NONE;
/*
* This is not a __GFP_THISNODE allocation, so a truncated nodemask in
* the page allocator means a mempolicy is in effect. Cpuset policy
* is enforced in get_page_from_freelist().
*/
if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
*totalpages = total_swap_pages;
for_each_node_mask(nid, *nodemask)
*totalpages += node_spanned_pages(nid);
return CONSTRAINT_MEMORY_POLICY;
}
/* Check this allocation failure is caused by cpuset's wall function */
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask)
if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
cpuset_limited = true;
if (cpuset_limited) {
*totalpages = total_swap_pages;
for_each_node_mask(nid, cpuset_current_mems_allowed)
*totalpages += node_spanned_pages(nid);
return CONSTRAINT_CPUSET;
}
return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
gfp_t gfp_mask, nodemask_t *nodemask,
unsigned long *totalpages)
{
*totalpages = totalram_pages + total_swap_pages;
return CONSTRAINT_NONE;
}
#endif
/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller to hold tasklist_lock.
*
* (not docbooked, we don't want this one cluttering up the manual)
*/
static struct task_struct *select_bad_process(unsigned int *ppoints,
unsigned long totalpages, struct mem_cgroup *mem,
const nodemask_t *nodemask)
{
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
*ppoints = 0;
do_each_thread(g, p) {
unsigned int points;
if (p->exit_state)
continue;
if (oom_unkillable_task(p, mem, nodemask))
continue;
/*
* This task already has access to memory reserves and is
* being killed. Don't allow any other task access to the
* memory reserve.
*
* Note: this may have a chance of deadlock if it gets
* blocked waiting for another task which itself is waiting
* for memory. Is there a better alternative?
*/
if (test_tsk_thread_flag(p, TIF_MEMDIE))
return ERR_PTR(-1UL);
if (!p->mm)
continue;
if (p->flags & PF_EXITING) {
/*
* If p is the current task and is in the process of
* releasing memory, we allow the "kill" to set
* TIF_MEMDIE, which will allow it to gain access to
* memory reserves. Otherwise, it may stall forever.
*
* The loop isn't broken here, however, in case other
* threads are found to have already been oom killed.
*/
if (p == current) {
chosen = p;
*ppoints = 1000;
} else {
/*
* If this task is not being ptraced on exit,
* then wait for it to finish before killing
* some other task unnecessarily.
*/
if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
return ERR_PTR(-1UL);
}
}
points = oom_badness(p, mem, nodemask, totalpages);
if (points > *ppoints) {
chosen = p;
*ppoints = points;
}
} while_each_thread(g, p);
return chosen;
}
/**
* dump_tasks - dump current memory state of all system tasks
* @mem: current's memory controller, if constrained
* @nodemask: nodemask passed to page allocator for mempolicy ooms
*
* Dumps the current memory state of all eligible tasks. Tasks not in the same
* memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
* are not shown.
* State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
* value, oom_score_adj value, and name.
*
* Call with tasklist_lock read-locked.
*/
static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
for_each_process(p) {
if (oom_unkillable_task(p, mem, nodemask))
continue;
task = find_lock_task_mm(p);
if (!task) {
/*
* This is a kthread or all of p's threads have already
* detached their mm's. There's no need to report
* them; they can't be oom killed anyway.
*/
continue;
}
pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
task->pid, task_uid(task), task->tgid,
task->mm->total_vm, get_mm_rss(task->mm),
task_cpu(task), task->signal->oom_adj,
task->signal->oom_score_adj, task->comm);
task_unlock(task);
}
}
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
struct mem_cgroup *mem, const nodemask_t *nodemask)
{
task_lock(current);
pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
"oom_adj=%d, oom_score_adj=%d\n",
current->comm, gfp_mask, order, current->signal->oom_adj,
current->signal->oom_score_adj);
cpuset_print_task_mems_allowed(current);
task_unlock(current);
dump_stack();
mem_cgroup_print_oom_info(mem, p);
show_mem(SHOW_MEM_FILTER_NODES);
if (sysctl_oom_dump_tasks)
dump_tasks(mem, nodemask);
}
#define K(x) ((x) << (PAGE_SHIFT-10))
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
struct task_struct *q;
struct mm_struct *mm;
p = find_lock_task_mm(p);
if (!p)
return 1;
/* mm cannot be safely dereferenced after task_unlock(p) */
mm = p->mm;
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
task_pid_nr(p), p->comm, K(p->mm->total_vm),
K(get_mm_counter(p->mm, MM_ANONPAGES)),
K(get_mm_counter(p->mm, MM_FILEPAGES)));
task_unlock(p);
/*
* Kill all processes sharing p->mm in other thread groups, if any.
* They don't get access to memory reserves or a higher scheduler
* priority, though, to avoid depletion of all memory or task
* starvation. This prevents mm->mmap_sem livelock when an oom killed
 * task cannot exit because it requires the semaphore and it's contended
* by another thread trying to allocate memory itself. That thread will
* now get access to memory reserves since it has a pending fatal
* signal.
*/
for_each_process(q)
if (q->mm == mm && !same_thread_group(q, p)) {
task_lock(q); /* Protect ->comm from prctl() */
pr_err("Kill process %d (%s) sharing same memory\n",
task_pid_nr(q), q->comm);
task_unlock(q);
force_sig(SIGKILL, q);
}
set_tsk_thread_flag(p, TIF_MEMDIE);
force_sig(SIGKILL, p);
return 0;
}
#undef K
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
unsigned int points, unsigned long totalpages,
struct mem_cgroup *mem, nodemask_t *nodemask,
const char *message)
{
struct task_struct *victim = p;
struct task_struct *child;
struct task_struct *t = p;
unsigned int victim_points = 0;
if (printk_ratelimit())
dump_header(p, gfp_mask, order, mem, nodemask);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
* its children or threads, just set TIF_MEMDIE so it can die quickly
*/
if (p->flags & PF_EXITING) {
set_tsk_thread_flag(p, TIF_MEMDIE);
return 0;
}
task_lock(p);
pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
message, task_pid_nr(p), p->comm, points);
task_unlock(p);
/*
* If any of p's children has a different mm and is eligible for kill,
* the one with the highest oom_badness() score is sacrificed for its
* parent. This attempts to lose the minimal amount of work done while
* still freeing memory.
*/
do {
list_for_each_entry(child, &t->children, sibling) {
unsigned int child_points;
if (child->mm == p->mm)
continue;
/*
* oom_badness() returns 0 if the thread is unkillable
*/
child_points = oom_badness(child, mem, nodemask,
totalpages);
if (child_points > victim_points) {
victim = child;
victim_points = child_points;
}
}
} while_each_thread(p, t);
return oom_kill_task(victim, mem);
}
/*
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
int order, const nodemask_t *nodemask)
{
if (likely(!sysctl_panic_on_oom))
return;
if (sysctl_panic_on_oom != 2) {
/*
* panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
* does not panic for cpuset, mempolicy, or memcg allocation
* failures.
*/
if (constraint != CONSTRAINT_NONE)
return;
}
read_lock(&tasklist_lock);
dump_header(NULL, gfp_mask, order, NULL, nodemask);
read_unlock(&tasklist_lock);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
unsigned long limit;
unsigned int points = 0;
struct task_struct *p;
/*
* If current has a pending SIGKILL, then automatically select it. The
* goal is to allow it to allocate so that it may quickly exit and free
* its memory.
*/
if (fatal_signal_pending(current)) {
set_thread_flag(TIF_MEMDIE);
return;
}
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
read_lock(&tasklist_lock);
retry:
p = select_bad_process(&points, limit, mem, NULL);
if (!p || PTR_ERR(p) == -1UL)
goto out;
if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
"Memory cgroup out of memory"))
goto retry;
out:
read_unlock(&tasklist_lock);
}
#endif
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
int register_oom_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);
int unregister_oom_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
/*
* Try to acquire the OOM killer lock for the zones in zonelist. Returns zero
* if a parallel OOM killing is already taking place that includes a zone in
* the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
*/
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
struct zoneref *z;
struct zone *zone;
int ret = 1;
spin_lock(&zone_scan_lock);
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
if (zone_is_oom_locked(zone)) {
ret = 0;
goto out;
}
}
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
/*
* Lock each zone in the zonelist under zone_scan_lock so a
* parallel invocation of try_set_zonelist_oom() doesn't succeed
* when it shouldn't.
*/
zone_set_flag(zone, ZONE_OOM_LOCKED);
}
out:
spin_unlock(&zone_scan_lock);
return ret;
}
/*
* Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
* allocation attempts with zonelists containing them may now recall the OOM
* killer, if necessary.
*/
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
struct zoneref *z;
struct zone *zone;
spin_lock(&zone_scan_lock);
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
zone_clear_flag(zone, ZONE_OOM_LOCKED);
}
spin_unlock(&zone_scan_lock);
}
/*
* Try to acquire the oom killer lock for all system zones. Returns zero if a
* parallel oom killing is taking place, otherwise locks all zones and returns
* non-zero.
*/
static int try_set_system_oom(void)
{
struct zone *zone;
int ret = 1;
spin_lock(&zone_scan_lock);
for_each_populated_zone(zone)
if (zone_is_oom_locked(zone)) {
ret = 0;
goto out;
}
for_each_populated_zone(zone)
zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
spin_unlock(&zone_scan_lock);
return ret;
}
/*
* Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
* attempts or page faults may now recall the oom killer, if necessary.
*/
static void clear_system_oom(void)
{
struct zone *zone;
spin_lock(&zone_scan_lock);
for_each_populated_zone(zone)
zone_clear_flag(zone, ZONE_OOM_LOCKED);
spin_unlock(&zone_scan_lock);
}
/**
* out_of_memory - kill the "best" process when we run out of memory
* @zonelist: zonelist pointer
* @gfp_mask: memory allocation flags
* @order: amount of memory being requested as a power of 2
* @nodemask: nodemask passed to page allocator
*
* If we run out of memory, we have the choice between either
* killing a random task (bad), letting the system crash (worse)
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *nodemask)
{
const nodemask_t *mpol_mask;
struct task_struct *p;
unsigned long totalpages;
unsigned long freed = 0;
unsigned int points;
enum oom_constraint constraint = CONSTRAINT_NONE;
int killed = 0;
blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
if (freed > 0)
/* Got some memory back in the last second. */
return;
/*
* If current has a pending SIGKILL, then automatically select it. The
* goal is to allow it to allocate so that it may quickly exit and free
* its memory.
*/
if (fatal_signal_pending(current)) {
set_thread_flag(TIF_MEMDIE);
return;
}
/*
* Check if there were limitations on the allocation (only relevant for
* NUMA) that may require different handling.
*/
constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
&totalpages);
mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
!oom_unkillable_task(current, NULL, nodemask) &&
	    current->mm && !atomic_read(&current->mm->oom_disable_count)) {
/*
* oom_kill_process() needs tasklist_lock held. If it returns
* non-zero, current could not be killed so we must fallback to
* the tasklist scan.
*/
if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
NULL, nodemask,
"Out of memory (oom_kill_allocating_task)"))
goto out;
}
retry:
p = select_bad_process(&points, totalpages, NULL, mpol_mask);
if (PTR_ERR(p) == -1UL)
goto out;
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
read_unlock(&tasklist_lock);
panic("Out of memory and no killable processes...\n");
}
if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
nodemask, "Out of memory"))
goto retry;
killed = 1;
out:
read_unlock(&tasklist_lock);
/*
* Give "p" a good chance of killing itself before we
* retry to allocate memory unless "p" is current
*/
if (killed && !test_thread_flag(TIF_MEMDIE))
schedule_timeout_uninterruptible(1);
}
/*
* The pagefault handler calls here because it is out of memory, so kill a
* memory-hogging task. If a populated zone has ZONE_OOM_LOCKED set, a parallel
* oom killing is already in progress so do nothing. If a task is found with
* TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
*/
void pagefault_out_of_memory(void)
{
if (try_set_system_oom()) {
out_of_memory(NULL, 0, 0, NULL);
clear_system_oom();
}
if (!test_thread_flag(TIF_MEMDIE))
schedule_timeout_uninterruptible(1);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3521_0 |
crossvul-cpp_data_bad_2218_0 | /*
* Routines for driver control interface
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/threads.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/info.h>
#include <sound/control.h>
/* max number of user-defined controls */
#define MAX_USER_CONTROLS 32
#define MAX_CONTROL_COUNT 1028
struct snd_kctl_ioctl {
struct list_head list; /* list of all ioctls */
snd_kctl_ioctl_func_t fioctl;
};
static DECLARE_RWSEM(snd_ioctl_rwsem);
static LIST_HEAD(snd_control_ioctls);
#ifdef CONFIG_COMPAT
static LIST_HEAD(snd_control_compat_ioctls);
#endif
static int snd_ctl_open(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
card = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_CONTROL);
if (!card) {
err = -ENODEV;
goto __error1;
}
err = snd_card_file_add(card, file);
if (err < 0) {
err = -ENODEV;
goto __error1;
}
if (!try_module_get(card->module)) {
err = -EFAULT;
goto __error2;
}
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (ctl == NULL) {
err = -ENOMEM;
goto __error;
}
INIT_LIST_HEAD(&ctl->events);
init_waitqueue_head(&ctl->change_sleep);
spin_lock_init(&ctl->read_lock);
ctl->card = card;
ctl->prefer_pcm_subdevice = -1;
ctl->prefer_rawmidi_subdevice = -1;
ctl->pid = get_pid(task_pid(current));
file->private_data = ctl;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_add_tail(&ctl->list, &card->ctl_files);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
snd_card_unref(card);
return 0;
__error:
module_put(card->module);
__error2:
snd_card_file_remove(card, file);
__error1:
if (card)
snd_card_unref(card);
return err;
}
static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl)
{
unsigned long flags;
struct snd_kctl_event *cread;
spin_lock_irqsave(&ctl->read_lock, flags);
while (!list_empty(&ctl->events)) {
cread = snd_kctl_event(ctl->events.next);
list_del(&cread->list);
kfree(cread);
}
spin_unlock_irqrestore(&ctl->read_lock, flags);
}
static int snd_ctl_release(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
struct snd_kcontrol *control;
unsigned int idx;
ctl = file->private_data;
file->private_data = NULL;
card = ctl->card;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_del(&ctl->list);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
down_write(&card->controls_rwsem);
list_for_each_entry(control, &card->controls, list)
for (idx = 0; idx < control->count; idx++)
if (control->vd[idx].owner == ctl)
control->vd[idx].owner = NULL;
up_write(&card->controls_rwsem);
snd_ctl_empty_read_queue(ctl);
put_pid(ctl->pid);
kfree(ctl);
module_put(card->module);
snd_card_file_remove(card, file);
return 0;
}
void snd_ctl_notify(struct snd_card *card, unsigned int mask,
struct snd_ctl_elem_id *id)
{
unsigned long flags;
struct snd_ctl_file *ctl;
struct snd_kctl_event *ev;
if (snd_BUG_ON(!card || !id))
return;
read_lock(&card->ctl_files_rwlock);
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
card->mixer_oss_change_count++;
#endif
list_for_each_entry(ctl, &card->ctl_files, list) {
if (!ctl->subscribed)
continue;
spin_lock_irqsave(&ctl->read_lock, flags);
list_for_each_entry(ev, &ctl->events, list) {
if (ev->id.numid == id->numid) {
ev->mask |= mask;
goto _found;
}
}
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (ev) {
ev->id = *id;
ev->mask = mask;
list_add_tail(&ev->list, &ctl->events);
} else {
dev_err(card->dev, "No memory available to allocate event\n");
}
_found:
wake_up(&ctl->change_sleep);
spin_unlock_irqrestore(&ctl->read_lock, flags);
kill_fasync(&ctl->fasync, SIGIO, POLL_IN);
}
read_unlock(&card->ctl_files_rwlock);
}
EXPORT_SYMBOL(snd_ctl_notify);
/**
* snd_ctl_new - create a control instance from the template
* @control: the control template
* @access: the default control access
*
* Allocates a new struct snd_kcontrol instance and copies the given template
* to the new instance. It does not copy volatile data (access).
*
* Return: The pointer of the new instance, or %NULL on failure.
*/
static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
unsigned int access)
{
struct snd_kcontrol *kctl;
unsigned int idx;
if (snd_BUG_ON(!control || !control->count))
return NULL;
if (control->count > MAX_CONTROL_COUNT)
return NULL;
kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
if (kctl == NULL) {
pr_err("ALSA: Cannot allocate control instance\n");
return NULL;
}
*kctl = *control;
for (idx = 0; idx < kctl->count; idx++)
kctl->vd[idx].access = access;
return kctl;
}
/**
* snd_ctl_new1 - create a control instance from the template
* @ncontrol: the initialization record
* @private_data: the private data to set
*
 * Allocates a new struct snd_kcontrol instance and initializes it from the
 * given template. When the access field of ncontrol is 0, READWRITE access
 * is assumed. When the count field is 0, a count of one is assumed.
*
* Return: The pointer of the newly generated instance, or %NULL on failure.
*/
struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol,
void *private_data)
{
struct snd_kcontrol kctl;
unsigned int access;
if (snd_BUG_ON(!ncontrol || !ncontrol->info))
return NULL;
memset(&kctl, 0, sizeof(kctl));
kctl.id.iface = ncontrol->iface;
kctl.id.device = ncontrol->device;
kctl.id.subdevice = ncontrol->subdevice;
if (ncontrol->name) {
strlcpy(kctl.id.name, ncontrol->name, sizeof(kctl.id.name));
if (strcmp(ncontrol->name, kctl.id.name) != 0)
pr_warn("ALSA: Control name '%s' truncated to '%s'\n",
ncontrol->name, kctl.id.name);
}
kctl.id.index = ncontrol->index;
kctl.count = ncontrol->count ? ncontrol->count : 1;
access = ncontrol->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(ncontrol->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_VOLATILE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE|
SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND|
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK));
kctl.info = ncontrol->info;
kctl.get = ncontrol->get;
kctl.put = ncontrol->put;
kctl.tlv.p = ncontrol->tlv.p;
kctl.private_value = ncontrol->private_value;
kctl.private_data = private_data;
return snd_ctl_new(&kctl, access);
}
EXPORT_SYMBOL(snd_ctl_new1);
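/*
 * A minimal usage sketch (hypothetical driver code, illustration only):
 * fill a snd_kcontrol_new template, instantiate it with snd_ctl_new1(),
 * and register the result with snd_ctl_add(). example_ctl_get/put and
 * the "chip" pointer are assumptions, not part of this file.
 */
#if 0
static const struct snd_kcontrol_new example_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Example Playback Switch",
	.info = snd_ctl_boolean_mono_info,	/* helper defined later in this file */
	.get = example_ctl_get,			/* hypothetical callback */
	.put = example_ctl_put,			/* hypothetical callback */
};

static int example_register(struct snd_card *card, void *chip)
{
	/* snd_ctl_add() frees the new instance itself on failure */
	return snd_ctl_add(card, snd_ctl_new1(&example_ctl, chip));
}
#endif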
/**
* snd_ctl_free_one - release the control instance
* @kcontrol: the control instance
*
* Releases the control instance created via snd_ctl_new()
* or snd_ctl_new1().
* Don't call this after the control was added to the card.
*/
void snd_ctl_free_one(struct snd_kcontrol *kcontrol)
{
if (kcontrol) {
if (kcontrol->private_free)
kcontrol->private_free(kcontrol);
kfree(kcontrol);
}
}
EXPORT_SYMBOL(snd_ctl_free_one);
static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
unsigned int count)
{
struct snd_kcontrol *kctl;
/* Make sure that the ids assigned to the control do not wrap around */
if (card->last_numid >= UINT_MAX - count)
card->last_numid = 0;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid < card->last_numid + 1 + count &&
kctl->id.numid + kctl->count > card->last_numid + 1) {
card->last_numid = kctl->id.numid + kctl->count - 1;
return true;
}
}
return false;
}
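/*
 * The test above is a plain interval-overlap check: the candidate range
 * [last_numid + 1, last_numid + count] collides with a control occupying
 * [kctl->id.numid, kctl->id.numid + kctl->count - 1] exactly when
 * kctl->id.numid < last_numid + 1 + count and
 * kctl->id.numid + kctl->count > last_numid + 1.  For example, with
 * last_numid = 10 and count = 2 the candidates are {11, 12}; a control
 * at numid 12 with count 1 overlaps, so last_numid advances to 12 and
 * snd_ctl_find_hole() below retries the scan.
 */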
static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
{
unsigned int iter = 100000;
while (snd_ctl_remove_numid_conflict(card, count)) {
if (--iter == 0) {
/* this situation is very unlikely */
dev_err(card->dev, "unable to allocate new control numid\n");
return -ENOMEM;
}
}
return 0;
}
/**
* snd_ctl_add - add the control instance to the card
* @card: the card instance
* @kcontrol: the control instance to add
*
* Adds the control instance created via snd_ctl_new() or
 * snd_ctl_new1() to the given card. Also assigns a unique
 * numid used for fast search.
 *
 * The control is freed automatically if it cannot be added.
*
* Return: Zero if successful, or a negative error code on failure.
*
*/
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
unsigned int count;
int err = -EINVAL;
if (! kcontrol)
return err;
if (snd_BUG_ON(!card || !kcontrol->info))
goto error;
id = kcontrol->id;
down_write(&card->controls_rwsem);
if (snd_ctl_find_id(card, &id)) {
up_write(&card->controls_rwsem);
dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
id.iface,
id.device,
id.subdevice,
id.name,
id.index);
err = -EBUSY;
goto error;
}
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
err = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return err;
}
EXPORT_SYMBOL(snd_ctl_add);
/**
* snd_ctl_replace - replace the control instance of the card
* @card: the card instance
* @kcontrol: the control instance to replace
* @add_on_replace: add the control if not already added
*
* Replaces the given control. If the given control does not exist
* and the add_on_replace flag is set, the control is added. If the
* control exists, it is destroyed first.
*
 * The control is freed automatically if it cannot be added or replaced.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
bool add_on_replace)
{
struct snd_ctl_elem_id id;
unsigned int count;
unsigned int idx;
struct snd_kcontrol *old;
int ret;
if (!kcontrol)
return -EINVAL;
if (snd_BUG_ON(!card || !kcontrol->info)) {
ret = -EINVAL;
goto error;
}
id = kcontrol->id;
down_write(&card->controls_rwsem);
old = snd_ctl_find_id(card, &id);
if (!old) {
if (add_on_replace)
goto add;
up_write(&card->controls_rwsem);
ret = -EINVAL;
goto error;
}
ret = snd_ctl_remove(card, old);
if (ret < 0) {
up_write(&card->controls_rwsem);
goto error;
}
add:
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
ret = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return ret;
}
EXPORT_SYMBOL(snd_ctl_replace);
/**
* snd_ctl_remove - remove the control from the card and release it
* @card: the card instance
* @kcontrol: the control instance to remove
*
* Removes the control from the card and then releases the instance.
 * You don't need to call snd_ctl_free_one(). The caller must hold
 * the write lock, i.e. down_write(&card->controls_rwsem).
*
* Return: 0 if successful, or a negative error code on failure.
*/
int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
if (snd_BUG_ON(!card || !kcontrol))
return -EINVAL;
list_del(&kcontrol->list);
card->controls_count -= kcontrol->count;
id = kcontrol->id;
for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_REMOVE, &id);
snd_ctl_free_one(kcontrol);
return 0;
}
EXPORT_SYMBOL(snd_ctl_remove);
/**
* snd_ctl_remove_id - remove the control of the given id and release it
* @card: the card instance
* @id: the control id to remove
*
* Finds the control instance with the given id, removes it from the
* card list and releases it.
*
* Return: 0 if successful, or a negative error code on failure.
*/
int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id)
{
struct snd_kcontrol *kctl;
int ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
up_write(&card->controls_rwsem);
return -ENOENT;
}
ret = snd_ctl_remove(card, kctl);
up_write(&card->controls_rwsem);
return ret;
}
EXPORT_SYMBOL(snd_ctl_remove_id);
/**
* snd_ctl_remove_user_ctl - remove and release the unlocked user control
* @file: active control handle
* @id: the control id to remove
*
* Finds the control instance with the given id, removes it from the
* card list and releases it.
*
* Return: 0 if successful, or a negative error code on failure.
*/
static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file,
struct snd_ctl_elem_id *id)
{
struct snd_card *card = file->card;
struct snd_kcontrol *kctl;
int idx, ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
ret = -ENOENT;
goto error;
}
if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) {
ret = -EINVAL;
goto error;
}
for (idx = 0; idx < kctl->count; idx++)
if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) {
ret = -EBUSY;
goto error;
}
ret = snd_ctl_remove(card, kctl);
if (ret < 0)
goto error;
card->user_ctl_count--;
error:
up_write(&card->controls_rwsem);
return ret;
}
/**
* snd_ctl_activate_id - activate/inactivate the control of the given id
* @card: the card instance
* @id: the control id to activate/inactivate
* @active: non-zero to activate
*
 * Finds the control instance with the given id, and activates or
 * deactivates the control, sending a notification if the state changed.
*
* Return: 0 if unchanged, 1 if changed, or a negative error code on failure.
*/
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id,
int active)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
ret = -ENOENT;
goto unlock;
}
index_offset = snd_ctl_get_ioff(kctl, &kctl->id);
vd = &kctl->vd[index_offset];
ret = 0;
if (active) {
if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE))
goto unlock;
vd->access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
} else {
if (vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE)
goto unlock;
vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
}
ret = 1;
unlock:
up_write(&card->controls_rwsem);
if (ret > 0)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, id);
return ret;
}
EXPORT_SYMBOL_GPL(snd_ctl_activate_id);
/**
* snd_ctl_rename_id - replace the id of a control on the card
* @card: the card instance
* @src_id: the old id
* @dst_id: the new id
*
* Finds the control with the old id from the card, and replaces the
* id with the new one.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id,
struct snd_ctl_elem_id *dst_id)
{
struct snd_kcontrol *kctl;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, src_id);
if (kctl == NULL) {
up_write(&card->controls_rwsem);
return -ENOENT;
}
kctl->id = *dst_id;
kctl->id.numid = card->last_numid + 1;
card->last_numid += kctl->count;
up_write(&card->controls_rwsem);
return 0;
}
EXPORT_SYMBOL(snd_ctl_rename_id);
/**
* snd_ctl_find_numid - find the control instance with the given number-id
* @card: the card instance
* @numid: the number-id to search
*
* Finds the control instance with the given number-id from the card.
*
 * The caller must hold card->controls_rwsem before calling this function
 * (if a race is possible).
*
* Return: The pointer of the instance if found, or %NULL if not.
*
*/
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid)
{
struct snd_kcontrol *kctl;
if (snd_BUG_ON(!card || !numid))
return NULL;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid)
return kctl;
}
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_numid);
/**
* snd_ctl_find_id - find the control instance with the given id
* @card: the card instance
* @id: the id to search
*
* Finds the control instance with the given id from the card.
*
 * The caller must hold card->controls_rwsem before calling this function
 * (if a race is possible).
*
* Return: The pointer of the instance if found, or %NULL if not.
*
*/
struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card,
struct snd_ctl_elem_id *id)
{
struct snd_kcontrol *kctl;
if (snd_BUG_ON(!card || !id))
return NULL;
if (id->numid != 0)
return snd_ctl_find_numid(card, id->numid);
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.iface != id->iface)
continue;
if (kctl->id.device != id->device)
continue;
if (kctl->id.subdevice != id->subdevice)
continue;
if (strncmp(kctl->id.name, id->name, sizeof(kctl->id.name)))
continue;
if (kctl->id.index > id->index)
continue;
if (kctl->id.index + kctl->count <= id->index)
continue;
return kctl;
}
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_id);
static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl,
unsigned int cmd, void __user *arg)
{
struct snd_ctl_card_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
down_read(&snd_ioctl_rwsem);
info->card = card->number;
strlcpy(info->id, card->id, sizeof(info->id));
strlcpy(info->driver, card->driver, sizeof(info->driver));
strlcpy(info->name, card->shortname, sizeof(info->name));
strlcpy(info->longname, card->longname, sizeof(info->longname));
strlcpy(info->mixername, card->mixername, sizeof(info->mixername));
strlcpy(info->components, card->components, sizeof(info->components));
up_read(&snd_ioctl_rwsem);
if (copy_to_user(arg, info, sizeof(struct snd_ctl_card_info))) {
kfree(info);
return -EFAULT;
}
kfree(info);
return 0;
}
static int snd_ctl_elem_list(struct snd_card *card,
struct snd_ctl_elem_list __user *_list)
{
struct list_head *plist;
struct snd_ctl_elem_list list;
struct snd_kcontrol *kctl;
struct snd_ctl_elem_id *dst, *id;
unsigned int offset, space, jidx;
if (copy_from_user(&list, _list, sizeof(list)))
return -EFAULT;
offset = list.offset;
space = list.space;
	/* cap the requested space to a sane maximum */
if (space > 16384)
return -ENOMEM;
if (space > 0) {
/* allocate temporary buffer for atomic operation */
dst = vmalloc(space * sizeof(struct snd_ctl_elem_id));
if (dst == NULL)
return -ENOMEM;
down_read(&card->controls_rwsem);
list.count = card->controls_count;
plist = card->controls.next;
while (plist != &card->controls) {
if (offset == 0)
break;
kctl = snd_kcontrol(plist);
if (offset < kctl->count)
break;
offset -= kctl->count;
plist = plist->next;
}
list.used = 0;
id = dst;
while (space > 0 && plist != &card->controls) {
kctl = snd_kcontrol(plist);
for (jidx = offset; space > 0 && jidx < kctl->count; jidx++) {
snd_ctl_build_ioff(id, kctl, jidx);
id++;
space--;
list.used++;
}
plist = plist->next;
offset = 0;
}
up_read(&card->controls_rwsem);
if (list.used > 0 &&
copy_to_user(list.pids, dst,
list.used * sizeof(struct snd_ctl_elem_id))) {
vfree(dst);
return -EFAULT;
}
vfree(dst);
} else {
down_read(&card->controls_rwsem);
list.count = card->controls_count;
up_read(&card->controls_rwsem);
}
if (copy_to_user(_list, &list, sizeof(list)))
return -EFAULT;
return 0;
}
static int snd_ctl_elem_info(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info *info)
{
struct snd_card *card = ctl->card;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &info->id);
if (kctl == NULL) {
up_read(&card->controls_rwsem);
return -ENOENT;
}
#ifdef CONFIG_SND_DEBUG
info->access = 0;
#endif
result = kctl->info(kctl, info);
if (result >= 0) {
snd_BUG_ON(info->access);
index_offset = snd_ctl_get_ioff(kctl, &info->id);
vd = &kctl->vd[index_offset];
snd_ctl_build_ioff(&info->id, kctl, index_offset);
info->access = vd->access;
if (vd->owner) {
info->access |= SNDRV_CTL_ELEM_ACCESS_LOCK;
if (vd->owner == ctl)
info->access |= SNDRV_CTL_ELEM_ACCESS_OWNER;
info->owner = pid_vnr(vd->owner->pid);
} else {
info->owner = -1;
}
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info __user *_info)
{
struct snd_ctl_elem_info info;
int result;
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
snd_power_lock(ctl->card);
result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_info(ctl, &info);
snd_power_unlock(ctl->card);
if (result >= 0)
if (copy_to_user(_info, &info, sizeof(info)))
return -EFAULT;
return result;
}
static int snd_ctl_elem_read(struct snd_card *card,
struct snd_ctl_elem_value *control)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
if ((vd->access & SNDRV_CTL_ELEM_ACCESS_READ) &&
kctl->get != NULL) {
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = kctl->get(kctl, control);
} else
result = -EPERM;
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_read_user(struct snd_card *card,
struct snd_ctl_elem_value __user *_control)
{
struct snd_ctl_elem_value *control;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
return PTR_ERR(control);
snd_power_lock(card);
result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_read(card, control);
snd_power_unlock(card);
if (result >= 0)
if (copy_to_user(_control, control, sizeof(*control)))
result = -EFAULT;
kfree(control);
return result;
}
static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
struct snd_ctl_elem_value *control)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_WRITE) ||
kctl->put == NULL ||
(file && vd->owner && vd->owner != file)) {
result = -EPERM;
} else {
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = kctl->put(kctl, control);
}
if (result > 0) {
struct snd_ctl_elem_id id = control->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
return 0;
}
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_write_user(struct snd_ctl_file *file,
struct snd_ctl_elem_value __user *_control)
{
struct snd_ctl_elem_value *control;
struct snd_card *card;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
return PTR_ERR(control);
card = file->card;
snd_power_lock(card);
result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_write(card, file, control);
snd_power_unlock(card);
if (result >= 0)
if (copy_to_user(_control, control, sizeof(*control)))
result = -EFAULT;
kfree(control);
return result;
}
static int snd_ctl_elem_lock(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_card *card = file->card;
struct snd_ctl_elem_id id;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
int result;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &id);
if (kctl == NULL) {
result = -ENOENT;
} else {
vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
if (vd->owner != NULL)
result = -EBUSY;
else {
vd->owner = file;
result = 0;
}
}
up_write(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_card *card = file->card;
struct snd_ctl_elem_id id;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
int result;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &id);
if (kctl == NULL) {
result = -ENOENT;
} else {
vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
if (vd->owner == NULL)
result = -EINVAL;
else if (vd->owner != file)
result = -EPERM;
else {
vd->owner = NULL;
result = 0;
}
}
up_write(&card->controls_rwsem);
return result;
}
struct user_element {
struct snd_ctl_elem_info info;
struct snd_card *card;
void *elem_data; /* element data */
unsigned long elem_data_size; /* size of element data in bytes */
void *tlv_data; /* TLV data */
unsigned long tlv_data_size; /* TLV data size */
void *priv_data; /* private data (like strings for enumerated type) */
};
static int snd_ctl_elem_user_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
*uinfo = ue->info;
return 0;
}
static int snd_ctl_elem_user_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
const char *names;
unsigned int item;
item = uinfo->value.enumerated.item;
*uinfo = ue->info;
item = min(item, uinfo->value.enumerated.items - 1);
uinfo->value.enumerated.item = item;
names = ue->priv_data;
for (; item > 0; --item)
names += strlen(names) + 1;
strcpy(uinfo->value.enumerated.name, names);
return 0;
}
static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct user_element *ue = kcontrol->private_data;
mutex_lock(&ue->card->user_ctl_lock);
memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
mutex_unlock(&ue->card->user_ctl_lock);
return 0;
}
static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int change;
struct user_element *ue = kcontrol->private_data;
mutex_lock(&ue->card->user_ctl_lock);
change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
if (change)
memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
mutex_unlock(&ue->card->user_ctl_lock);
return change;
}
static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
int op_flag,
unsigned int size,
unsigned int __user *tlv)
{
struct user_element *ue = kcontrol->private_data;
int change = 0;
void *new_data;
if (op_flag > 0) {
if (size > 1024 * 128) /* sane value */
return -EINVAL;
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
mutex_unlock(&ue->card->user_ctl_lock);
} else {
int ret = 0;
mutex_lock(&ue->card->user_ctl_lock);
if (!ue->tlv_data_size || !ue->tlv_data) {
ret = -ENXIO;
goto err_unlock;
}
if (size < ue->tlv_data_size) {
ret = -ENOSPC;
goto err_unlock;
}
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
ret = -EFAULT;
err_unlock:
mutex_unlock(&ue->card->user_ctl_lock);
if (ret)
return ret;
}
return change;
}
static int snd_ctl_elem_init_enum_names(struct user_element *ue)
{
char *names, *p;
size_t buf_len, name_len;
unsigned int i;
const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr;
if (ue->info.value.enumerated.names_length > 64 * 1024)
return -EINVAL;
names = memdup_user((const void __user *)user_ptrval,
ue->info.value.enumerated.names_length);
if (IS_ERR(names))
return PTR_ERR(names);
/* check that there are enough valid names */
buf_len = ue->info.value.enumerated.names_length;
p = names;
for (i = 0; i < ue->info.value.enumerated.items; ++i) {
name_len = strnlen(p, buf_len);
if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
kfree(names);
return -EINVAL;
}
p += name_len + 1;
buf_len -= name_len + 1;
}
ue->priv_data = names;
ue->info.value.enumerated.names_ptr = 0;
return 0;
}
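/*
 * The user-supplied enum names arrive as one packed buffer of
 * NUL-terminated strings; for a three-item control it might look like
 *
 *	"44100\0" "48000\0" "96000\0"	(names_length = 18)
 *
 * The loop above rejects empty names, names of 64 bytes or more, and a
 * buffer whose final string is not NUL-terminated (name_len == buf_len).
 */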
static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
{
struct user_element *ue = kcontrol->private_data;
kfree(ue->tlv_data);
kfree(ue->priv_data);
kfree(ue);
}
static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
struct snd_card *card = file->card;
struct snd_kcontrol kctl, *_kctl;
unsigned int access;
long private_size;
struct user_element *ue;
int idx, err;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
if (replace) {
err = snd_ctl_remove_user_ctl(file, &info->id);
if (err)
return err;
}
if (card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
kctl.info = snd_ctl_elem_user_enum_info;
else
kctl.info = snd_ctl_elem_user_info;
if (access & SNDRV_CTL_ELEM_ACCESS_READ)
kctl.get = snd_ctl_elem_user_get;
if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
kctl.put = snd_ctl_elem_user_put;
if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
kctl.tlv.c = snd_ctl_elem_user_tlv;
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
private_size = sizeof(long);
if (info->count > 128)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
private_size = sizeof(long long);
if (info->count > 64)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
private_size = sizeof(unsigned int);
if (info->count > 128 || info->value.enumerated.items == 0)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
private_size = sizeof(unsigned char);
if (info->count > 512)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
private_size = sizeof(struct snd_aes_iec958);
if (info->count != 1)
return -EINVAL;
break;
default:
return -EINVAL;
}
private_size *= info->count;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
ue->elem_data_size = private_size;
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
kfree(ue);
return err;
}
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
kfree(ue->priv_data);
kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
if (err < 0)
return err;
down_write(&card->controls_rwsem);
card->user_ctl_count++;
up_write(&card->controls_rwsem);
return 0;
}
static int snd_ctl_elem_add_user(struct snd_ctl_file *file,
struct snd_ctl_elem_info __user *_info, int replace)
{
struct snd_ctl_elem_info info;
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
return snd_ctl_elem_add(file, &info, replace);
}
static int snd_ctl_elem_remove(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_ctl_elem_id id;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
return snd_ctl_remove_user_ctl(file, &id);
}
static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr)
{
int subscribe;
if (get_user(subscribe, ptr))
return -EFAULT;
if (subscribe < 0) {
subscribe = file->subscribed;
if (put_user(subscribe, ptr))
return -EFAULT;
return 0;
}
if (subscribe) {
file->subscribed = 1;
return 0;
} else if (file->subscribed) {
snd_ctl_empty_read_queue(file);
file->subscribed = 0;
}
return 0;
}
static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
struct snd_ctl_tlv __user *_tlv,
int op_flag)
{
struct snd_card *card = file->card;
struct snd_ctl_tlv tlv;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int len;
int err = 0;
if (copy_from_user(&tlv, _tlv, sizeof(tlv)))
return -EFAULT;
if (tlv.length < sizeof(unsigned int) * 2)
return -EINVAL;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_numid(card, tlv.numid);
if (kctl == NULL) {
err = -ENOENT;
goto __kctl_end;
}
if (kctl->tlv.p == NULL) {
err = -ENXIO;
goto __kctl_end;
}
vd = &kctl->vd[tlv.numid - kctl->id.numid];
if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) ||
(op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) ||
(op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) {
err = -ENXIO;
goto __kctl_end;
}
if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
if (vd->owner != NULL && vd->owner != file) {
err = -EPERM;
goto __kctl_end;
}
err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
if (err > 0) {
struct snd_ctl_elem_id id = kctl->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
return 0;
}
} else {
if (op_flag) {
err = -ENXIO;
goto __kctl_end;
}
len = kctl->tlv.p[1] + 2 * sizeof(unsigned int);
if (tlv.length < len) {
err = -ENOMEM;
goto __kctl_end;
}
if (copy_to_user(_tlv->tlv, kctl->tlv.p, len))
err = -EFAULT;
}
__kctl_end:
up_read(&card->controls_rwsem);
return err;
}
static long snd_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct snd_ctl_file *ctl;
struct snd_card *card;
struct snd_kctl_ioctl *p;
void __user *argp = (void __user *)arg;
int __user *ip = argp;
int err;
ctl = file->private_data;
card = ctl->card;
if (snd_BUG_ON(!card))
return -ENXIO;
switch (cmd) {
case SNDRV_CTL_IOCTL_PVERSION:
return put_user(SNDRV_CTL_VERSION, ip) ? -EFAULT : 0;
case SNDRV_CTL_IOCTL_CARD_INFO:
return snd_ctl_card_info(card, ctl, cmd, argp);
case SNDRV_CTL_IOCTL_ELEM_LIST:
return snd_ctl_elem_list(card, argp);
case SNDRV_CTL_IOCTL_ELEM_INFO:
return snd_ctl_elem_info_user(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_READ:
return snd_ctl_elem_read_user(card, argp);
case SNDRV_CTL_IOCTL_ELEM_WRITE:
return snd_ctl_elem_write_user(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_LOCK:
return snd_ctl_elem_lock(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_UNLOCK:
return snd_ctl_elem_unlock(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_ADD:
return snd_ctl_elem_add_user(ctl, argp, 0);
case SNDRV_CTL_IOCTL_ELEM_REPLACE:
return snd_ctl_elem_add_user(ctl, argp, 1);
case SNDRV_CTL_IOCTL_ELEM_REMOVE:
return snd_ctl_elem_remove(ctl, argp);
case SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS:
return snd_ctl_subscribe_events(ctl, ip);
case SNDRV_CTL_IOCTL_TLV_READ:
return snd_ctl_tlv_ioctl(ctl, argp, 0);
case SNDRV_CTL_IOCTL_TLV_WRITE:
return snd_ctl_tlv_ioctl(ctl, argp, 1);
case SNDRV_CTL_IOCTL_TLV_COMMAND:
return snd_ctl_tlv_ioctl(ctl, argp, -1);
case SNDRV_CTL_IOCTL_POWER:
return -ENOPROTOOPT;
case SNDRV_CTL_IOCTL_POWER_STATE:
#ifdef CONFIG_PM
return put_user(card->power_state, ip) ? -EFAULT : 0;
#else
return put_user(SNDRV_CTL_POWER_D0, ip) ? -EFAULT : 0;
#endif
}
down_read(&snd_ioctl_rwsem);
list_for_each_entry(p, &snd_control_ioctls, list) {
err = p->fioctl(card, ctl, cmd, arg);
if (err != -ENOIOCTLCMD) {
up_read(&snd_ioctl_rwsem);
return err;
}
}
up_read(&snd_ioctl_rwsem);
dev_dbg(card->dev, "unknown ioctl = 0x%x\n", cmd);
return -ENOTTY;
}
static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
size_t count, loff_t * offset)
{
struct snd_ctl_file *ctl;
int err = 0;
ssize_t result = 0;
ctl = file->private_data;
if (snd_BUG_ON(!ctl || !ctl->card))
return -ENXIO;
if (!ctl->subscribed)
return -EBADFD;
if (count < sizeof(struct snd_ctl_event))
return -EINVAL;
spin_lock_irq(&ctl->read_lock);
while (count >= sizeof(struct snd_ctl_event)) {
struct snd_ctl_event ev;
struct snd_kctl_event *kev;
while (list_empty(&ctl->events)) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto __end_lock;
}
init_waitqueue_entry(&wait, current);
add_wait_queue(&ctl->change_sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&ctl->read_lock);
schedule();
remove_wait_queue(&ctl->change_sleep, &wait);
if (ctl->card->shutdown)
return -ENODEV;
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&ctl->read_lock);
}
kev = snd_kctl_event(ctl->events.next);
ev.type = SNDRV_CTL_EVENT_ELEM;
ev.data.elem.mask = kev->mask;
ev.data.elem.id = kev->id;
list_del(&kev->list);
spin_unlock_irq(&ctl->read_lock);
kfree(kev);
if (copy_to_user(buffer, &ev, sizeof(struct snd_ctl_event))) {
err = -EFAULT;
goto __end;
}
spin_lock_irq(&ctl->read_lock);
buffer += sizeof(struct snd_ctl_event);
count -= sizeof(struct snd_ctl_event);
result += sizeof(struct snd_ctl_event);
}
__end_lock:
spin_unlock_irq(&ctl->read_lock);
__end:
return result > 0 ? result : err;
}
static unsigned int snd_ctl_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_ctl_file *ctl;
ctl = file->private_data;
if (!ctl->subscribed)
return 0;
poll_wait(file, &ctl->change_sleep, wait);
mask = 0;
if (!list_empty(&ctl->events))
mask |= POLLIN | POLLRDNORM;
return mask;
}
/*
* register the device-specific control-ioctls.
* called from each device manager like pcm.c, hwdep.c, etc.
*/
static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists)
{
struct snd_kctl_ioctl *pn;
pn = kzalloc(sizeof(struct snd_kctl_ioctl), GFP_KERNEL);
if (pn == NULL)
return -ENOMEM;
pn->fioctl = fcn;
down_write(&snd_ioctl_rwsem);
list_add_tail(&pn->list, lists);
up_write(&snd_ioctl_rwsem);
return 0;
}
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_ioctls);
}
EXPORT_SYMBOL(snd_ctl_register_ioctl);
#ifdef CONFIG_COMPAT
int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_compat_ioctls);
}
EXPORT_SYMBOL(snd_ctl_register_ioctl_compat);
#endif
/*
* de-register the device-specific control-ioctls.
*/
static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn,
struct list_head *lists)
{
struct snd_kctl_ioctl *p;
if (snd_BUG_ON(!fcn))
return -EINVAL;
down_write(&snd_ioctl_rwsem);
list_for_each_entry(p, lists, list) {
if (p->fioctl == fcn) {
list_del(&p->list);
up_write(&snd_ioctl_rwsem);
kfree(p);
return 0;
}
}
up_write(&snd_ioctl_rwsem);
snd_BUG();
return -EINVAL;
}
int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_ioctls);
}
EXPORT_SYMBOL(snd_ctl_unregister_ioctl);
#ifdef CONFIG_COMPAT
int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_compat_ioctls);
}
EXPORT_SYMBOL(snd_ctl_unregister_ioctl_compat);
#endif
static int snd_ctl_fasync(int fd, struct file * file, int on)
{
struct snd_ctl_file *ctl;
ctl = file->private_data;
return fasync_helper(fd, file, on, &ctl->fasync);
}
/*
* ioctl32 compat
*/
#ifdef CONFIG_COMPAT
#include "control_compat.c"
#else
#define snd_ctl_ioctl_compat NULL
#endif
/*
* INIT PART
*/
static const struct file_operations snd_ctl_f_ops =
{
.owner = THIS_MODULE,
.read = snd_ctl_read,
.open = snd_ctl_open,
.release = snd_ctl_release,
.llseek = no_llseek,
.poll = snd_ctl_poll,
.unlocked_ioctl = snd_ctl_ioctl,
.compat_ioctl = snd_ctl_ioctl_compat,
.fasync = snd_ctl_fasync,
};
/*
* registration of the control device
*/
static int snd_ctl_dev_register(struct snd_device *device)
{
struct snd_card *card = device->device_data;
int err, cardnum;
char name[16];
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
sprintf(name, "controlC%i", cardnum);
if ((err = snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1,
&snd_ctl_f_ops, card, name)) < 0)
return err;
return 0;
}
/*
* disconnection of the control device
*/
static int snd_ctl_dev_disconnect(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_ctl_file *ctl;
int err, cardnum;
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
read_lock(&card->ctl_files_rwlock);
list_for_each_entry(ctl, &card->ctl_files, list) {
wake_up(&ctl->change_sleep);
kill_fasync(&ctl->fasync, SIGIO, POLL_ERR);
}
read_unlock(&card->ctl_files_rwlock);
if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL,
card, -1)) < 0)
return err;
return 0;
}
/*
* free all controls
*/
static int snd_ctl_dev_free(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_kcontrol *control;
down_write(&card->controls_rwsem);
while (!list_empty(&card->controls)) {
control = snd_kcontrol(card->controls.next);
snd_ctl_remove(card, control);
}
up_write(&card->controls_rwsem);
return 0;
}
/*
* create control core:
* called from init.c
*/
int snd_ctl_create(struct snd_card *card)
{
static struct snd_device_ops ops = {
.dev_free = snd_ctl_dev_free,
.dev_register = snd_ctl_dev_register,
.dev_disconnect = snd_ctl_dev_disconnect,
};
if (snd_BUG_ON(!card))
return -ENXIO;
return snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops);
}
/*
* Frequently used control callbacks/helpers
*/
int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
EXPORT_SYMBOL(snd_ctl_boolean_mono_info);
int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
/**
* snd_ctl_enum_info - fills the info structure for an enumerated control
* @info: the structure to be filled
* @channels: the number of the control's channels; often one
* @items: the number of control values; also the size of @names
* @names: an array containing the names of all control values
*
* Sets all required fields in @info to their appropriate values.
* If the control's accessibility is not the default (readable and writable),
* the caller has to fill @info->access.
*
* Return: Zero.
*/
int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
unsigned int items, const char *const names[])
{
info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
info->count = channels;
info->value.enumerated.items = items;
if (info->value.enumerated.item >= items)
info->value.enumerated.item = items - 1;
strlcpy(info->value.enumerated.name,
names[info->value.enumerated.item],
sizeof(info->value.enumerated.name));
return 0;
}
EXPORT_SYMBOL(snd_ctl_enum_info);
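/*
 * A minimal usage sketch (hypothetical, illustration only): an .info
 * callback for a three-item enumerated control built on
 * snd_ctl_enum_info().
 */
#if 0
static int example_rate_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	static const char *const names[] = { "44100", "48000", "96000" };

	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(names), names);
}
#endif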
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2218_0 |
crossvul-cpp_data_good_3447_1 | /*
* sound/oss/midi_synth.c
*
* High level midi sequencer manager for dumb MIDI interfaces.
*/
/*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*/
/*
* Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
* Andrew Veliath : fixed running status in MIDI input state machine
*/
#define USE_SEQ_MACROS
#define USE_SIMPLE_MACROS
#include "sound_config.h"
#define _MIDI_SYNTH_C_
#include "midi_synth.h"
static int midi2synth[MAX_MIDI_DEV];
static int sysex_state[MAX_MIDI_DEV] =
{0};
static unsigned char prev_out_status[MAX_MIDI_DEV];
#define STORE(cmd) \
{ \
int len; \
unsigned char obuf[8]; \
cmd; \
seq_input_event(obuf, len); \
}
#define _seqbuf obuf
#define _seqbufptr 0
#define _SEQ_ADVBUF(x) len=x
void
do_midi_msg(int synthno, unsigned char *msg, int mlen)
{
switch (msg[0] & 0xf0)
{
case 0x90:
if (msg[2] != 0)
{
STORE(SEQ_START_NOTE(synthno, msg[0] & 0x0f, msg[1], msg[2]));
break;
}
msg[2] = 64;
case 0x80:
STORE(SEQ_STOP_NOTE(synthno, msg[0] & 0x0f, msg[1], msg[2]));
break;
case 0xA0:
STORE(SEQ_KEY_PRESSURE(synthno, msg[0] & 0x0f, msg[1], msg[2]));
break;
case 0xB0:
STORE(SEQ_CONTROL(synthno, msg[0] & 0x0f,
msg[1], msg[2]));
break;
case 0xC0:
STORE(SEQ_SET_PATCH(synthno, msg[0] & 0x0f, msg[1]));
break;
case 0xD0:
STORE(SEQ_CHN_PRESSURE(synthno, msg[0] & 0x0f, msg[1]));
break;
case 0xE0:
STORE(SEQ_BENDER(synthno, msg[0] & 0x0f,
(msg[1] & 0x7f) | ((msg[2] & 0x7f) << 7)));
break;
default:
/* printk( "MPU: Unknown midi channel message %02x\n", msg[0]); */
;
}
}
EXPORT_SYMBOL(do_midi_msg);
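/*
 * Note how STORE() works: the _seqbuf/_seqbufptr/_SEQ_ADVBUF overrides
 * above retarget the generic SEQ_*() event macros at the local obuf[]
 * array, so each macro fills obuf and reports the event length through
 * _SEQ_ADVBUF(); the finished event is then handed to the sequencer
 * core via seq_input_event().
 */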
static void
midi_outc(int midi_dev, int data)
{
int timeout;
for (timeout = 0; timeout < 3200; timeout++)
if (midi_devs[midi_dev]->outputc(midi_dev, (unsigned char) (data & 0xff)))
{
if (data & 0x80) /*
* Status byte
*/
prev_out_status[midi_dev] =
(unsigned char) (data & 0xff); /*
* Store for running status
*/
return; /*
* Mission complete
*/
}
/*
* Sorry! No space on buffers.
*/
printk("Midi send timed out\n");
}
static int
prefix_cmd(int midi_dev, unsigned char status)
{
if ((char *) midi_devs[midi_dev]->prefix_cmd == NULL)
return 1;
return midi_devs[midi_dev]->prefix_cmd(midi_dev, status);
}
static void
midi_synth_input(int orig_dev, unsigned char data)
{
int dev;
struct midi_input_info *inc;
static unsigned char len_tab[] = /* # of data bytes following a status
*/
{
2, /* 8x */
2, /* 9x */
2, /* Ax */
2, /* Bx */
1, /* Cx */
1, /* Dx */
2, /* Ex */
0 /* Fx */
};
	if (orig_dev < 0 || orig_dev >= num_midis || midi_devs[orig_dev] == NULL)
return;
if (data == 0xfe) /* Ignore active sensing */
return;
dev = midi2synth[orig_dev];
inc = &midi_devs[orig_dev]->in_info;
switch (inc->m_state)
{
case MST_INIT:
if (data & 0x80) /* MIDI status byte */
{
if ((data & 0xf0) == 0xf0) /* Common message */
{
switch (data)
{
case 0xf0: /* Sysex */
inc->m_state = MST_SYSEX;
break; /* Sysex */
case 0xf1: /* MTC quarter frame */
case 0xf3: /* Song select */
inc->m_state = MST_DATA;
inc->m_ptr = 1;
inc->m_left = 1;
inc->m_buf[0] = data;
break;
case 0xf2: /* Song position pointer */
inc->m_state = MST_DATA;
inc->m_ptr = 1;
inc->m_left = 2;
inc->m_buf[0] = data;
break;
default:
inc->m_buf[0] = data;
inc->m_ptr = 1;
do_midi_msg(dev, inc->m_buf, inc->m_ptr);
inc->m_ptr = 0;
inc->m_left = 0;
}
} else
{
inc->m_state = MST_DATA;
inc->m_ptr = 1;
inc->m_left = len_tab[(data >> 4) - 8];
inc->m_buf[0] = inc->m_prev_status = data;
}
} else if (inc->m_prev_status & 0x80) {
/* Data byte (use running status) */
inc->m_ptr = 2;
inc->m_buf[1] = data;
inc->m_buf[0] = inc->m_prev_status;
inc->m_left = len_tab[(inc->m_buf[0] >> 4) - 8] - 1;
if (inc->m_left > 0)
inc->m_state = MST_DATA; /* Not done yet */
else {
inc->m_state = MST_INIT;
do_midi_msg(dev, inc->m_buf, inc->m_ptr);
inc->m_ptr = 0;
}
}
break; /* MST_INIT */
case MST_DATA:
inc->m_buf[inc->m_ptr++] = data;
if (--inc->m_left <= 0)
{
inc->m_state = MST_INIT;
do_midi_msg(dev, inc->m_buf, inc->m_ptr);
inc->m_ptr = 0;
}
break; /* MST_DATA */
case MST_SYSEX:
if (data == 0xf7) /* Sysex end */
{
inc->m_state = MST_INIT;
inc->m_left = 0;
inc->m_ptr = 0;
}
break; /* MST_SYSEX */
default:
printk("MIDI%d: Unexpected state %d (%02x)\n", orig_dev, inc->m_state, (int) data);
inc->m_state = MST_INIT;
}
}
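/*
 * Running-status example: the byte stream 90 3C 64 3E 64 carries two
 * Note On events on channel 0.  The second event omits its status byte,
 * so the MST_INIT branch above reuses m_prev_status (0x90) and rebuilds
 * the full three-byte message 90 3E 64 before calling do_midi_msg().
 */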
static void
leave_sysex(int dev)
{
int orig_dev = synth_devs[dev]->midi_dev;
int timeout = 0;
if (!sysex_state[dev])
return;
sysex_state[dev] = 0;
while (!midi_devs[orig_dev]->outputc(orig_dev, 0xf7) &&
timeout < 1000)
timeout++;
sysex_state[dev] = 0;
}
static void
midi_synth_output(int dev)
{
/*
* Currently NOP
*/
}
int midi_synth_ioctl(int dev, unsigned int cmd, void __user *arg)
{
/*
* int orig_dev = synth_devs[dev]->midi_dev;
*/
switch (cmd) {
case SNDCTL_SYNTH_INFO:
if (__copy_to_user(arg, synth_devs[dev]->info, sizeof(struct synth_info)))
return -EFAULT;
return 0;
case SNDCTL_SYNTH_MEMAVL:
return 0x7fffffff;
default:
return -EINVAL;
}
}
EXPORT_SYMBOL(midi_synth_ioctl);
int
midi_synth_kill_note(int dev, int channel, int note, int velocity)
{
int orig_dev = synth_devs[dev]->midi_dev;
int msg, chn;
if (note < 0 || note > 127)
return 0;
if (channel < 0 || channel > 15)
return 0;
if (velocity < 0)
velocity = 0;
if (velocity > 127)
velocity = 127;
leave_sysex(dev);
msg = prev_out_status[orig_dev] & 0xf0;
chn = prev_out_status[orig_dev] & 0x0f;
if (chn == channel && ((msg == 0x90 && velocity == 64) || msg == 0x80))
{ /*
* Use running status
*/
if (!prefix_cmd(orig_dev, note))
return 0;
midi_outc(orig_dev, note);
if (msg == 0x90) /*
* Running status = Note on
*/
midi_outc(orig_dev, 0); /*
* Note on with velocity 0 == note
* off
*/
else
midi_outc(orig_dev, velocity);
} else
{
if (velocity == 64)
{
if (!prefix_cmd(orig_dev, 0x90 | (channel & 0x0f)))
return 0;
midi_outc(orig_dev, 0x90 | (channel & 0x0f)); /*
* Note on
*/
midi_outc(orig_dev, note);
midi_outc(orig_dev, 0); /*
* Zero G
*/
} else
{
if (!prefix_cmd(orig_dev, 0x80 | (channel & 0x0f)))
return 0;
midi_outc(orig_dev, 0x80 | (channel & 0x0f)); /*
* Note off
*/
midi_outc(orig_dev, note);
midi_outc(orig_dev, velocity);
}
}
return 0;
}
EXPORT_SYMBOL(midi_synth_kill_note);
int
midi_synth_set_instr(int dev, int channel, int instr_no)
{
int orig_dev = synth_devs[dev]->midi_dev;
if (instr_no < 0 || instr_no > 127)
instr_no = 0;
if (channel < 0 || channel > 15)
return 0;
leave_sysex(dev);
if (!prefix_cmd(orig_dev, 0xc0 | (channel & 0x0f)))
return 0;
midi_outc(orig_dev, 0xc0 | (channel & 0x0f)); /*
* Program change
*/
midi_outc(orig_dev, instr_no);
return 0;
}
EXPORT_SYMBOL(midi_synth_set_instr);
int
midi_synth_start_note(int dev, int channel, int note, int velocity)
{
int orig_dev = synth_devs[dev]->midi_dev;
int msg, chn;
if (note < 0 || note > 127)
return 0;
if (channel < 0 || channel > 15)
return 0;
if (velocity < 0)
velocity = 0;
if (velocity > 127)
velocity = 127;
leave_sysex(dev);
msg = prev_out_status[orig_dev] & 0xf0;
chn = prev_out_status[orig_dev] & 0x0f;
if (chn == channel && msg == 0x90)
{ /*
* Use running status
*/
if (!prefix_cmd(orig_dev, note))
return 0;
midi_outc(orig_dev, note);
midi_outc(orig_dev, velocity);
} else
{
if (!prefix_cmd(orig_dev, 0x90 | (channel & 0x0f)))
return 0;
midi_outc(orig_dev, 0x90 | (channel & 0x0f)); /*
* Note on
*/
midi_outc(orig_dev, note);
midi_outc(orig_dev, velocity);
}
return 0;
}
EXPORT_SYMBOL(midi_synth_start_note);
void
midi_synth_reset(int dev)
{
leave_sysex(dev);
}
EXPORT_SYMBOL(midi_synth_reset);
int
midi_synth_open(int dev, int mode)
{
int orig_dev = synth_devs[dev]->midi_dev;
int err;
struct midi_input_info *inc;
if (orig_dev < 0 || orig_dev >= num_midis || midi_devs[orig_dev] == NULL)
return -ENXIO;
midi2synth[orig_dev] = dev;
sysex_state[dev] = 0;
prev_out_status[orig_dev] = 0;
if ((err = midi_devs[orig_dev]->open(orig_dev, mode,
midi_synth_input, midi_synth_output)) < 0)
return err;
inc = &midi_devs[orig_dev]->in_info;
/* save_flags(flags);
cli();
don't know against what irqhandler to protect*/
inc->m_busy = 0;
inc->m_state = MST_INIT;
inc->m_ptr = 0;
inc->m_left = 0;
inc->m_prev_status = 0x00;
/* restore_flags(flags); */
return 1;
}
EXPORT_SYMBOL(midi_synth_open);
void
midi_synth_close(int dev)
{
int orig_dev = synth_devs[dev]->midi_dev;
leave_sysex(dev);
/*
* Shut up the synths by sending just single active sensing message.
*/
midi_devs[orig_dev]->outputc(orig_dev, 0xfe);
midi_devs[orig_dev]->close(orig_dev);
}
EXPORT_SYMBOL(midi_synth_close);
void
midi_synth_hw_control(int dev, unsigned char *event)
{
}
EXPORT_SYMBOL(midi_synth_hw_control);
int
midi_synth_load_patch(int dev, int format, const char __user *addr,
int count, int pmgr_flag)
{
int orig_dev = synth_devs[dev]->midi_dev;
struct sysex_info sysex;
int i;
unsigned long left, src_offs, eox_seen = 0;
int first_byte = 1;
int hdr_size = (unsigned long) &sysex.data[0] - (unsigned long) &sysex;
leave_sysex(dev);
if (!prefix_cmd(orig_dev, 0xf0))
return 0;
/* Invalid patch format */
if (format != SYSEX_PATCH)
return -EINVAL;
/* Patch header too short */
if (count < hdr_size)
return -EINVAL;
count -= hdr_size;
/*
* Copy the header from user space
*/
if (copy_from_user(&sysex, addr, hdr_size))
return -EFAULT;
	/* Clamp the sysex length to the data actually supplied */
if ((unsigned)count < (unsigned)sysex.len)
sysex.len = count;
left = sysex.len;
src_offs = 0;
for (i = 0; i < left && !signal_pending(current); i++)
{
unsigned char data;
if (get_user(data,
(unsigned char __user *)(addr + hdr_size + i)))
return -EFAULT;
eox_seen = (i > 0 && data & 0x80); /* End of sysex */
if (eox_seen && data != 0xf7)
data = 0xf7;
if (i == 0)
{
if (data != 0xf0)
{
printk(KERN_WARNING "midi_synth: Sysex start missing\n");
return -EINVAL;
}
}
while (!midi_devs[orig_dev]->outputc(orig_dev, (unsigned char) (data & 0xff)) &&
!signal_pending(current))
schedule();
if (!first_byte && data & 0x80)
return 0;
first_byte = 0;
}
if (!eox_seen)
midi_outc(orig_dev, 0xf7);
return 0;
}
EXPORT_SYMBOL(midi_synth_load_patch);
void midi_synth_panning(int dev, int channel, int pressure)
{
}
EXPORT_SYMBOL(midi_synth_panning);
void midi_synth_aftertouch(int dev, int channel, int pressure)
{
int orig_dev = synth_devs[dev]->midi_dev;
int msg, chn;
if (pressure < 0 || pressure > 127)
return;
if (channel < 0 || channel > 15)
return;
leave_sysex(dev);
msg = prev_out_status[orig_dev] & 0xf0;
chn = prev_out_status[orig_dev] & 0x0f;
if (msg != 0xd0 || chn != channel) /*
* Test for running status
*/
{
if (!prefix_cmd(orig_dev, 0xd0 | (channel & 0x0f)))
return;
midi_outc(orig_dev, 0xd0 | (channel & 0x0f)); /*
* Channel pressure
*/
} else if (!prefix_cmd(orig_dev, pressure))
return;
midi_outc(orig_dev, pressure);
}
EXPORT_SYMBOL(midi_synth_aftertouch);
void
midi_synth_controller(int dev, int channel, int ctrl_num, int value)
{
int orig_dev = synth_devs[dev]->midi_dev;
int chn, msg;
if (ctrl_num < 0 || ctrl_num > 127)
return;
if (channel < 0 || channel > 15)
return;
leave_sysex(dev);
msg = prev_out_status[orig_dev] & 0xf0;
chn = prev_out_status[orig_dev] & 0x0f;
if (msg != 0xb0 || chn != channel)
{
if (!prefix_cmd(orig_dev, 0xb0 | (channel & 0x0f)))
return;
midi_outc(orig_dev, 0xb0 | (channel & 0x0f));
} else if (!prefix_cmd(orig_dev, ctrl_num))
return;
midi_outc(orig_dev, ctrl_num);
midi_outc(orig_dev, value & 0x7f);
}
EXPORT_SYMBOL(midi_synth_controller);
void
midi_synth_bender(int dev, int channel, int value)
{
int orig_dev = synth_devs[dev]->midi_dev;
int msg, prev_chn;
if (channel < 0 || channel > 15)
return;
if (value < 0 || value > 16383)
return;
leave_sysex(dev);
msg = prev_out_status[orig_dev] & 0xf0;
prev_chn = prev_out_status[orig_dev] & 0x0f;
	if (msg != 0xe0 || prev_chn != channel)	/*
* Test for running status
*/
{
if (!prefix_cmd(orig_dev, 0xe0 | (channel & 0x0f)))
return;
midi_outc(orig_dev, 0xe0 | (channel & 0x0f));
} else if (!prefix_cmd(orig_dev, value & 0x7f))
return;
midi_outc(orig_dev, value & 0x7f);
midi_outc(orig_dev, (value >> 7) & 0x7f);
}
EXPORT_SYMBOL(midi_synth_bender);
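/*
 * The bender value is a 14-bit quantity split across two 7-bit data
 * bytes, LSB first: the centre position 8192 (0x2000) goes out as
 * 0xE<ch> 0x00 0x40, since 8192 & 0x7f == 0x00 and
 * (8192 >> 7) & 0x7f == 0x40.
 */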
void
midi_synth_setup_voice(int dev, int voice, int channel)
{
}
EXPORT_SYMBOL(midi_synth_setup_voice);
int
midi_synth_send_sysex(int dev, unsigned char *bytes, int len)
{
int orig_dev = synth_devs[dev]->midi_dev;
int i;
for (i = 0; i < len; i++)
{
switch (bytes[i])
{
case 0xf0: /* Start sysex */
if (!prefix_cmd(orig_dev, 0xf0))
return 0;
sysex_state[dev] = 1;
break;
case 0xf7: /* End sysex */
if (!sysex_state[dev]) /* Orphan sysex end */
return 0;
sysex_state[dev] = 0;
break;
default:
if (!sysex_state[dev])
return 0;
if (bytes[i] & 0x80) /* Error. Another message before sysex end */
{
bytes[i] = 0xf7; /* Sysex end */
sysex_state[dev] = 0;
}
}
if (!midi_devs[orig_dev]->outputc(orig_dev, bytes[i]))
{
/*
* Hardware level buffer is full. Abort the sysex message.
*/
int timeout = 0;
bytes[i] = 0xf7;
sysex_state[dev] = 0;
while (!midi_devs[orig_dev]->outputc(orig_dev, bytes[i]) &&
timeout < 1000)
timeout++;
}
if (!sysex_state[dev])
return 0;
}
return 0;
}
EXPORT_SYMBOL(midi_synth_send_sysex);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3447_1 |
crossvul-cpp_data_bad_5640_0 | /*
* Performance events core code:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include "internal.h"
#include <asm/irq_regs.h>
struct remote_function_call {
struct task_struct *p;
int (*func)(void *info);
void *info;
int ret;
};
static void remote_function(void *data)
{
struct remote_function_call *tfc = data;
struct task_struct *p = tfc->p;
if (p) {
tfc->ret = -EAGAIN;
if (task_cpu(p) != smp_processor_id() || !task_curr(p))
return;
}
tfc->ret = tfc->func(tfc->info);
}
/**
* task_function_call - call a function on the cpu on which a task runs
* @p: the task to evaluate
* @func: the function to be called
* @info: the function call argument
*
* Calls the function @func when the task is currently running. This might
* be on the current CPU, which just calls the function directly
*
* returns: @func return value, or
* -ESRCH - when the process isn't running
* -EAGAIN - when the process moved away
*/
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
struct remote_function_call data = {
.p = p,
.func = func,
.info = info,
.ret = -ESRCH, /* No such (running) process */
};
if (task_curr(p))
smp_call_function_single(task_cpu(p), remote_function, &data, 1);
return data.ret;
}
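/*
 * Editor's sketch of a typical caller (hypothetical code, mirroring the
 * retry loops used later in this file): rerun the cross-call while the
 * task keeps migrating between CPUs.
 */
#if 0
static int example_func(void *info)
{
	int *out = info;
	*out = smp_processor_id();	/* runs on the task's current CPU */
	return 0;
}

static void example_caller(struct task_struct *p)
{
	int cpu = -1;

	while (task_function_call(p, example_func, &cpu) == -EAGAIN)
		;	/* task moved away before the IPI ran; retry */
}
#endif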
/**
* cpu_function_call - call a function on the cpu
* @func: the function to be called
* @info: the function call argument
*
* Calls the function @func on the remote cpu.
*
* returns: @func return value or -ENXIO when the cpu is offline
*/
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
struct remote_function_call data = {
.p = NULL,
.func = func,
.info = info,
.ret = -ENXIO, /* No such CPU */
};
smp_call_function_single(cpu, remote_function, &data, 1);
return data.ret;
}
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP)
/*
* branch priv levels that need permission checks
*/
#define PERF_SAMPLE_BRANCH_PERM_PLM \
(PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
enum event_type_t {
EVENT_FLEXIBLE = 0x1,
EVENT_PINNED = 0x2,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
/*
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
/*
* max perf event sample rate
*/
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
int perf_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
return 0;
}
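/*
 * Worked example (editor's note): with the default sample rate of
 * 100000 and HZ == 1000, max_samples_per_tick is
 * DIV_ROUND_UP(100000, 1000) == 100 samples per timer tick; writing
 * 250000 through the sysctl above raises it to 250.
 */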
static atomic64_t perf_event_id;
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb);
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
{
return "pmu";
}
static inline u64 perf_clock(void)
{
return local_clock();
}
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx)
raw_spin_lock(&ctx->lock);
}
static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
if (ctx)
raw_spin_unlock(&ctx->lock);
raw_spin_unlock(&cpuctx->ctx.lock);
}
#ifdef CONFIG_CGROUP_PERF
/*
* Must ensure cgroup is pinned (css_get) before calling
* this function. In other words, we cannot call this function
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
return container_of(task_subsys_state(task, perf_subsys_id),
struct perf_cgroup, css);
}
static inline bool
perf_cgroup_match(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
return !event->cgrp || event->cgrp == cpuctx->cgrp;
}
static inline bool perf_tryget_cgroup(struct perf_event *event)
{
return css_tryget(&event->cgrp->css);
}
static inline void perf_put_cgroup(struct perf_event *event)
{
css_put(&event->cgrp->css);
}
static inline void perf_detach_cgroup(struct perf_event *event)
{
perf_put_cgroup(event);
event->cgrp = NULL;
}
static inline int is_cgroup_event(struct perf_event *event)
{
return event->cgrp != NULL;
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
return t->time;
}
static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
struct perf_cgroup_info *info;
u64 now;
now = perf_clock();
info = this_cpu_ptr(cgrp->info);
info->time += now - info->timestamp;
info->timestamp = now;
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
struct perf_cgroup *cgrp_out = cpuctx->cgrp;
if (cgrp_out)
__update_cgrp_time(cgrp_out);
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
struct perf_cgroup *cgrp;
/*
* ensure we access cgroup data only when needed and
* when we know the cgroup is pinned (css_get)
*/
if (!is_cgroup_event(event))
return;
cgrp = perf_cgroup_from_task(current);
/*
* Do not update time when cgroup is not active
*/
if (cgrp == event->cgrp)
__update_cgrp_time(event->cgrp);
}
static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
struct perf_cgroup *cgrp;
struct perf_cgroup_info *info;
/*
* ctx->lock held by caller
* ensure we do not access cgroup data
* unless we have the cgroup pinned (css_get)
*/
if (!task || !ctx->nr_cgroups)
return;
cgrp = perf_cgroup_from_task(task);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
/*
* reschedule events based on the cgroup constraint of task.
*
* mode SWOUT : schedule out everything
* mode SWIN : schedule in based on cgroup for next
*/
void perf_cgroup_switch(struct task_struct *task, int mode)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
unsigned long flags;
/*
* disable interrupts to avoid getting nr_cgroup
* changes via __perf_event_disable(). Also
* avoids preemption.
*/
local_irq_save(flags);
/*
* we reschedule only in the presence of cgroup
* constrained events.
*/
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
continue; /* ensure we process each cpuctx once */
/*
* perf_cgroup_events says at least one
* context on this CPU has cgroup events.
*
* ctx->nr_cgroups reports the number of cgroup
* events for a context.
*/
if (cpuctx->ctx.nr_cgroups > 0) {
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
if (mode & PERF_CGROUP_SWOUT) {
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
/*
* must not be done before ctxswout due
* to event_filter_match() in event_sched_out()
*/
cpuctx->cgrp = NULL;
}
if (mode & PERF_CGROUP_SWIN) {
WARN_ON_ONCE(cpuctx->cgrp);
/*
* set cgrp before ctxsw in to allow
* event_filter_match() to not have to pass
* task around
*/
cpuctx->cgrp = perf_cgroup_from_task(task);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
rcu_read_unlock();
local_irq_restore(flags);
}
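/*
 * Editor's note: the two mode bits combine for a full switch. A sketch
 * of an assumed call site (e.g. moving a task between cgroups) that
 * schedules the old cgroup's events out and the new ones in:
 */
#if 0
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
#endif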
static inline void perf_cgroup_sched_out(struct task_struct *task,
struct task_struct *next)
{
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
/*
* we come here when we know perf_cgroup_events > 0
*/
cgrp1 = perf_cgroup_from_task(task);
/*
* next is NULL when called from perf_event_enable_on_exec()
* that will systematically cause a cgroup_switch()
*/
if (next)
cgrp2 = perf_cgroup_from_task(next);
/*
* only schedule out current cgroup events if we know
* that we are switching to a different cgroup. Otherwise,
* do not touch the cgroup events.
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
/*
* we come here when we know perf_cgroup_events > 0
*/
cgrp1 = perf_cgroup_from_task(task);
/* prev can never be NULL */
cgrp2 = perf_cgroup_from_task(prev);
/*
* only need to schedule in cgroup events if we are changing
* cgroup during ctxsw. Cgroup events were not scheduled
* out during the previous ctxsw if that was not the case.
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
struct perf_cgroup *cgrp;
struct cgroup_subsys_state *css;
struct fd f = fdget(fd);
int ret = 0;
if (!f.file)
return -EBADF;
css = cgroup_css_from_dir(f.file, perf_subsys_id);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
goto out;
}
cgrp = container_of(css, struct perf_cgroup, css);
event->cgrp = cgrp;
/* must be done before we fput() the file */
if (!perf_tryget_cgroup(event)) {
event->cgrp = NULL;
ret = -ENOENT;
goto out;
}
/*
* all events in a group must monitor
* the same cgroup because a task belongs
* to only one perf cgroup at a time
*/
if (group_leader && group_leader->cgrp != cgrp) {
perf_detach_cgroup(event);
ret = -EINVAL;
}
out:
fdput(f);
return ret;
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
event->shadow_ctx_time = now - t->timestamp;
}
static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
/*
* when the current task's perf cgroup does not match
* the event's, we need to remember to call the
* perf_mark_enable() function the first time a task with
* a matching perf cgroup is scheduled in.
*/
if (is_cgroup_event(event) && !perf_cgroup_match(event))
event->cgrp_defer_enabled = 1;
}
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
if (!event->cgrp_defer_enabled)
return;
event->cgrp_defer_enabled = 0;
event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
sub->tstamp_enabled = tstamp - sub->total_time_enabled;
sub->cgrp_defer_enabled = 0;
}
}
}
#else /* !CONFIG_CGROUP_PERF */
static inline bool
perf_cgroup_match(struct perf_event *event)
{
return true;
}
static inline void perf_detach_cgroup(struct perf_event *event)
{}
static inline int is_cgroup_event(struct perf_event *event)
{
return 0;
}
static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
return 0;
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}
static inline void perf_cgroup_sched_out(struct task_struct *task,
struct task_struct *next)
{
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct task_struct *task)
{
}
static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
return -EINVAL;
}
static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
}
void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
return 0;
}
static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
}
#endif
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!(*count)++)
pmu->pmu_disable(pmu);
}
void perf_pmu_enable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!--(*count))
pmu->pmu_enable(pmu);
}
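/*
 * Editor's note: the per-cpu count makes these calls nestable; only the
 * outermost pair reaches the hardware. Illustrative sequence:
 */
#if 0
	perf_pmu_disable(pmu);	/* count 0 -> 1: pmu->pmu_disable() runs */
	perf_pmu_disable(pmu);	/* count 1 -> 2: no hardware access */
	perf_pmu_enable(pmu);	/* count 2 -> 1: no hardware access */
	perf_pmu_enable(pmu);	/* count 1 -> 0: pmu->pmu_enable() runs */
#endif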
static DEFINE_PER_CPU(struct list_head, rotation_list);
/*
* perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
* because they're strictly cpu affine and rotate_start is called with IRQs
* disabled, while rotate_context is called from IRQ context.
*/
static void perf_pmu_rotate_start(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct list_head *head = &__get_cpu_var(rotation_list);
WARN_ON(!irqs_disabled());
if (list_empty(&cpuctx->rotation_list))
list_add(&cpuctx->rotation_list, head);
}
static void get_ctx(struct perf_event_context *ctx)
{
WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
static void put_ctx(struct perf_event_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task)
put_task_struct(ctx->task);
kfree_rcu(ctx, rcu_head);
}
}
static void unclone_ctx(struct perf_event_context *ctx)
{
if (ctx->parent_ctx) {
put_ctx(ctx->parent_ctx);
ctx->parent_ctx = NULL;
}
}
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
/*
* only top level events have the pid namespace they were created in
*/
if (event->parent)
event = event->parent;
return task_tgid_nr_ns(p, event->ns);
}
static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
/*
* only top level events have the pid namespace they were created in
*/
if (event->parent)
event = event->parent;
return task_pid_nr_ns(p, event->ns);
}
/*
* If we inherit events we want to return the parent event id
* to userspace.
*/
static u64 primary_event_id(struct perf_event *event)
{
u64 id = event->id;
if (event->parent)
id = event->parent->id;
return id;
}
/*
* Get the perf_event_context for a task and lock it.
* This has to cope with the fact that until it is locked,
* the context could get moved to another task.
*/
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
struct perf_event_context *ctx;
rcu_read_lock();
retry:
ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
if (ctx) {
/*
* If this context is a clone of another, it might
* get swapped for another underneath us by
* perf_event_task_sched_out, though the
* rcu_read_lock() protects us from any context
* getting freed. Lock the context and check if it
* got swapped before we could get the lock, and retry
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
raw_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}
rcu_read_unlock();
return ctx;
}
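/*
 * Editor's note: the function above is an instance of the common
 * load/lock/recheck pattern for pinning an RCU-published pointer.
 * In pseudocode:
 *
 *	rcu_read_lock();
 * retry:
 *	ptr = rcu_dereference(slot);
 *	lock(ptr);
 *	if (ptr != rcu_dereference(slot)) {
 *		unlock(ptr);
 *		goto retry;
 *	}
 *	// ptr cannot change while the lock is held
 */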
/*
* Get the context for a task and increment its pin_count so it
* can't get swapped to another task. This also increments its
* reference count so that the context can't get freed.
*/
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
struct perf_event_context *ctx;
unsigned long flags;
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
/*
* Update the record of the current time in a context.
*/
static void update_context_time(struct perf_event_context *ctx)
{
u64 now = perf_clock();
ctx->time += now - ctx->timestamp;
ctx->timestamp = now;
}
static u64 perf_event_time(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
return ctx ? ctx->time : 0;
}
/*
* Update the total_time_enabled and total_time_running fields for an event.
* The caller of this function needs to hold the ctx->lock.
*/
static void update_event_times(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
u64 run_end;
if (event->state < PERF_EVENT_STATE_INACTIVE ||
event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
return;
/*
* in cgroup mode, time_enabled represents
* the time the event was enabled AND active
* tasks were in the monitored cgroup. This is
* independent of the activity of the context as
* there may be a mix of cgroup and non-cgroup events.
*
* That is why we treat cgroup events differently
* here.
*/
if (is_cgroup_event(event))
run_end = perf_cgroup_event_time(event);
else if (ctx->is_active)
run_end = ctx->time;
else
run_end = event->tstamp_stopped;
event->total_time_enabled = run_end - event->tstamp_enabled;
if (event->state == PERF_EVENT_STATE_INACTIVE)
run_end = event->tstamp_stopped;
else
run_end = perf_event_time(event);
event->total_time_running = run_end - event->tstamp_running;
}
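/*
 * Worked example (editor's note): an event enabled at t=100
 * (tstamp_enabled = tstamp_running = 100) that ran until t=150
 * (tstamp_stopped = 150) and is now INACTIVE while ctx->time has
 * advanced to 200 yields total_time_enabled = 200 - 100 = 100 and
 * total_time_running = 150 - 100 = 50.
 */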
/*
* Update total_time_enabled and total_time_running for all events in a group.
*/
static void update_group_times(struct perf_event *leader)
{
struct perf_event *event;
update_event_times(leader);
list_for_each_entry(event, &leader->sibling_list, group_entry)
update_event_times(event);
}
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
if (event->attr.pinned)
return &ctx->pinned_groups;
else
return &ctx->flexible_groups;
}
/*
* Add an event to the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
event->attach_state |= PERF_ATTACH_CONTEXT;
/*
* If we're a stand alone event or group leader, we go to the context
* list, group events are kept attached to the group so that
* perf_group_detach can, at all times, locate all siblings.
*/
if (event->group_leader == event) {
struct list_head *list;
if (is_software_event(event))
event->group_flags |= PERF_GROUP_SOFTWARE;
list = ctx_group_list(event, ctx);
list_add_tail(&event->group_entry, list);
}
if (is_cgroup_event(event))
ctx->nr_cgroups++;
if (has_branch_stack(event))
ctx->nr_branch_stack++;
list_add_rcu(&event->event_entry, &ctx->event_list);
if (!ctx->nr_events)
perf_pmu_rotate_start(ctx->pmu);
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
}
/*
* Initialize event state based on the perf_event_attr::disabled.
*/
static inline void perf_event__state_init(struct perf_event *event)
{
event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
PERF_EVENT_STATE_INACTIVE;
}
/*
* Called at perf_event creation and when events are attached/detached from a
* group.
*/
static void perf_event__read_size(struct perf_event *event)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
nr += event->group_leader->nr_siblings;
size += sizeof(u64);
}
size += entry * nr;
event->read_size = size;
}
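/*
 * Worked example (editor's note): for read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * on a leader with two siblings: entry = 8 (value) + 8 (id) = 16,
 * size = 8 (time_enabled) + 8 (nr) = 16 and nr = 3, so
 * read_size = 16 + 16 * 3 = 64 bytes.
 */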
static void perf_event__header_size(struct perf_event *event)
{
struct perf_sample_data *data;
u64 sample_type = event->attr.sample_type;
u16 size = 0;
perf_event__read_size(event);
if (sample_type & PERF_SAMPLE_IP)
size += sizeof(data->ip);
if (sample_type & PERF_SAMPLE_ADDR)
size += sizeof(data->addr);
if (sample_type & PERF_SAMPLE_PERIOD)
size += sizeof(data->period);
if (sample_type & PERF_SAMPLE_READ)
size += event->read_size;
event->header_size = size;
}
static void perf_event__id_header_size(struct perf_event *event)
{
struct perf_sample_data *data;
u64 sample_type = event->attr.sample_type;
u16 size = 0;
if (sample_type & PERF_SAMPLE_TID)
size += sizeof(data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(data->time);
if (sample_type & PERF_SAMPLE_ID)
size += sizeof(data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
size += sizeof(data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu_entry);
event->id_header_size = size;
}
static void perf_group_attach(struct perf_event *event)
{
struct perf_event *group_leader = event->group_leader, *pos;
/*
* We can have double attach due to group movement in perf_event_open.
*/
if (event->attach_state & PERF_ATTACH_GROUP)
return;
event->attach_state |= PERF_ATTACH_GROUP;
if (group_leader == event)
return;
if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
!is_software_event(event))
group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
list_add_tail(&event->group_entry, &group_leader->sibling_list);
group_leader->nr_siblings++;
perf_event__header_size(group_leader);
list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
perf_event__header_size(pos);
}
/*
* Remove an event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx;
/*
* We can have double detach due to exit/hot-unplug + close.
*/
if (!(event->attach_state & PERF_ATTACH_CONTEXT))
return;
event->attach_state &= ~PERF_ATTACH_CONTEXT;
if (is_cgroup_event(event)) {
ctx->nr_cgroups--;
cpuctx = __get_cpu_context(ctx);
/*
* if there are no more cgroup events
* then clear cgrp to avoid a stale pointer
* in update_cgrp_time_from_cpuctx()
*/
if (!ctx->nr_cgroups)
cpuctx->cgrp = NULL;
}
if (has_branch_stack(event))
ctx->nr_branch_stack--;
ctx->nr_events--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
list_del_rcu(&event->event_entry);
if (event->group_leader == event)
list_del_init(&event->group_entry);
update_group_times(event);
/*
* If event was in error state, then keep it
* that way, otherwise bogus counts will be
* returned on read(). The only way to get out
* of error state is by explicit re-enabling
* of the event
*/
if (event->state > PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_OFF;
}
static void perf_group_detach(struct perf_event *event)
{
struct perf_event *sibling, *tmp;
struct list_head *list = NULL;
/*
* We can have double detach due to exit/hot-unplug + close.
*/
if (!(event->attach_state & PERF_ATTACH_GROUP))
return;
event->attach_state &= ~PERF_ATTACH_GROUP;
/*
* If this is a sibling, remove it from its group.
*/
if (event->group_leader != event) {
list_del_init(&event->group_entry);
event->group_leader->nr_siblings--;
goto out;
}
if (!list_empty(&event->group_entry))
list = &event->group_entry;
/*
* If this was a group event with sibling events then
* upgrade the siblings to singleton events by adding them
* to whatever list we are on.
*/
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
if (list)
list_move_tail(&sibling->group_entry, list);
sibling->group_leader = sibling;
/* Inherit group flags from the previous leader */
sibling->group_flags = event->group_flags;
}
out:
perf_event__header_size(event->group_leader);
list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
perf_event__header_size(tmp);
}
static inline int
event_filter_match(struct perf_event *event)
{
return (event->cpu == -1 || event->cpu == smp_processor_id())
&& perf_cgroup_match(event);
}
static void
event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
u64 delta;
/*
* An event which could not be activated because of
* filter mismatch still needs to have its timings
* maintained, otherwise bogus information is returned
* via read() for time_enabled, time_running:
*/
if (event->state == PERF_EVENT_STATE_INACTIVE
&& !event_filter_match(event)) {
delta = tstamp - event->tstamp_stopped;
event->tstamp_running += delta;
event->tstamp_stopped = tstamp;
}
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
event->tstamp_stopped = tstamp;
event->pmu->del(event, 0);
event->oncpu = -1;
if (!is_software_event(event))
cpuctx->active_oncpu--;
ctx->nr_active--;
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
}
static void
group_sched_out(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event;
int state = group_event->state;
event_sched_out(group_event, cpuctx, ctx);
/*
* Schedule out siblings (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
/*
* Cross CPU call to remove a performance event
*
* We disable the event on the hardware level first. After that we
* remove it from the context list.
*/
static int __perf_remove_from_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
raw_spin_lock(&ctx->lock);
event_sched_out(event, cpuctx, ctx);
list_del_event(event, ctx);
if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
ctx->is_active = 0;
cpuctx->task_ctx = NULL;
}
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
* Remove the event from a task's (or a CPU's) list of events.
*
* CPU events are removed with a smp call. For task events we only
* call when the task is on a CPU.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This is OK when called from perf_release since
* that only calls us on the top-level context, which can't be a clone.
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
static void perf_remove_from_context(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
lockdep_assert_held(&ctx->mutex);
if (!task) {
/*
* Per cpu events are removed via an smp call and
* the removal is always successful.
*/
cpu_function_call(event->cpu, __perf_remove_from_context, event);
return;
}
retry:
if (!task_function_call(task, __perf_remove_from_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If we failed to find a running task, but find the context active now
* that we've acquired the ctx->lock, retry.
*/
if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
/*
* Since the task isn't running, it's safe to remove the event; our
* holding the ctx->lock ensures the task won't get scheduled in.
*/
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
/*
* Cross CPU call to disable a performance event
*/
int __perf_event_disable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/*
* If this is a per-task event, need to check whether this
* event's task is the current task on this cpu.
*
* Can trigger due to concurrent perf_event_context_sched_out()
* flipping contexts around.
*/
if (ctx->task && cpuctx->task_ctx != ctx)
return -EINVAL;
raw_spin_lock(&ctx->lock);
/*
* If the event is on, turn it off.
* If it is in error state, leave it in error state.
*/
if (event->state >= PERF_EVENT_STATE_INACTIVE) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
update_group_times(event);
if (event == event->group_leader)
group_sched_out(event, cpuctx, ctx);
else
event_sched_out(event, cpuctx, ctx);
event->state = PERF_EVENT_STATE_OFF;
}
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
* Disable an event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This condition is satisfied when called through
* perf_event_for_each_child or perf_event_for_each because they
* hold the top-level event's child_mutex, so any descendant that
* goes to exit will block in sync_child_event.
* When called from perf_pending_event it's OK because event->ctx
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
* Disable the event on the cpu that it's on
*/
cpu_function_call(event->cpu, __perf_event_disable, event);
return;
}
retry:
if (!task_function_call(task, __perf_event_disable, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If the event is still active, we need to retry the cross-call.
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
raw_spin_unlock_irq(&ctx->lock);
/*
* Reload the task pointer, it might have been changed by
* a concurrent perf_event_context_sched_out().
*/
task = ctx->task;
goto retry;
}
/*
* Since we have the lock this context can't be scheduled
* in, so we can change the state safely.
*/
if (event->state == PERF_EVENT_STATE_INACTIVE) {
update_group_times(event);
event->state = PERF_EVENT_STATE_OFF;
}
raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
{
/*
* use the correct time source for the time snapshot
*
* We could get by without this by leveraging the
* fact that to get to this function, the caller
* has most likely already called update_context_time()
* and update_cgrp_time_xx() and thus both timestamp
* are identical (or very close). Given that tstamp is
* already adjusted for cgroup, we could say that:
* tstamp - ctx->timestamp
* is equivalent to
* tstamp - cgrp->timestamp.
*
* Then, in perf_output_read(), the calculation would
* work with no changes because:
* - event is guaranteed scheduled in
* - no scheduled out in between
* - thus the timestamp would be the same
*
* But this is a bit hairy.
*
* So instead, we have an explicit cgroup call to remain
* within the same time source all along. We believe it
* is cleaner and simpler to understand.
*/
if (is_cgroup_event(event))
perf_cgroup_set_shadow_time(event, tstamp);
else
event->shadow_ctx_time = tstamp - ctx->timestamp;
}
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
static int
event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
event->state = PERF_EVENT_STATE_ACTIVE;
event->oncpu = smp_processor_id();
/*
* Unthrottle events, since we scheduled we might have missed several
* ticks already, also for a heavily scheduling task there is little
* guarantee it'll get a tick in a timely manner.
*/
if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
perf_log_throttle(event, 1);
event->hw.interrupts = 0;
}
/*
* The new state must be visible before we turn it on in the hardware:
*/
smp_wmb();
if (event->pmu->add(event, PERF_EF_START)) {
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
return -EAGAIN;
}
event->tstamp_running += tstamp - event->tstamp_stopped;
perf_set_shadow_time(event, ctx, tstamp);
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq++;
if (event->attr.exclusive)
cpuctx->exclusive = 1;
return 0;
}
static int
group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = group_event->pmu;
u64 now = ctx->time;
bool simulate = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
pmu->start_txn(pmu);
if (event_sched_in(group_event, cpuctx, ctx)) {
pmu->cancel_txn(pmu);
return -EAGAIN;
}
/*
* Schedule in siblings as one group (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event_sched_in(event, cpuctx, ctx)) {
partial_group = event;
goto group_error;
}
}
if (!pmu->commit_txn(pmu))
return 0;
group_error:
/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
* The events up to the failed event are scheduled out normally,
* tstamp_stopped will be updated.
*
* The failed events and the remaining siblings need to have
* their timings updated as if they had gone thru event_sched_in()
* and event_sched_out(). This is required to get consistent timings
* across the group. This also takes care of the case where the group
* could never be scheduled by ensuring tstamp_stopped is set to mark
* the time the event was actually stopped, such that time delta
* calculation in update_event_times() is correct.
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event == partial_group)
simulate = true;
if (simulate) {
event->tstamp_running += now - event->tstamp_stopped;
event->tstamp_stopped = now;
} else {
event_sched_out(event, cpuctx, ctx);
}
}
event_sched_out(group_event, cpuctx, ctx);
pmu->cancel_txn(pmu);
return -EAGAIN;
}
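/*
 * Editor's sketch of the PMU transaction pattern used above
 * (hypothetical helpers, not original code): a group is scheduled in
 * atomically or not at all.
 */
#if 0
	pmu->start_txn(pmu);
	if (add_every_group_member())		/* hypothetical helper */
		goto undo;
	if (!pmu->commit_txn(pmu))
		return 0;			/* whole group is now on */
undo:
	unwind_partial_group();			/* hypothetical helper */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
#endif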
/*
* Work out whether we can put this event group on the CPU now.
*/
static int group_can_go_on(struct perf_event *event,
struct perf_cpu_context *cpuctx,
int can_add_hw)
{
/*
* Groups consisting entirely of software events can always go on.
*/
if (event->group_flags & PERF_GROUP_SOFTWARE)
return 1;
/*
* If an exclusive group is already on, no other hardware
* events can go on.
*/
if (cpuctx->exclusive)
return 0;
/*
* If this group is exclusive and there are already
* events on the CPU, it can't go on.
*/
if (event->attr.exclusive && cpuctx->active_oncpu)
return 0;
/*
* Otherwise, try to add it if all previous groups were able
* to go on.
*/
return can_add_hw;
}
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
list_add_event(event, ctx);
perf_group_attach(event);
event->tstamp_enabled = tstamp;
event->tstamp_running = tstamp;
event->tstamp_stopped = tstamp;
}
static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
struct task_struct *task)
{
cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
if (ctx)
ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
if (ctx)
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
/*
* Cross CPU call to install and enable a performance event
*
* Must be called with ctx->mutex held
*/
static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
struct task_struct *task = current;
perf_ctx_lock(cpuctx, task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
/*
* If there was an active task_ctx schedule it out.
*/
if (task_ctx)
task_ctx_sched_out(task_ctx);
/*
* If the context we're installing events in is not the
* active task_ctx, flip them.
*/
if (ctx->task && task_ctx != ctx) {
if (task_ctx)
raw_spin_unlock(&task_ctx->lock);
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
}
if (task_ctx) {
cpuctx->task_ctx = task_ctx;
task = task_ctx->task;
}
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
update_context_time(ctx);
/*
* update cgrp time only if current cgrp
* matches event->cgrp. Must be done before
* calling add_event_to_ctx()
*/
update_cgrp_time_from_event(event);
add_event_to_ctx(event, ctx);
/*
* Schedule everything back in
*/
perf_event_sched_in(cpuctx, task_ctx, task);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, task_ctx);
return 0;
}
/*
* Attach a performance event to a context
*
* First we add the event to the list with the hardware enable bit
* in event->hw_config cleared.
*
* If the event is attached to a task which is on a CPU we use a smp
* call to enable it in the task context. The task might have been
* scheduled away, but we check this in the smp call again.
*/
static void
perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{
struct task_struct *task = ctx->task;
lockdep_assert_held(&ctx->mutex);
event->ctx = ctx;
if (event->cpu != -1)
event->cpu = cpu;
if (!task) {
/*
* Per cpu events are installed via an smp call and
* the install is always successful.
*/
cpu_function_call(cpu, __perf_install_in_context, event);
return;
}
retry:
if (!task_function_call(task, __perf_install_in_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If we failed to find a running task, but find the context active now
* that we've acquired the ctx->lock, retry.
*/
if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
/*
* Since the task isn't running, it's safe to add the event; our holding
* the ctx->lock ensures the task won't get scheduled in.
*/
add_event_to_ctx(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
/*
* Put an event into inactive state and update time fields.
* Enabling the leader of a group effectively enables all
* the group members that aren't explicitly disabled, so we
* have to update their ->tstamp_enabled also.
* Note: this works for group members as well as group leaders
* since the non-leader members' sibling_lists will be empty.
*/
static void __perf_event_mark_enabled(struct perf_event *event)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
event->state = PERF_EVENT_STATE_INACTIVE;
event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE)
sub->tstamp_enabled = tstamp - sub->total_time_enabled;
}
}
/*
* Cross CPU call to enable a performance event
*/
static int __perf_event_enable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_event *leader = event->group_leader;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
int err;
if (WARN_ON_ONCE(!ctx->is_active))
return -EINVAL;
raw_spin_lock(&ctx->lock);
update_context_time(ctx);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto unlock;
/*
* set current task's cgroup time reference point
*/
perf_cgroup_set_timestamp(current, ctx);
__perf_event_mark_enabled(event);
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
goto unlock;
}
/*
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
*/
if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
goto unlock;
if (!group_can_go_on(event, cpuctx, 1)) {
err = -EEXIST;
} else {
if (event == leader)
err = group_sched_in(event, cpuctx, ctx);
else
err = event_sched_in(event, cpuctx, ctx);
}
if (err) {
/*
* If this event can't go on and it's part of a
* group, then the whole group has to come off.
*/
if (leader != event)
group_sched_out(leader, cpuctx, ctx);
if (leader->attr.pinned) {
update_group_times(leader);
leader->state = PERF_EVENT_STATE_ERROR;
}
}
unlock:
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
* Enable an event.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This condition is satisfied when called through
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
* Enable the event on the cpu that it's on
*/
cpu_function_call(event->cpu, __perf_event_enable, event);
return;
}
raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
/*
* If the event is in error state, clear that first.
* That way, if we see the event in error state below, we
* know that it has gone back into error state, as distinct
* from the task having been scheduled away before the
* cross-call arrived.
*/
if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF;
retry:
if (!ctx->is_active) {
__perf_event_mark_enabled(event);
goto out;
}
raw_spin_unlock_irq(&ctx->lock);
if (!task_function_call(task, __perf_event_enable, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
* we need to retry the cross-call.
*/
if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
/*
* task could have been flipped by a concurrent
* perf_event_context_sched_out()
*/
task = ctx->task;
goto retry;
}
out:
raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_enable);
int perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
*/
if (event->attr.inherit || !is_sampling_event(event))
return -EINVAL;
atomic_add(refresh, &event->event_limit);
perf_event_enable(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
struct perf_event *event;
int is_active = ctx->is_active;
ctx->is_active &= ~event_type;
if (likely(!ctx->nr_events))
return;
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
if (!ctx->nr_active)
return;
perf_pmu_disable(ctx->pmu);
if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
perf_pmu_enable(ctx->pmu);
}
/*
* Test whether two contexts are equivalent, i.e. whether they
* have both been cloned from the same version of the same context
* and they both have the same number of enabled events.
* If the number of enabled events is the same, then the set
* of enabled events should be the same, because these are both
* inherited contexts, therefore we can't access individual events
* in them directly with an fd; we can only enable/disable all
* events via prctl, or enable/disable all events in a family
* via ioctl, which will have the same effect on both contexts.
*/
static int context_equiv(struct perf_event_context *ctx1,
struct perf_event_context *ctx2)
{
return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
&& ctx1->parent_gen == ctx2->parent_gen
&& !ctx1->pin_count && !ctx2->pin_count;
}
static void __perf_event_sync_stat(struct perf_event *event,
struct perf_event *next_event)
{
u64 value;
if (!event->attr.inherit_stat)
return;
/*
* Update the event value; we cannot use perf_event_read()
* because we're in the middle of a context switch and have IRQs
* disabled, which upsets smp_call_function_single(), however
* we know the event must be on the current CPU, therefore we
* don't need to use it.
*/
switch (event->state) {
case PERF_EVENT_STATE_ACTIVE:
event->pmu->read(event);
/* fall-through */
case PERF_EVENT_STATE_INACTIVE:
update_event_times(event);
break;
default:
break;
}
/*
* In order to keep per-task stats reliable we need to flip the event
* values when we flip the contexts.
*/
value = local64_read(&next_event->count);
value = local64_xchg(&event->count, value);
local64_set(&next_event->count, value);
swap(event->total_time_enabled, next_event->total_time_enabled);
swap(event->total_time_running, next_event->total_time_running);
/*
* Since we swizzled the values, update the user visible data too.
*/
perf_event_update_userpage(event);
perf_event_update_userpage(next_event);
}
#define list_next_entry(pos, member) \
list_entry(pos->member.next, typeof(*pos), member)
static void perf_event_sync_stat(struct perf_event_context *ctx,
struct perf_event_context *next_ctx)
{
struct perf_event *event, *next_event;
if (!ctx->nr_stat)
return;
update_context_time(ctx);
event = list_first_entry(&ctx->event_list,
struct perf_event, event_entry);
next_event = list_first_entry(&next_ctx->event_list,
struct perf_event, event_entry);
while (&event->event_entry != &ctx->event_list &&
&next_event->event_entry != &next_ctx->event_list) {
__perf_event_sync_stat(event, next_event);
event = list_next_entry(event, event_entry);
next_event = list_next_entry(next_event, event_entry);
}
}
static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
struct task_struct *next)
{
struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
struct perf_event_context *next_ctx;
struct perf_event_context *parent;
struct perf_cpu_context *cpuctx;
int do_switch = 1;
if (likely(!ctx))
return;
cpuctx = __get_cpu_context(ctx);
if (!cpuctx->task_ctx)
return;
rcu_read_lock();
parent = rcu_dereference(ctx->parent_ctx);
next_ctx = next->perf_event_ctxp[ctxn];
if (parent && next_ctx &&
rcu_dereference(next_ctx->parent_ctx) == parent) {
/*
* Looks like the two contexts are clones, so we might be
* able to optimize the context switch. We lock both
* contexts and check that they are clones under the
* lock (including re-checking that neither has been
* uncloned in the meantime). It doesn't matter which
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
raw_spin_lock(&ctx->lock);
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts
* wrt rcu_dereference() of perf_event_ctxp
*/
task->perf_event_ctxp[ctxn] = next_ctx;
next->perf_event_ctxp[ctxn] = ctx;
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
perf_event_sync_stat(ctx, next_ctx);
}
raw_spin_unlock(&next_ctx->lock);
raw_spin_unlock(&ctx->lock);
}
rcu_read_unlock();
if (do_switch) {
raw_spin_lock(&ctx->lock);
ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
raw_spin_unlock(&ctx->lock);
}
}
#define for_each_task_context_nr(ctxn) \
for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
/*
* Called from scheduler to remove the events of the current task,
* with interrupts disabled.
*
* We stop each event and update the event value in event->count.
*
* This does not protect us against NMI, but disable()
* sets the disabled bit in the control field of event _before_
* accessing the event control register. If a NMI hits, then it will
* not restart the event.
*/
void __perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next)
{
int ctxn;
for_each_task_context_nr(ctxn)
perf_event_context_sched_out(task, ctxn, next);
/*
* if cgroup events exist on this CPU, then we need
* to check if we have to switch out PMU state.
* cgroup events are system-wide mode only
*/
if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
perf_cgroup_sched_out(task, next);
}
static void task_ctx_sched_out(struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
if (!cpuctx->task_ctx)
return;
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
}
/*
* Called with IRQs disabled
*/
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
if (!event_filter_match(event))
continue;
/* may need to reset tstamp_enabled */
if (is_cgroup_event(event))
perf_cgroup_mark_enabled(event, ctx);
if (group_can_go_on(event, cpuctx, 1))
group_sched_in(event, cpuctx, ctx);
/*
* If this pinned group hasn't been scheduled,
* put it in error state.
*/
if (event->state == PERF_EVENT_STATE_INACTIVE) {
update_group_times(event);
event->state = PERF_EVENT_STATE_ERROR;
}
}
}
static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
int can_add_hw = 1;
list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
/* Ignore events in OFF or ERROR state */
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
/*
* Listen to the 'cpu' scheduling filter constraint
* of events:
*/
if (!event_filter_match(event))
continue;
/* may need to reset tstamp_enabled */
if (is_cgroup_event(event))
perf_cgroup_mark_enabled(event, ctx);
if (group_can_go_on(event, cpuctx, can_add_hw)) {
if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0;
}
}
}
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task)
{
u64 now;
int is_active = ctx->is_active;
ctx->is_active |= event_type;
if (likely(!ctx->nr_events))
return;
now = perf_clock();
ctx->timestamp = now;
perf_cgroup_set_timestamp(task, ctx);
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
ctx_pinned_sched_in(ctx, cpuctx);
/* Then walk through the lower prio flexible groups */
if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
ctx_flexible_sched_in(ctx, cpuctx);
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task)
{
struct perf_event_context *ctx = &cpuctx->ctx;
ctx_sched_in(ctx, cpuctx, event_type, task);
}
static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
return;
perf_ctx_lock(cpuctx, ctx);
perf_pmu_disable(ctx->pmu);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
* cpu flexible, task flexible.
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx->nr_events)
cpuctx->task_ctx = ctx;
perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
/*
* Since these rotations are per-cpu, we need to ensure the
* cpu-context we got scheduled on is actually rotating.
*/
perf_pmu_rotate_start(ctx->pmu);
}
/*
* When sampling the branch stack in system-wide mode, it may be necessary
* to flush the stack on context switch. This happens when the branch
* stack does not tag its entries with the pid of the current task.
* Otherwise it becomes impossible to associate a branch entry with a
* task. This ambiguity is more likely to appear when the branch stack
* supports priv level filtering and the user sets it to monitor only
* at the user level (which could be a useful measurement in system-wide
* mode). In that case, the risk is high of having a branch stack with
* branch from multiple tasks. Flushing may mean dropping the existing
* entries or stashing them somewhere in the PMU specific code layer.
*
* This function provides the context switch callback to the lower code
* layer. It is invoked ONLY when there is at least one system-wide context
* with at least one active event using taken branch sampling.
*/
static void perf_branch_stack_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
unsigned long flags;
/* no need to flush branch stack if not changing task */
if (prev == task)
return;
local_irq_save(flags);
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
/*
* check if the context has at least one
* event using PERF_SAMPLE_BRANCH_STACK
*/
if (cpuctx->ctx.nr_branch_stack > 0
&& pmu->flush_branch_stack) {
pmu = cpuctx->ctx.pmu;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
pmu->flush_branch_stack();
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
rcu_read_unlock();
local_irq_restore(flags);
}
/*
* Called from scheduler to add the events of the current task
* with interrupts disabled.
*
* We restore the event value and then enable it.
*
* This does not protect us against NMI, but enable()
* sets the enabled bit in the control field of event _before_
* accessing the event control register. If a NMI hits, then it will
* keep the event running.
*/
void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_event_context *ctx;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (likely(!ctx))
continue;
perf_event_context_sched_in(ctx, task);
}
/*
* if cgroup events exist on this CPU, then we need
* to check if we have to switch in PMU state.
* cgroup events are system-wide mode only
*/
if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);
/* check for system-wide branch_stack events */
if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
perf_branch_stack_sched_in(prev, task);
}
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
u64 frequency = event->attr.sample_freq;
u64 sec = NSEC_PER_SEC;
u64 divisor, dividend;
int count_fls, nsec_fls, frequency_fls, sec_fls;
count_fls = fls64(count);
nsec_fls = fls64(nsec);
frequency_fls = fls64(frequency);
sec_fls = 30;
/*
* We got @count in @nsec, with a target of sample_freq HZ
* the target period becomes:
*
* @count * 10^9
* period = -------------------
* @nsec * sample_freq
*
*/
/*
* Reduce accuracy by one bit such that @a and @b converge
* to a similar magnitude.
*/
#define REDUCE_FLS(a, b) \
do { \
if (a##_fls > b##_fls) { \
a >>= 1; \
a##_fls--; \
} else { \
b >>= 1; \
b##_fls--; \
} \
} while (0)
/*
* Reduce accuracy until either term fits in a u64, then proceed with
* the other, so that finally we can do a u64/u64 division.
*/
while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
REDUCE_FLS(sec, count);
}
if (count_fls + sec_fls > 64) {
divisor = nsec * frequency;
while (count_fls + sec_fls > 64) {
REDUCE_FLS(count, sec);
divisor >>= 1;
}
dividend = count * sec;
} else {
dividend = count * sec;
while (nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
dividend >>= 1;
}
divisor = nsec * frequency;
}
if (!divisor)
return dividend;
return div64_u64(dividend, divisor);
}
static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
struct hw_perf_event *hwc = &event->hw;
s64 period, sample_period;
s64 delta;
period = perf_calculate_period(event, nsec, count);
delta = (s64)(period - hwc->sample_period);
delta = (delta + 7) / 8; /* low pass filter */
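/*
 * Worked example (illustrative numbers only): with a current
 * sample_period of 200 and a freshly calculated period of 1000,
 * delta = 800 and the filtered delta = (800 + 7) / 8 = 100, so
 * sample_period steps to 300 instead of jumping straight to 1000.
 * Successive ticks converge on the target while damping transient
 * spikes in the observed event rate.
 */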
sample_period = hwc->sample_period + delta;
if (!sample_period)
sample_period = 1;
hwc->sample_period = sample_period;
if (local64_read(&hwc->period_left) > 8*sample_period) {
if (disable)
event->pmu->stop(event, PERF_EF_UPDATE);
local64_set(&hwc->period_left, 0);
if (disable)
event->pmu->start(event, PERF_EF_RELOAD);
}
}
/*
* combine freq adjustment with unthrottling to avoid two passes over the
* events. At the same time, make sure that having freq events does not
* change the rate of unthrottling, as that would introduce bias.
*/
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
int needs_unthr)
{
struct perf_event *event;
struct hw_perf_event *hwc;
u64 now, period = TICK_NSEC;
s64 delta;
/*
* only need to iterate over all events if:
* - the context has events in frequency mode (needs freq adjust), or
* - there are events to unthrottle on this cpu
*/
if (!(ctx->nr_freq || needs_unthr))
return;
raw_spin_lock(&ctx->lock);
perf_pmu_disable(ctx->pmu);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
if (!event_filter_match(event))
continue;
hwc = &event->hw;
if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
hwc->interrupts = 0;
perf_log_throttle(event, 1);
event->pmu->start(event, 0);
}
if (!event->attr.freq || !event->attr.sample_freq)
continue;
/*
* stop the event and update event->count
*/
event->pmu->stop(event, PERF_EF_UPDATE);
now = local64_read(&event->count);
delta = now - hwc->freq_count_stamp;
hwc->freq_count_stamp = now;
/*
* restart the event
* reload only if value has changed
* we have stopped the event so tell that
* to perf_adjust_period() to avoid stopping it
* twice.
*/
if (delta > 0)
perf_adjust_period(event, period, delta, false);
event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
}
perf_pmu_enable(ctx->pmu);
raw_spin_unlock(&ctx->lock);
}
/*
* Round-robin a context's events:
*/
static void rotate_ctx(struct perf_event_context *ctx)
{
/*
* Rotate the first entry of the non-pinned groups to the end. Rotation might be
* disabled by the inheritance code.
*/
if (!ctx->rotate_disable)
list_rotate_left(&ctx->flexible_groups);
}
/*
* perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
* because they're strictly cpu affine and rotate_start is called with IRQs
* disabled, while rotate_context is called from IRQ context.
*/
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = NULL;
int rotate = 0, remove = 1;
if (cpuctx->ctx.nr_events) {
remove = 0;
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
}
ctx = cpuctx->task_ctx;
if (ctx && ctx->nr_events) {
remove = 0;
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
}
if (!rotate)
goto done;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
perf_event_sched_in(cpuctx, ctx, current);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
}
void perf_event_task_tick(void)
{
struct list_head *head = &__get_cpu_var(rotation_list);
struct perf_cpu_context *cpuctx, *tmp;
struct perf_event_context *ctx;
int throttled;
WARN_ON(!irqs_disabled());
__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
ctx = &cpuctx->ctx;
perf_adjust_freq_unthr_context(ctx, throttled);
ctx = cpuctx->task_ctx;
if (ctx)
perf_adjust_freq_unthr_context(ctx, throttled);
if (cpuctx->jiffies_interval == 1 ||
!(jiffies % cpuctx->jiffies_interval))
perf_rotate_context(cpuctx);
}
}
static int event_enable_on_exec(struct perf_event *event,
struct perf_event_context *ctx)
{
if (!event->attr.enable_on_exec)
return 0;
event->attr.enable_on_exec = 0;
if (event->state >= PERF_EVENT_STATE_INACTIVE)
return 0;
__perf_event_mark_enabled(event);
return 1;
}
/*
* Enable all of a task's events that have been marked enable-on-exec.
* This expects task == current.
*/
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
struct perf_event *event;
unsigned long flags;
int enabled = 0;
int ret;
local_irq_save(flags);
if (!ctx || !ctx->nr_events)
goto out;
/*
* We must context-switch out cgroup events to avoid conflicts
* when invoking perf_task_event_sched_in() later on
* in this function. Otherwise we end up trying to
* switch in cgroup events which are already scheduled
* in.
*/
perf_cgroup_sched_out(current, NULL);
raw_spin_lock(&ctx->lock);
task_ctx_sched_out(ctx);
list_for_each_entry(event, &ctx->event_list, event_entry) {
ret = event_enable_on_exec(event, ctx);
if (ret)
enabled = 1;
}
/*
* Unclone this context if we enabled any event.
*/
if (enabled)
unclone_ctx(ctx);
raw_spin_unlock(&ctx->lock);
/*
* Also calls ctxswin for cgroup events, if any:
*/
perf_event_context_sched_in(ctx, ctx->task);
out:
local_irq_restore(flags);
}
/*
* Cross CPU call to read the hardware event
*/
static void __perf_event_read(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/*
* If this is a task context, we need to check whether it is
* the current task context of this cpu. If not, it has been
* scheduled out before the smp call arrived. In that case
* event->count would have been updated to a recent sample
* when the event was scheduled out.
*/
if (ctx->task && cpuctx->task_ctx != ctx)
return;
raw_spin_lock(&ctx->lock);
if (ctx->is_active) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
update_event_times(event);
if (event->state == PERF_EVENT_STATE_ACTIVE)
event->pmu->read(event);
raw_spin_unlock(&ctx->lock);
}
static inline u64 perf_event_count(struct perf_event *event)
{
return local64_read(&event->count) + atomic64_read(&event->child_count);
}
static u64 perf_event_read(struct perf_event *event)
{
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
smp_call_function_single(event->oncpu,
__perf_event_read, event, 1);
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
/*
* We may read while the context is not active
* (e.g., the thread is blocked); in that case
* we cannot update the context time.
*/
if (ctx->is_active) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return perf_event_count(event);
}
/*
* Initialize the perf_event context in a task_struct:
*/
static void __perf_event_init_context(struct perf_event_context *ctx)
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
}
static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
struct perf_event_context *ctx;
ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
if (!ctx)
return NULL;
__perf_event_init_context(ctx);
if (task) {
ctx->task = task;
get_task_struct(task);
}
ctx->pmu = pmu;
return ctx;
}
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
struct task_struct *task;
int err;
rcu_read_lock();
if (!vpid)
task = current;
else
task = find_task_by_vpid(vpid);
if (task)
get_task_struct(task);
rcu_read_unlock();
if (!task)
return ERR_PTR(-ESRCH);
/* Reuse ptrace permission checks for now. */
err = -EACCES;
if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto errout;
return task;
errout:
put_task_struct(task);
return ERR_PTR(err);
}
/*
* Returns a matching context with refcount and pincount.
*/
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
struct perf_event_context *ctx;
struct perf_cpu_context *cpuctx;
unsigned long flags;
int ctxn, err;
if (!task) {
/* Must be root to operate on a CPU event: */
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/*
* We could be clever and allow attaching an event to an
* offline CPU and activate it when the CPU comes up, but
* that's for later.
*/
if (!cpu_online(cpu))
return ERR_PTR(-ENODEV);
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
++ctx->pin_count;
return ctx;
}
err = -EINVAL;
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto errout;
retry:
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
unclone_ctx(ctx);
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
} else {
ctx = alloc_perf_context(pmu, task);
err = -ENOMEM;
if (!ctx)
goto errout;
err = 0;
mutex_lock(&task->perf_event_mutex);
/*
* If it has already passed perf_event_exit_task(),
* we must see PF_EXITING; it takes this mutex too.
*/
if (task->flags & PF_EXITING)
err = -ESRCH;
else if (task->perf_event_ctxp[ctxn])
err = -EAGAIN;
else {
get_ctx(ctx);
++ctx->pin_count;
rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
}
mutex_unlock(&task->perf_event_mutex);
if (unlikely(err)) {
put_ctx(ctx);
if (err == -EAGAIN)
goto retry;
goto errout;
}
}
return ctx;
errout:
return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);
static void free_event_rcu(struct rcu_head *head)
{
struct perf_event *event;
event = container_of(head, struct perf_event, rcu_head);
if (event->ns)
put_pid_ns(event->ns);
perf_event_free_filter(event);
kfree(event);
}
static void ring_buffer_put(struct ring_buffer *rb);
static void free_event(struct perf_event *event)
{
irq_work_sync(&event->pending);
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
static_key_slow_dec_deferred(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
atomic_dec(&nr_comm_events);
if (event->attr.task)
atomic_dec(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
if (is_cgroup_event(event)) {
atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
static_key_slow_dec_deferred(&perf_sched_events);
}
if (has_branch_stack(event)) {
static_key_slow_dec_deferred(&perf_sched_events);
/* is system-wide event */
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_dec(&per_cpu(perf_branch_stack_events,
event->cpu));
}
}
if (event->rb) {
ring_buffer_put(event->rb);
event->rb = NULL;
}
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (event->destroy)
event->destroy(event);
if (event->ctx)
put_ctx(event->ctx);
call_rcu(&event->rcu_head, free_event_rcu);
}
int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
WARN_ON_ONCE(ctx->parent_ctx);
/*
* There are two ways this annotation is useful:
*
* 1) there is a lock recursion from perf_event_exit_task
* see the comment there.
*
* 2) there is a lock-inversion with mmap_sem through
* perf_event_read_group(), which takes faults while
* holding ctx->mutex, however this is called after
* the last filedesc died, so there is no possibility
* to trigger the AB-BA case.
*/
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
raw_spin_unlock_irq(&ctx->lock);
perf_remove_from_context(event);
mutex_unlock(&ctx->mutex);
free_event(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
* Called when the last reference to the file is gone.
*/
static void put_event(struct perf_event *event)
{
struct task_struct *owner;
if (!atomic_long_dec_and_test(&event->refcount))
return;
rcu_read_lock();
owner = ACCESS_ONCE(event->owner);
/*
* Matches the smp_wmb() in perf_event_exit_task(). If we observe
* !owner it means the list deletion is complete and we can indeed
* free this event, otherwise we need to serialize on
* owner->perf_event_mutex.
*/
smp_read_barrier_depends();
if (owner) {
/*
* Since delayed_put_task_struct() also drops the last
* task reference we can safely take a new reference
* while holding the rcu_read_lock().
*/
get_task_struct(owner);
}
rcu_read_unlock();
if (owner) {
mutex_lock(&owner->perf_event_mutex);
/*
* We have to re-check the event->owner field, if it is cleared
* we raced with perf_event_exit_task(), acquiring the mutex
* ensured they're done, and we can proceed with freeing the
* event.
*/
if (event->owner)
list_del_init(&event->owner_entry);
mutex_unlock(&owner->perf_event_mutex);
put_task_struct(owner);
}
perf_event_release_kernel(event);
}
static int perf_release(struct inode *inode, struct file *file)
{
put_event(file->private_data);
return 0;
}
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
struct perf_event *child;
u64 total = 0;
*enabled = 0;
*running = 0;
mutex_lock(&event->child_mutex);
total += perf_event_read(event);
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
*running += event->total_time_running +
atomic64_read(&event->child_total_time_running);
list_for_each_entry(child, &event->child_list, child_list) {
total += perf_event_read(child);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
mutex_unlock(&event->child_mutex);
return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
static int perf_event_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *sub;
int n = 0, size = 0, ret = -EFAULT;
struct perf_event_context *ctx = leader->ctx;
u64 values[5];
u64 count, enabled, running;
mutex_lock(&ctx->mutex);
count = perf_event_read_value(leader, &enabled, &running);
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
values[n++] = count;
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
size = n * sizeof(u64);
if (copy_to_user(buf, values, size))
goto unlock;
ret = size;
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
values[n++] = perf_event_read_value(sub, &enabled, &running);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
size = n * sizeof(u64);
if (copy_to_user(buf + ret, values, size)) {
ret = -EFAULT;
goto unlock;
}
ret += size;
}
unlock:
mutex_unlock(&ctx->mutex);
return ret;
}
static int perf_event_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{
u64 enabled, running;
u64 values[4];
int n = 0;
values[n++] = perf_event_read_value(event, &enabled, &running);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
if (copy_to_user(buf, values, n * sizeof(u64)))
return -EFAULT;
return n * sizeof(u64);
}
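/*
 * Illustrative user-space view (a sketch, not kernel code; perf_fd is
 * assumed to be a perf_event_open() file descriptor): for a non-group
 * event opened with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_ID, a read() returns three u64s in the flag order
 * handled above:
 *
 *	u64 buf[3];
 *
 *	if (read(perf_fd, buf, sizeof(buf)) == sizeof(buf))
 *		printf("count=%llu enabled=%llu id=%llu\n",
 *		       (unsigned long long)buf[0],
 *		       (unsigned long long)buf[1],
 *		       (unsigned long long)buf[2]);
 */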
/*
* Read the performance event - simple non blocking version for now
*/
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
u64 read_format = event->attr.read_format;
int ret;
/*
* Return end-of-file for a read on an event that is in
* error state (i.e. because it was pinned but it couldn't be
* scheduled on to the CPU at some point).
*/
if (event->state == PERF_EVENT_STATE_ERROR)
return 0;
if (count < event->read_size)
return -ENOSPC;
WARN_ON_ONCE(event->ctx->parent_ctx);
if (read_format & PERF_FORMAT_GROUP)
ret = perf_event_read_group(event, read_format, buf);
else
ret = perf_event_read_one(event, read_format, buf);
return ret;
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
return perf_read_hw(event, buf, count);
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct ring_buffer *rb;
unsigned int events = POLLHUP;
/*
* Race between perf_event_set_output() and perf_poll(): perf_poll()
* grabs the rb reference but perf_event_set_output() overrides it.
* Here is the timeline for two threads T1, T2:
* t0: T1, rb = rcu_dereference(event->rb)
* t1: T2, old_rb = event->rb
* t2: T2, event->rb = new rb
* t3: T2, ring_buffer_detach(old_rb)
* t4: T1, ring_buffer_attach(rb)
* t5: T1, poll_wait(event->waitq)
*
* To avoid this problem, we grab mmap_mutex in perf_poll()
* thereby ensuring that the assignment of the new ring buffer
* and the detachment of the old buffer appear atomic to perf_poll()
*/
mutex_lock(&event->mmap_mutex);
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
ring_buffer_attach(event, rb);
events = atomic_xchg(&rb->poll, 0);
}
rcu_read_unlock();
mutex_unlock(&event->mmap_mutex);
poll_wait(file, &event->waitq, wait);
return events;
}
static void perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event);
local64_set(&event->count, 0);
perf_event_update_userpage(event);
}
/*
* Holding the top-level event's child_mutex means that any
* descendant process that has inherited this event will block
* in sync_child_event if it goes to exit, thus satisfying the
* task existence requirements of perf_event_enable/disable.
*/
static void perf_event_for_each_child(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event *child;
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
func(event);
list_for_each_entry(child, &event->child_list, child_list)
func(child);
mutex_unlock(&event->child_mutex);
}
static void perf_event_for_each(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
event = event->group_leader;
perf_event_for_each_child(event, func);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
perf_event_for_each_child(sibling, func);
mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
struct perf_event_context *ctx = event->ctx;
int ret = 0;
u64 value;
if (!is_sampling_event(event))
return -EINVAL;
if (copy_from_user(&value, arg, sizeof(value)))
return -EFAULT;
if (!value)
return -EINVAL;
raw_spin_lock_irq(&ctx->lock);
if (event->attr.freq) {
if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;
goto unlock;
}
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
event->hw.sample_period = value;
}
unlock:
raw_spin_unlock_irq(&ctx->lock);
return ret;
}
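/*
 * Illustrative user-space usage (a sketch, not kernel code; perf_fd is
 * assumed to be a perf_event_open() file descriptor): the ioctl takes a
 * pointer to a u64 holding the new period, or the new frequency for
 * attr.freq events:
 *
 *	u64 new_period = 100000;
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period);
 *
 * A zero value is rejected with -EINVAL, and for freq events any value
 * above sysctl_perf_event_sample_rate is rejected as well.
 */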
static const struct file_operations perf_fops;
static inline int perf_fget_light(int fd, struct fd *p)
{
struct fd f = fdget(fd);
if (!f.file)
return -EBADF;
if (f.file->f_op != &perf_fops) {
fdput(f);
return -EBADF;
}
*p = f;
return 0;
}
static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_event *event = file->private_data;
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
func = perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
func = perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
func = perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
return perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
case PERF_EVENT_IOC_SET_OUTPUT:
{
int ret;
if (arg != -1) {
struct perf_event *output_event;
struct fd output;
ret = perf_fget_light(arg, &output);
if (ret)
return ret;
output_event = output.file->private_data;
ret = perf_event_set_output(event, output_event);
fdput(output);
} else {
ret = perf_event_set_output(event, NULL);
}
return ret;
}
case PERF_EVENT_IOC_SET_FILTER:
return perf_event_set_filter(event, (void __user *)arg);
default:
return -ENOTTY;
}
if (flags & PERF_IOC_FLAG_GROUP)
perf_event_for_each(event, func);
else
perf_event_for_each_child(event, func);
return 0;
}
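/*
 * Illustrative user-space usage (a sketch, not kernel code; perf_fd,
 * target_fd and leader_fd are assumed to be perf_event_open() file
 * descriptors): redirect an event's output into another event's ring
 * buffer, then enable a whole group in one call:
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd);
 *	ioctl(leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * Passing -1 to SET_OUTPUT detaches the event from any redirected
 * buffer again.
 */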
int perf_event_task_enable(void)
{
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
list_for_each_entry(event, &current->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_enable);
mutex_unlock(&current->perf_event_mutex);
return 0;
}
int perf_event_task_disable(void)
{
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
list_for_each_entry(event, &current->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_disable);
mutex_unlock(&current->perf_event_mutex);
return 0;
}
static int perf_event_index(struct perf_event *event)
{
if (event->hw.state & PERF_HES_STOPPED)
return 0;
if (event->state != PERF_EVENT_STATE_ACTIVE)
return 0;
return event->pmu->event_idx(event);
}
static void calc_timer_values(struct perf_event *event,
u64 *now,
u64 *enabled,
u64 *running)
{
u64 ctx_time;
*now = perf_clock();
ctx_time = event->shadow_ctx_time + *now;
*enabled = ctx_time - event->tstamp_enabled;
*running = ctx_time - event->tstamp_running;
}
void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We cannot serialize this because the arch
* code calls this from NMI context.
*/
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
u64 enabled, running, now;
rcu_read_lock();
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
* was last scheduled in.
*
* we cannot simply call update_context_time()
* because of locking issues, as we can be called in
* NMI context
*/
calc_timer_values(event, &now, &enabled, &running);
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
userpg = rb->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
* spin too long if we get preempted.
*/
preempt_disable();
++userpg->lock;
barrier();
userpg->index = perf_event_index(event);
userpg->offset = perf_event_count(event);
if (userpg->index)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
atomic64_read(&event->child_total_time_enabled);
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
arch_perf_update_userpage(userpg, now);
barrier();
++userpg->lock;
preempt_enable();
unlock:
rcu_read_unlock();
}
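/*
 * Illustrative user-space reader (a sketch under the assumption that
 * pg points at the mmap()ed first page of the ring buffer): ->lock
 * behaves like a seqcount - it is odd while this function updates the
 * page - so readers retry until they see the same even value before
 * and after their reads:
 *
 *	struct perf_event_mmap_page *pg = base;
 *	u32 seq;
 *	u64 offset, enabled;
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		offset = pg->offset;
 *		enabled = pg->time_enabled;
 *		barrier();
 *	} while (pg->lock != seq || (seq & 1));
 */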
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct perf_event *event = vma->vm_file->private_data;
struct ring_buffer *rb;
int ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
if (vmf->pgoff == 0)
ret = 0;
return ret;
}
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
goto unlock;
vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
if (!vmf->page)
goto unlock;
get_page(vmf->page);
vmf->page->mapping = vma->vm_file->f_mapping;
vmf->page->index = vmf->pgoff;
ret = 0;
unlock:
rcu_read_unlock();
return ret;
}
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb)
{
unsigned long flags;
if (!list_empty(&event->rb_entry))
return;
spin_lock_irqsave(&rb->event_lock, flags);
if (!list_empty(&event->rb_entry))
goto unlock;
list_add(&event->rb_entry, &rb->event_list);
unlock:
spin_unlock_irqrestore(&rb->event_lock, flags);
}
static void ring_buffer_detach(struct perf_event *event,
struct ring_buffer *rb)
{
unsigned long flags;
if (list_empty(&event->rb_entry))
return;
spin_lock_irqsave(&rb->event_lock, flags);
list_del_init(&event->rb_entry);
wake_up_all(&event->waitq);
spin_unlock_irqrestore(&rb->event_lock, flags);
}
static void ring_buffer_wakeup(struct perf_event *event)
{
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
wake_up_all(&event->waitq);
unlock:
rcu_read_unlock();
}
static void rb_free_rcu(struct rcu_head *rcu_head)
{
struct ring_buffer *rb;
rb = container_of(rcu_head, struct ring_buffer, rcu_head);
rb_free(rb);
}
static struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
if (!atomic_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
return rb;
}
static void ring_buffer_put(struct ring_buffer *rb)
{
struct perf_event *event, *n;
unsigned long flags;
if (!atomic_dec_and_test(&rb->refcount))
return;
spin_lock_irqsave(&rb->event_lock, flags);
list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
list_del_init(&event->rb_entry);
wake_up_all(&event->waitq);
}
spin_unlock_irqrestore(&rb->event_lock, flags);
call_rcu(&rb->rcu_head, rb_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
atomic_inc(&event->mmap_count);
}
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
unsigned long size = perf_data_size(event->rb);
struct user_struct *user = event->mmap_user;
struct ring_buffer *rb = event->rb;
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->pinned_vm -= event->mmap_locked;
rcu_assign_pointer(event->rb, NULL);
ring_buffer_detach(event, rb);
mutex_unlock(&event->mmap_mutex);
ring_buffer_put(rb);
free_uid(user);
}
}
static const struct vm_operations_struct perf_mmap_vmops = {
.open = perf_mmap_open,
.close = perf_mmap_close,
.fault = perf_mmap_fault,
.page_mkwrite = perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
struct ring_buffer *rb;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra, extra;
int ret = 0, flags = 0;
/*
* Don't allow mmap() of inherited per-task counters. This would
* create a performance issue due to all children writing to the
* same rb.
*/
if (event->cpu == -1 && event->attr.inherit)
return -EINVAL;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_size = vma->vm_end - vma->vm_start;
nr_pages = (vma_size / PAGE_SIZE) - 1;
/*
* If we have rb pages ensure they're a power-of-two number, so we
* can do bitmasks instead of modulo.
*/
if (nr_pages != 0 && !is_power_of_2(nr_pages))
return -EINVAL;
if (vma_size != PAGE_SIZE * (1 + nr_pages))
return -EINVAL;
if (vma->vm_pgoff != 0)
return -EINVAL;
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->mmap_mutex);
if (event->rb) {
if (event->rb->nr_pages == nr_pages)
atomic_inc(&event->rb->refcount);
else
ret = -EINVAL;
goto unlock;
}
user_extra = nr_pages + 1;
user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
/*
* Increase the limit linearly with more CPUs:
*/
user_lock_limit *= num_online_cpus();
user_locked = atomic_long_read(&user->locked_vm) + user_extra;
extra = 0;
if (user_locked > user_lock_limit)
extra = user_locked - user_lock_limit;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
locked = vma->vm_mm->pinned_vm + extra;
if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
!capable(CAP_IPC_LOCK)) {
ret = -EPERM;
goto unlock;
}
WARN_ON(event->rb);
if (vma->vm_flags & VM_WRITE)
flags |= RING_BUFFER_WRITABLE;
rb = rb_alloc(nr_pages,
event->attr.watermark ? event->attr.wakeup_watermark : 0,
event->cpu, flags);
if (!rb) {
ret = -ENOMEM;
goto unlock;
}
rcu_assign_pointer(event->rb, rb);
atomic_long_add(user_extra, &user->locked_vm);
event->mmap_locked = extra;
event->mmap_user = get_current_user();
vma->vm_mm->pinned_vm += event->mmap_locked;
perf_event_update_userpage(event);
unlock:
if (!ret)
atomic_inc(&event->mmap_count);
mutex_unlock(&event->mmap_mutex);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &perf_mmap_vmops;
return ret;
}
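/*
 * Illustrative user-space usage (a sketch, not kernel code; perf_fd is
 * assumed to be a perf_event_open() file descriptor): the mapping must
 * be MAP_SHARED, start at offset 0 and cover 1 + 2^n pages - one
 * metadata page followed by a power-of-two number of data pages:
 *
 *	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, perf_fd, 0);
 *
 * Anything else (a private mapping, a non-zero pgoff, a data area that
 * is not a power-of-two number of pages) is rejected with -EINVAL
 * above.
 */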
static int perf_fasync(int fd, struct file *filp, int on)
{
struct inode *inode = file_inode(filp);
struct perf_event *event = filp->private_data;
int retval;
mutex_lock(&inode->i_mutex);
retval = fasync_helper(fd, filp, on, &event->fasync);
mutex_unlock(&inode->i_mutex);
if (retval < 0)
return retval;
return 0;
}
static const struct file_operations perf_fops = {
.llseek = no_llseek,
.release = perf_release,
.read = perf_read,
.poll = perf_poll,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_ioctl,
.mmap = perf_mmap,
.fasync = perf_fasync,
};
/*
* Perf event wakeup
*
* If there's data, ensure we set the poll() state and publish everything
* to user-space before waking everybody up.
*/
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);
if (event->pending_kill) {
kill_fasync(&event->fasync, SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
static void perf_pending_event(struct irq_work *entry)
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
if (event->pending_disable) {
event->pending_disable = 0;
__perf_event_disable(event);
}
if (event->pending_wakeup) {
event->pending_wakeup = 0;
perf_event_wakeup(event);
}
}
/*
* We assume there is only KVM supporting the callbacks.
* Later on, we might change it to a list if there is
* another virtualization implementation supporting the callbacks.
*/
struct perf_guest_info_callbacks *perf_guest_cbs;
int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = cbs;
return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
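/*
 * Illustrative registration (a sketch modeled on what KVM does; the
 * three callbacks mirror the struct perf_guest_info_callbacks layout
 * assumed here):
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 *
 * Because perf_guest_cbs is a single pointer, only one hypervisor can
 * be registered at a time, as noted above.
 */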
static void
perf_output_sample_regs(struct perf_output_handle *handle,
struct pt_regs *regs, u64 mask)
{
int bit;
for_each_set_bit(bit, (const unsigned long *) &mask,
sizeof(mask) * BITS_PER_BYTE) {
u64 val;
val = perf_reg_value(regs, bit);
perf_output_put(handle, val);
}
}
static void perf_sample_regs_user(struct perf_regs_user *regs_user,
struct pt_regs *regs)
{
if (!user_mode(regs)) {
if (current->mm)
regs = task_pt_regs(current);
else
regs = NULL;
}
if (regs) {
regs_user->regs = regs;
regs_user->abi = perf_reg_abi(current);
}
}
/*
* Get remaining task size from user stack pointer.
*
* It'd be better to take the stack vma map and limit this more
* precisely, but there's no way to get it safely under interrupt,
* so we use TASK_SIZE as the limit.
*/
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
unsigned long addr = perf_user_stack_pointer(regs);
if (!addr || addr >= TASK_SIZE)
return 0;
return TASK_SIZE - addr;
}
static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
struct pt_regs *regs)
{
u64 task_size;
/* No regs, no stack pointer, no dump. */
if (!regs)
return 0;
/*
* Check whether the requested stack size fits into:
* - TASK_SIZE
*   If it doesn't, we limit the size to TASK_SIZE.
*
* - the remaining sample size
*   If it doesn't, we shrink the stack size to fit
*   into the remaining sample size.
*/
task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
stack_size = min(stack_size, (u16) task_size);
/* Current header size plus static size and dynamic size. */
header_size += 2 * sizeof(u64);
/* Do we fit in with the current stack dump size? */
if ((u16) (header_size + stack_size) < header_size) {
/*
* If we overflow the maximum size for the sample,
* we customize the stack dump size to fit in.
*/
stack_size = USHRT_MAX - header_size - sizeof(u64);
stack_size = round_up(stack_size, sizeof(u64));
}
return stack_size;
}
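/*
 * Worked example (illustrative numbers only): with header_size = 528
 * once the two size words have been added, a requested
 * stack_size = 65280 wraps the u16 sum, so the dump is clamped to
 * USHRT_MAX - 528 - sizeof(u64) = 64999 and rounded up to 65000.
 */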
static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
struct pt_regs *regs)
{
/* Case of a kernel thread, nothing to dump */
if (!regs) {
u64 size = 0;
perf_output_put(handle, size);
} else {
unsigned long sp;
unsigned int rem;
u64 dyn_size;
/*
* We dump:
* static size
* - the size requested by user or the best one we can fit
* in to the sample max size
* data
* - user stack dump data
* dynamic size
* - the actual dumped size
*/
/* Static size. */
perf_output_put(handle, dump_size);
/* Data. */
sp = perf_user_stack_pointer(regs);
rem = __output_copy_user(handle, (void *) sp, dump_size);
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
/* Dynamic size. */
perf_output_put(handle, dyn_size);
}
}
static void __perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = event->attr.sample_type;
data->type = sample_type;
header->size += event->id_header_size;
if (sample_type & PERF_SAMPLE_TID) {
/* namespace issues */
data->tid_entry.pid = perf_event_pid(event, current);
data->tid_entry.tid = perf_event_tid(event, current);
}
if (sample_type & PERF_SAMPLE_TIME)
data->time = perf_clock();
if (sample_type & PERF_SAMPLE_ID)
data->id = primary_event_id(event);
if (sample_type & PERF_SAMPLE_STREAM_ID)
data->stream_id = event->id;
if (sample_type & PERF_SAMPLE_CPU) {
data->cpu_entry.cpu = raw_smp_processor_id();
data->cpu_entry.reserved = 0;
}
}
void perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
if (event->attr.sample_id_all)
__perf_event_header__init_id(header, data, event);
}
static void __perf_event__output_id_sample(struct perf_output_handle *handle,
struct perf_sample_data *data)
{
u64 sample_type = data->type;
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
}
void perf_event__output_id_sample(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *sample)
{
if (event->attr.sample_id_all)
__perf_event__output_id_sample(handle, sample);
}
static void perf_output_read_one(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
u64 read_format = event->attr.read_format;
u64 values[4];
int n = 0;
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
atomic64_read(&event->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
values[n++] = running +
atomic64_read(&event->child_total_time_running);
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
__output_copy(handle, values, n * sizeof(u64));
}
/*
* XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
*/
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
u64 values[5];
int n = 0;
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
if (leader != event)
leader->pmu->read(leader);
values[n++] = perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
__output_copy(handle, values, n * sizeof(u64));
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
if (sub != event)
sub->pmu->read(sub);
values[n++] = perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
__output_copy(handle, values, n * sizeof(u64));
}
}
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
u64 enabled = 0, running = 0, now;
u64 read_format = event->attr.read_format;
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
* was last scheduled in.
*
* we cannot simply call update_context_time()
* because of locking issues, as we are called in
* NMI context
*/
if (read_format & PERF_FORMAT_TOTAL_TIMES)
calc_timer_values(event, &now, &enabled, &running);
if (event->attr.read_format & PERF_FORMAT_GROUP)
perf_output_read_group(handle, event, enabled, running);
else
perf_output_read_one(handle, event, enabled, running);
}
void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = data->type;
perf_output_put(handle, *header);
if (sample_type & PERF_SAMPLE_IP)
perf_output_put(handle, data->ip);
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ADDR)
perf_output_put(handle, data->addr);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
if (sample_type & PERF_SAMPLE_PERIOD)
perf_output_put(handle, data->period);
if (sample_type & PERF_SAMPLE_READ)
perf_output_read(handle, event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
if (data->callchain) {
/* one u64 for nr plus one per callchain entry */
int size = (1 + data->callchain->nr) * sizeof(u64);
__output_copy(handle, data->callchain, size);
} else {
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_RAW) {
if (data->raw) {
perf_output_put(handle, data->raw->size);
__output_copy(handle, data->raw->data,
data->raw->size);
} else {
struct {
u32 size;
u32 data;
} raw = {
.size = sizeof(u32),
.data = 0,
};
perf_output_put(handle, raw);
}
}
if (!event->attr.watermark) {
int wakeup_events = event->attr.wakeup_events;
if (wakeup_events) {
struct ring_buffer *rb = handle->rb;
int events = local_inc_return(&rb->events);
if (events >= wakeup_events) {
local_sub(wakeup_events, &rb->events);
local_inc(&rb->wakeup);
}
}
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (data->br_stack) {
size_t size;
size = data->br_stack->nr
* sizeof(struct perf_branch_entry);
perf_output_put(handle, data->br_stack->nr);
perf_output_copy(handle, data->br_stack->entries, size);
} else {
/*
* we always store at least the value of nr
*/
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
u64 abi = data->regs_user.abi;
/*
* If there are no regs to dump, notice it through
* first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
*/
perf_output_put(handle, abi);
if (abi) {
u64 mask = event->attr.sample_regs_user;
perf_output_sample_regs(handle,
data->regs_user.regs,
mask);
}
}
if (sample_type & PERF_SAMPLE_STACK_USER)
perf_output_sample_ustack(handle,
data->stack_user_size,
data->regs_user.regs);
}
void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{
u64 sample_type = event->attr.sample_type;
header->type = PERF_RECORD_SAMPLE;
header->size = sizeof(*header) + event->header_size;
header->misc = 0;
header->misc |= perf_misc_flags(regs);
__perf_event_header__init_id(header, data, event);
if (sample_type & PERF_SAMPLE_IP)
data->ip = perf_instruction_pointer(regs);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
data->callchain = perf_callchain(event, regs);
if (data->callchain)
size += data->callchain->nr;
header->size += size * sizeof(u64);
}
if (sample_type & PERF_SAMPLE_RAW) {
int size = sizeof(u32);
if (data->raw)
size += data->raw->size;
else
size += sizeof(u32);
WARN_ON_ONCE(size & (sizeof(u64)-1));
header->size += size;
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
int size = sizeof(u64); /* nr */
if (data->br_stack) {
size += data->br_stack->nr
* sizeof(struct perf_branch_entry);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
int size = sizeof(u64);
perf_sample_regs_user(&data->regs_user, regs);
if (data->regs_user.regs) {
u64 mask = event->attr.sample_regs_user;
size += hweight64(mask) * sizeof(u64);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_STACK_USER) {
/*
* Either we need the PERF_SAMPLE_STACK_USER bit to always be
* processed as the last one, or we need an additional check in
* case a new sample type is added, because we could eat up the
* rest of the sample size.
*/
struct perf_regs_user *uregs = &data->regs_user;
u16 stack_size = event->attr.sample_stack_user;
u16 size = sizeof(u64);
if (!uregs->abi)
perf_sample_regs_user(uregs, regs);
stack_size = perf_sample_ustack_size(stack_size, header->size,
uregs->regs);
/*
* If there is something to dump, add space for the dump
* itself and for the field that tells the dynamic size,
* which is how many have been actually dumped.
*/
if (stack_size)
size += sizeof(u64) + stack_size;
data->stack_user_size = stack_size;
header->size += size;
}
}
static void perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct perf_output_handle handle;
struct perf_event_header header;
/* protect the callchain buffers */
rcu_read_lock();
perf_prepare_sample(&header, data, event, regs);
if (perf_output_begin(&handle, event, header.size))
goto exit;
perf_output_sample(&handle, &header, data, event);
perf_output_end(&handle);
exit:
rcu_read_unlock();
}
/*
* read event_id
*/
struct perf_read_event {
struct perf_event_header header;
u32 pid;
u32 tid;
};
static void
perf_event_read_event(struct perf_event *event,
struct task_struct *task)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
struct perf_read_event read_event = {
.header = {
.type = PERF_RECORD_READ,
.misc = 0,
.size = sizeof(read_event) + event->read_size,
},
.pid = perf_event_pid(event, task),
.tid = perf_event_tid(event, task),
};
int ret;
perf_event_header__init_id(&read_event.header, &sample, event);
ret = perf_output_begin(&handle, event, read_event.header.size);
if (ret)
return;
perf_output_put(&handle, read_event);
perf_output_read(&handle, event);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
/*
* task tracking -- fork/exit
*
* enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
*/
struct perf_task_event {
struct task_struct *task;
struct perf_event_context *task_ctx;
struct {
struct perf_event_header header;
u32 pid;
u32 ppid;
u32 tid;
u32 ptid;
u64 time;
} event_id;
};
static void perf_event_task_output(struct perf_event *event,
struct perf_task_event *task_event)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
struct task_struct *task = task_event->task;
int ret, size = task_event->event_id.header.size;
perf_event_header__init_id(&task_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
task_event->event_id.header.size);
if (ret)
goto out;
task_event->event_id.pid = perf_event_pid(event, task);
task_event->event_id.ppid = perf_event_pid(event, current);
task_event->event_id.tid = perf_event_tid(event, task);
task_event->event_id.ptid = perf_event_tid(event, current);
perf_output_put(&handle, task_event->event_id);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
task_event->event_id.header.size = size;
}
static int perf_event_task_match(struct perf_event *event)
{
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (!event_filter_match(event))
return 0;
if (event->attr.comm || event->attr.mmap ||
event->attr.mmap_data || event->attr.task)
return 1;
return 0;
}
static void perf_event_task_ctx(struct perf_event_context *ctx,
struct perf_task_event *task_event)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_task_match(event))
perf_event_task_output(event, task_event);
}
}
static void perf_event_task_event(struct perf_task_event *task_event)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
struct pmu *pmu;
int ctxn;
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_task_ctx(&cpuctx->ctx, task_event);
ctx = task_event->task_ctx;
if (!ctx) {
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
perf_event_task_ctx(ctx, task_event);
}
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
if (task_event->task_ctx)
perf_event_task_ctx(task_event->task_ctx, task_event);
rcu_read_unlock();
}
static void perf_event_task(struct task_struct *task,
struct perf_event_context *task_ctx,
int new)
{
struct perf_task_event task_event;
if (!atomic_read(&nr_comm_events) &&
!atomic_read(&nr_mmap_events) &&
!atomic_read(&nr_task_events))
return;
task_event = (struct perf_task_event){
.task = task,
.task_ctx = task_ctx,
.event_id = {
.header = {
.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
.misc = 0,
.size = sizeof(task_event.event_id),
},
/* .pid */
/* .ppid */
/* .tid */
/* .ptid */
.time = perf_clock(),
},
};
perf_event_task_event(&task_event);
}
void perf_event_fork(struct task_struct *task)
{
perf_event_task(task, NULL, 1);
}
/*
* comm tracking
*/
struct perf_comm_event {
struct task_struct *task;
char *comm;
int comm_size;
struct {
struct perf_event_header header;
u32 pid;
u32 tid;
} event_id;
};
static void perf_event_comm_output(struct perf_event *event,
struct perf_comm_event *comm_event)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = comm_event->event_id.header.size;
int ret;
perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
comm_event->event_id.header.size);
if (ret)
goto out;
comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
perf_output_put(&handle, comm_event->event_id);
__output_copy(&handle, comm_event->comm,
comm_event->comm_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
comm_event->event_id.header.size = size;
}
static int perf_event_comm_match(struct perf_event *event)
{
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (!event_filter_match(event))
return 0;
if (event->attr.comm)
return 1;
return 0;
}
static void perf_event_comm_ctx(struct perf_event_context *ctx,
struct perf_comm_event *comm_event)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_comm_match(event))
perf_event_comm_output(event, comm_event);
}
}
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
char comm[TASK_COMM_LEN];
unsigned int size;
struct pmu *pmu;
int ctxn;
memset(comm, 0, sizeof(comm));
strlcpy(comm, comm_event->task->comm, sizeof(comm));
size = ALIGN(strlen(comm)+1, sizeof(u64));
comm_event->comm = comm;
comm_event->comm_size = size;
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_comm_ctx(&cpuctx->ctx, comm_event);
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
perf_event_comm_ctx(ctx, comm_event);
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
rcu_read_unlock();
}
void perf_event_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
struct perf_event_context *ctx;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (!ctx)
continue;
perf_event_enable_on_exec(ctx);
}
if (!atomic_read(&nr_comm_events))
return;
comm_event = (struct perf_comm_event){
.task = task,
/* .comm */
/* .comm_size */
.event_id = {
.header = {
.type = PERF_RECORD_COMM,
.misc = 0,
/* .size */
},
/* .pid */
/* .tid */
},
};
perf_event_comm_event(&comm_event);
}
/*
* mmap tracking
*/
struct perf_mmap_event {
struct vm_area_struct *vma;
const char *file_name;
int file_size;
struct {
struct perf_event_header header;
u32 pid;
u32 tid;
u64 start;
u64 len;
u64 pgoff;
} event_id;
};
static void perf_event_mmap_output(struct perf_event *event,
struct perf_mmap_event *mmap_event)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
int ret;
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
mmap_event->event_id.header.size);
if (ret)
goto out;
mmap_event->event_id.pid = perf_event_pid(event, current);
mmap_event->event_id.tid = perf_event_tid(event, current);
perf_output_put(&handle, mmap_event->event_id);
__output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
mmap_event->event_id.header.size = size;
}
static int perf_event_mmap_match(struct perf_event *event,
struct perf_mmap_event *mmap_event,
int executable)
{
if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (!event_filter_match(event))
return 0;
if ((!executable && event->attr.mmap_data) ||
(executable && event->attr.mmap))
return 1;
return 0;
}
static void perf_event_mmap_ctx(struct perf_event_context *ctx,
struct perf_mmap_event *mmap_event,
int executable)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (perf_event_mmap_match(event, mmap_event, executable))
perf_event_mmap_output(event, mmap_event);
}
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
struct vm_area_struct *vma = mmap_event->vma;
struct file *file = vma->vm_file;
unsigned int size;
char tmp[16];
char *buf = NULL;
const char *name;
struct pmu *pmu;
int ctxn;
memset(tmp, 0, sizeof(tmp));
if (file) {
/*
* d_path() works from the end of the buffer backwards, so we
* need to add enough zero bytes after the string to handle
* the 64bit alignment we do later.
*/
buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
if (!buf) {
name = strncpy(tmp, "//enomem", sizeof(tmp));
goto got_name;
}
name = d_path(&file->f_path, buf, PATH_MAX);
if (IS_ERR(name)) {
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
}
} else {
if (arch_vma_name(mmap_event->vma)) {
name = strncpy(tmp, arch_vma_name(mmap_event->vma),
sizeof(tmp) - 1);
tmp[sizeof(tmp) - 1] = '\0';
goto got_name;
}
if (!vma->vm_mm) {
name = strncpy(tmp, "[vdso]", sizeof(tmp));
goto got_name;
} else if (vma->vm_start <= vma->vm_mm->start_brk &&
vma->vm_end >= vma->vm_mm->brk) {
name = strncpy(tmp, "[heap]", sizeof(tmp));
goto got_name;
} else if (vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) {
name = strncpy(tmp, "[stack]", sizeof(tmp));
goto got_name;
}
name = strncpy(tmp, "//anon", sizeof(tmp));
goto got_name;
}
got_name:
size = ALIGN(strlen(name)+1, sizeof(u64));
mmap_event->file_name = name;
mmap_event->file_size = size;
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
vma->vm_flags & VM_EXEC);
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx) {
perf_event_mmap_ctx(ctx, mmap_event,
vma->vm_flags & VM_EXEC);
}
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
rcu_read_unlock();
kfree(buf);
}
void perf_event_mmap(struct vm_area_struct *vma)
{
struct perf_mmap_event mmap_event;
if (!atomic_read(&nr_mmap_events))
return;
mmap_event = (struct perf_mmap_event){
.vma = vma,
/* .file_name */
/* .file_size */
.event_id = {
.header = {
.type = PERF_RECORD_MMAP,
.misc = PERF_RECORD_MISC_USER,
/* .size */
},
/* .pid */
/* .tid */
.start = vma->vm_start,
.len = vma->vm_end - vma->vm_start,
.pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
},
};
perf_event_mmap_event(&mmap_event);
}
/*
* IRQ throttle logging
*/
static void perf_log_throttle(struct perf_event *event, int enable)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
int ret;
struct {
struct perf_event_header header;
u64 time;
u64 id;
u64 stream_id;
} throttle_event = {
.header = {
.type = PERF_RECORD_THROTTLE,
.misc = 0,
.size = sizeof(throttle_event),
},
.time = perf_clock(),
.id = primary_event_id(event),
.stream_id = event->id,
};
if (enable)
throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
perf_event_header__init_id(&throttle_event.header, &sample, event);
ret = perf_output_begin(&handle, event,
throttle_event.header.size);
if (ret)
return;
perf_output_put(&handle, throttle_event);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
/*
* Generic event overflow handling, sampling.
*/
static int __perf_event_overflow(struct perf_event *event,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{
int events = atomic_read(&event->event_limit);
struct hw_perf_event *hwc = &event->hw;
u64 seq;
int ret = 0;
/*
* Non-sampling counters might still use the PMI to fold short
* hardware counters; ignore those.
*/
if (unlikely(!is_sampling_event(event)))
return 0;
seq = __this_cpu_read(perf_throttled_seq);
if (seq != hwc->interrupts_seq) {
hwc->interrupts_seq = seq;
hwc->interrupts = 1;
} else {
hwc->interrupts++;
if (unlikely(throttle
&& hwc->interrupts >= max_samples_per_tick)) {
__this_cpu_inc(perf_throttled_count);
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
ret = 1;
}
}
if (event->attr.freq) {
u64 now = perf_clock();
s64 delta = now - hwc->freq_time_stamp;
hwc->freq_time_stamp = now;
if (delta > 0 && delta < 2*TICK_NSEC)
perf_adjust_period(event, delta, hwc->last_period, true);
}
/*
* XXX event_limit might not quite work as expected on inherited
* events
*/
event->pending_kill = POLL_IN;
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
event->pending_disable = 1;
irq_work_queue(&event->pending);
}
if (event->overflow_handler)
event->overflow_handler(event, data, regs);
else
perf_event_output(event, data, regs);
if (event->fasync && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
return ret;
}
int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
return __perf_event_overflow(event, 1, data, regs);
}
/*
* Generic software event infrastructure
*/
struct swevent_htable {
struct swevent_hlist *swevent_hlist;
struct mutex hlist_mutex;
int hlist_refcount;
/* Recursion avoidance in each contexts */
int recursion[PERF_NR_CONTEXTS];
};
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
/*
* We directly increment event->count and keep a second value in
* event->hw.period_left to count intervals. This value is kept in
* the range [-sample_period, 0] so that we can use its sign as the
* overflow trigger.
*/
static u64 perf_swevent_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 period = hwc->last_period;
u64 nr, offset;
s64 old, val;
hwc->last_period = hwc->sample_period;
again:
old = val = local64_read(&hwc->period_left);
if (val < 0)
return 0;
nr = div64_u64(period + val, period);
offset = nr * period;
val -= offset;
if (local64_cmpxchg(&hwc->period_left, old, val) != old)
goto again;
return nr;
}
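/*
 * Worked example (illustrative numbers only): with period = 100 and
 * period_left = 250, nr = (100 + 250) / 100 = 3 overflows are owed,
 * offset = 300, and period_left becomes 250 - 300 = -50, back in the
 * [-sample_period, 0] range described above; 3 is then handed to
 * perf_swevent_overflow().
 */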
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
if (!overflow)
overflow = perf_swevent_set_period(event);
if (hwc->interrupts == MAX_INTERRUPTS)
return;
for (; overflow; overflow--) {
if (__perf_event_overflow(event, throttle,
data, regs)) {
/*
* We inhibit the overflow from happening when
* hwc->interrupts == MAX_INTERRUPTS.
*/
break;
}
throttle = 1;
}
}
static void perf_swevent_event(struct perf_event *event, u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
local64_add(nr, &event->count);
if (!regs)
return;
if (!is_sampling_event(event))
return;
if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
data->period = nr;
return perf_swevent_overflow(event, 1, data, regs);
} else
data->period = event->hw.last_period;
if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
return perf_swevent_overflow(event, 1, data, regs);
if (local64_add_negative(nr, &hwc->period_left))
return;
perf_swevent_overflow(event, 0, data, regs);
}
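/*
 * Worked example (illustrative only): with sample_period == 4 and
 * period_left == -4, two events of weight 3 move period_left
 * -4 -> -1 -> +2; the second local64_add_negative() returns false,
 * so perf_swevent_overflow() runs and perf_swevent_set_period()
 * computes nr = (4 + 2) / 4 = 1 overflow, rewinding period_left to
 * 2 - 1*4 = -2, i.e. back inside the [-sample_period, 0] window.
 */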
static int perf_exclude_event(struct perf_event *event,
struct pt_regs *regs)
{
if (event->hw.state & PERF_HES_STOPPED)
return 1;
if (regs) {
if (event->attr.exclude_user && user_mode(regs))
return 1;
if (event->attr.exclude_kernel && !user_mode(regs))
return 1;
}
return 0;
}
static int perf_swevent_match(struct perf_event *event,
enum perf_type_id type,
u32 event_id,
struct perf_sample_data *data,
struct pt_regs *regs)
{
if (event->attr.type != type)
return 0;
if (event->attr.config != event_id)
return 0;
if (perf_exclude_event(event, regs))
return 0;
return 1;
}
static inline u64 swevent_hash(u64 type, u32 event_id)
{
u64 val = event_id | (type << 32);
return hash_64(val, SWEVENT_HLIST_BITS);
}
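/*
 * Example (illustrative): a software event with type
 * PERF_TYPE_SOFTWARE (1) and event_id PERF_COUNT_SW_PAGE_FAULTS (2)
 * hashes the 64-bit key (1ULL << 32) | 2 into one of
 * 2^SWEVENT_HLIST_BITS buckets; all events sharing a bucket are then
 * matched individually by perf_swevent_match().
 */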
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
u64 hash = swevent_hash(type, event_id);
return &hlist->heads[hash];
}
/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
struct swevent_hlist *hlist;
hlist = rcu_dereference(swhash->swevent_hlist);
if (!hlist)
return NULL;
return __find_swevent_head(hlist, type, event_id);
}
/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
struct swevent_hlist *hlist;
u32 event_id = event->attr.config;
u64 type = event->attr.type;
/*
* Event scheduling is always serialized against hlist allocation
* and release, which makes the protected version suitable here.
* The context lock guarantees that.
*/
hlist = rcu_dereference_protected(swhash->swevent_hlist,
lockdep_is_held(&event->ctx->lock));
if (!hlist)
return NULL;
return __find_swevent_head(hlist, type, event_id);
}
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct perf_event *event;
struct hlist_head *head;
rcu_read_lock();
head = find_swevent_head_rcu(swhash, type, event_id);
if (!head)
goto end;
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_event(event, nr, data, regs);
}
end:
rcu_read_unlock();
}
int perf_swevent_get_recursion_context(void)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
inline void perf_swevent_put_recursion_context(int rctx)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
put_recursion_context(swhash->recursion, rctx);
}
void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data;
int rctx;
preempt_disable_notrace();
rctx = perf_swevent_get_recursion_context();
if (rctx < 0)
return;
perf_sample_data_init(&data, addr, 0);
do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
perf_swevent_put_recursion_context(rctx);
preempt_enable_notrace();
}
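/*
 * Usage sketch (editor's note): callers normally go through the
 * perf_sw_event() wrapper, which is gated per event id; e.g. the
 * page fault path does roughly
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The recursion context taken above keeps a software event raised
 * from within this path from re-entering and clobbering the per-cpu
 * state for the current context.
 */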
static void perf_swevent_read(struct perf_event *event)
{
}
static int perf_swevent_add(struct perf_event *event, int flags)
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct hw_perf_event *hwc = &event->hw;
struct hlist_head *head;
if (is_sampling_event(event)) {
hwc->last_period = hwc->sample_period;
perf_swevent_set_period(event);
}
hwc->state = !(flags & PERF_EF_START);
head = find_swevent_head(swhash, event);
if (WARN_ON_ONCE(!head))
return -EINVAL;
hlist_add_head_rcu(&event->hlist_entry, head);
return 0;
}
static void perf_swevent_del(struct perf_event *event, int flags)
{
hlist_del_rcu(&event->hlist_entry);
}
static void perf_swevent_start(struct perf_event *event, int flags)
{
event->hw.state = 0;
}
static void perf_swevent_stop(struct perf_event *event, int flags)
{
event->hw.state = PERF_HES_STOPPED;
}
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
return rcu_dereference_protected(swhash->swevent_hlist,
lockdep_is_held(&swhash->hlist_mutex));
}
static void swevent_hlist_release(struct swevent_htable *swhash)
{
struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
if (!hlist)
return;
rcu_assign_pointer(swhash->swevent_hlist, NULL);
kfree_rcu(hlist, rcu_head);
}
static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
if (!--swhash->hlist_refcount)
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
}
static void swevent_hlist_put(struct perf_event *event)
{
int cpu;
if (event->cpu != -1) {
swevent_hlist_put_cpu(event, event->cpu);
return;
}
for_each_possible_cpu(cpu)
swevent_hlist_put_cpu(event, cpu);
}
static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
int err = 0;
mutex_lock(&swhash->hlist_mutex);
if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
struct swevent_hlist *hlist;
hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
if (!hlist) {
err = -ENOMEM;
goto exit;
}
rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
swhash->hlist_refcount++;
exit:
mutex_unlock(&swhash->hlist_mutex);
return err;
}
static int swevent_hlist_get(struct perf_event *event)
{
int err;
int cpu, failed_cpu;
if (event->cpu != -1)
return swevent_hlist_get_cpu(event, event->cpu);
get_online_cpus();
for_each_possible_cpu(cpu) {
err = swevent_hlist_get_cpu(event, cpu);
if (err) {
failed_cpu = cpu;
goto fail;
}
}
put_online_cpus();
return 0;
fail:
for_each_possible_cpu(cpu) {
if (cpu == failed_cpu)
break;
swevent_hlist_put_cpu(event, cpu);
}
put_online_cpus();
return err;
}
struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
static void sw_perf_event_destroy(struct perf_event *event)
{
u64 event_id = event->attr.config;
WARN_ON(event->parent);
static_key_slow_dec(&perf_swevent_enabled[event_id]);
swevent_hlist_put(event);
}
static int perf_swevent_init(struct perf_event *event)
{
u64 event_id = event->attr.config;
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event_id) {
case PERF_COUNT_SW_CPU_CLOCK:
case PERF_COUNT_SW_TASK_CLOCK:
return -ENOENT;
default:
break;
}
if (event_id >= PERF_COUNT_SW_MAX)
return -ENOENT;
if (!event->parent) {
int err;
err = swevent_hlist_get(event);
if (err)
return err;
static_key_slow_inc(&perf_swevent_enabled[event_id]);
event->destroy = sw_perf_event_destroy;
}
return 0;
}
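/*
 * Note on the range check above (editor's note): attr.config is a
 * u64, so event_id must stay unsigned. With a signed int, a config
 * of 0xffffffff would read as -1, slip past the PERF_COUNT_SW_MAX
 * bound, and index perf_swevent_enabled[] out of bounds
 * (CVE-2013-2094); the declaration above uses u64 for that reason.
 */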
static int perf_swevent_event_idx(struct perf_event *event)
{
return 0;
}
static struct pmu perf_swevent = {
.task_ctx_nr = perf_sw_context,
.event_init = perf_swevent_init,
.add = perf_swevent_add,
.del = perf_swevent_del,
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
.event_idx = perf_swevent_event_idx,
};
#ifdef CONFIG_EVENT_TRACING
static int perf_tp_filter_match(struct perf_event *event,
struct perf_sample_data *data)
{
void *record = data->raw->data;
if (likely(!event->filter) || filter_match_preds(event->filter, record))
return 1;
return 0;
}
static int perf_tp_event_match(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
if (event->hw.state & PERF_HES_STOPPED)
return 0;
/*
* All tracepoints are from kernel-space.
*/
if (event->attr.exclude_kernel)
return 0;
if (!perf_tp_filter_match(event, data))
return 0;
return 1;
}
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx,
struct task_struct *task)
{
struct perf_sample_data data;
struct perf_event *event;
struct perf_raw_record raw = {
.size = entry_size,
.data = record,
};
perf_sample_data_init(&data, addr, 0);
data.raw = &raw;
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
/*
* If we got specified a target task, also iterate its context and
* deliver this event there too.
*/
if (task && task != current) {
struct perf_event_context *ctx;
struct trace_entry *entry = record;
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
if (!ctx)
goto unlock;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->attr.type != PERF_TYPE_TRACEPOINT)
continue;
if (event->attr.config != entry->type)
continue;
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
unlock:
rcu_read_unlock();
}
perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
static void tp_perf_event_destroy(struct perf_event *event)
{
perf_trace_destroy(event);
}
static int perf_tp_event_init(struct perf_event *event)
{
int err;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -ENOENT;
/*
* no branch sampling for tracepoint events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
err = perf_trace_init(event);
if (err)
return err;
event->destroy = tp_perf_event_destroy;
return 0;
}
static struct pmu perf_tracepoint = {
.task_ctx_nr = perf_sw_context,
.event_init = perf_tp_event_init,
.add = perf_trace_add,
.del = perf_trace_del,
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
.event_idx = perf_swevent_event_idx,
};
static inline void perf_tp_register(void)
{
perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
char *filter_str;
int ret;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -EINVAL;
filter_str = strndup_user(arg, PAGE_SIZE);
if (IS_ERR(filter_str))
return PTR_ERR(filter_str);
ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
kfree(filter_str);
return ret;
}
static void perf_event_free_filter(struct perf_event *event)
{
ftrace_profile_free_filter(event);
}
#else
static inline void perf_tp_register(void)
{
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
return -ENOENT;
}
static void perf_event_free_filter(struct perf_event *event)
{
}
#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
struct perf_sample_data sample;
struct pt_regs *regs = data;
perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, &sample, regs);
}
#endif
/*
* hrtimer based swevent callback
*/
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_sample_data data;
struct pt_regs *regs;
struct perf_event *event;
u64 period;
event = container_of(hrtimer, struct perf_event, hw.hrtimer);
if (event->state != PERF_EVENT_STATE_ACTIVE)
return HRTIMER_NORESTART;
event->pmu->read(event);
perf_sample_data_init(&data, 0, event->hw.last_period);
regs = get_irq_regs();
if (regs && !perf_exclude_event(event, regs)) {
if (!(event->attr.exclude_idle && is_idle_task(current)))
if (__perf_event_overflow(event, 1, &data, regs))
ret = HRTIMER_NORESTART;
}
period = max_t(u64, 10000, event->hw.sample_period);
hrtimer_forward_now(hrtimer, ns_to_ktime(period));
return ret;
}
static void perf_swevent_start_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 period;
if (!is_sampling_event(event))
return;
period = local64_read(&hwc->period_left);
if (period) {
if (period < 0)
period = 10000;
local64_set(&hwc->period_left, 0);
} else {
period = max_t(u64, 10000, hwc->sample_period);
}
__hrtimer_start_range_ns(&hwc->hrtimer,
ns_to_ktime(period), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (is_sampling_event(event)) {
ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
local64_set(&hwc->period_left, ktime_to_ns(remaining));
hrtimer_cancel(&hwc->hrtimer);
}
}
static void perf_swevent_init_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (!is_sampling_event(event))
return;
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
/*
* Since hrtimers have a fixed rate, we can do a static freq->period
* mapping and avoid the whole period adjust feedback stuff.
*/
if (event->attr.freq) {
long freq = event->attr.sample_freq;
event->attr.sample_period = NSEC_PER_SEC / freq;
hwc->sample_period = event->attr.sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
hwc->last_period = hwc->sample_period;
event->attr.freq = 0;
}
}
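/*
 * Example (illustrative): attr.freq == 1 with sample_freq == 1000
 * maps to a fixed period of NSEC_PER_SEC / 1000 == 1,000,000 ns, one
 * expiry per millisecond. The division is safe here: sample_freq and
 * sample_period share a union, so the !is_sampling_event() early
 * return above already excluded a zero sample_freq.
 */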
/*
* Software event: cpu wall time clock
*/
static void cpu_clock_event_update(struct perf_event *event)
{
s64 prev;
u64 now;
now = local_clock();
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
}
static void cpu_clock_event_start(struct perf_event *event, int flags)
{
local64_set(&event->hw.prev_count, local_clock());
perf_swevent_start_hrtimer(event);
}
static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
cpu_clock_event_update(event);
}
static int cpu_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
cpu_clock_event_start(event, flags);
return 0;
}
static void cpu_clock_event_del(struct perf_event *event, int flags)
{
cpu_clock_event_stop(event, flags);
}
static void cpu_clock_event_read(struct perf_event *event)
{
cpu_clock_event_update(event);
}
static int cpu_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
static struct pmu perf_cpu_clock = {
.task_ctx_nr = perf_sw_context,
.event_init = cpu_clock_event_init,
.add = cpu_clock_event_add,
.del = cpu_clock_event_del,
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
.event_idx = perf_swevent_event_idx,
};
/*
* Software event: task time clock
*/
static void task_clock_event_update(struct perf_event *event, u64 now)
{
u64 prev;
s64 delta;
prev = local64_xchg(&event->hw.prev_count, now);
delta = now - prev;
local64_add(delta, &event->count);
}
static void task_clock_event_start(struct perf_event *event, int flags)
{
local64_set(&event->hw.prev_count, event->ctx->time);
perf_swevent_start_hrtimer(event);
}
static void task_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
task_clock_event_update(event, event->ctx->time);
}
static int task_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
task_clock_event_start(event, flags);
return 0;
}
static void task_clock_event_del(struct perf_event *event, int flags)
{
task_clock_event_stop(event, PERF_EF_UPDATE);
}
static void task_clock_event_read(struct perf_event *event)
{
u64 now = perf_clock();
u64 delta = now - event->ctx->timestamp;
u64 time = event->ctx->time + delta;
task_clock_event_update(event, time);
}
static int task_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
static struct pmu perf_task_clock = {
.task_ctx_nr = perf_sw_context,
.event_init = task_clock_event_init,
.add = task_clock_event_add,
.del = task_clock_event_del,
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
.event_idx = perf_swevent_event_idx,
};
static void perf_pmu_nop_void(struct pmu *pmu)
{
}
static int perf_pmu_nop_int(struct pmu *pmu)
{
return 0;
}
static void perf_pmu_start_txn(struct pmu *pmu)
{
perf_pmu_disable(pmu);
}
static int perf_pmu_commit_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
return 0;
}
static void perf_pmu_cancel_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
}
static int perf_event_idx_default(struct perf_event *event)
{
return event->hw.idx + 1;
}
/*
* Ensures all contexts with the same task_ctx_nr have the same
* pmu_cpu_context too.
*/
static void *find_pmu_context(int ctxn)
{
struct pmu *pmu;
if (ctxn < 0)
return NULL;
list_for_each_entry(pmu, &pmus, entry) {
if (pmu->task_ctx_nr == ctxn)
return pmu->pmu_cpu_context;
}
return NULL;
}
static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
int cpu;
for_each_possible_cpu(cpu) {
struct perf_cpu_context *cpuctx;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
if (cpuctx->unique_pmu == old_pmu)
cpuctx->unique_pmu = pmu;
}
}
static void free_pmu_context(struct pmu *pmu)
{
struct pmu *i;
mutex_lock(&pmus_lock);
/*
* Like a real lame refcount.
*/
list_for_each_entry(i, &pmus, entry) {
if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
update_pmu_context(i, pmu);
goto out;
}
}
free_percpu(pmu->pmu_cpu_context);
out:
mutex_unlock(&pmus_lock);
}
static struct idr pmu_idr;
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}
static struct device_attribute pmu_dev_attrs[] = {
__ATTR_RO(type),
__ATTR_NULL,
};
static int pmu_bus_running;
static struct bus_type pmu_bus = {
.name = "event_source",
.dev_attrs = pmu_dev_attrs,
};
static void pmu_dev_release(struct device *dev)
{
kfree(dev);
}
static int pmu_dev_alloc(struct pmu *pmu)
{
int ret = -ENOMEM;
pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!pmu->dev)
goto out;
pmu->dev->groups = pmu->attr_groups;
device_initialize(pmu->dev);
ret = dev_set_name(pmu->dev, "%s", pmu->name);
if (ret)
goto free_dev;
dev_set_drvdata(pmu->dev, pmu);
pmu->dev->bus = &pmu_bus;
pmu->dev->release = pmu_dev_release;
ret = device_add(pmu->dev);
if (ret)
goto free_dev;
out:
return ret;
free_dev:
put_device(pmu->dev);
goto out;
}
static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;
int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
int cpu, ret;
mutex_lock(&pmus_lock);
ret = -ENOMEM;
pmu->pmu_disable_count = alloc_percpu(int);
if (!pmu->pmu_disable_count)
goto unlock;
pmu->type = -1;
if (!name)
goto skip_type;
pmu->name = name;
if (type < 0) {
type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
if (type < 0) {
ret = type;
goto free_pdc;
}
}
pmu->type = type;
if (pmu_bus_running) {
ret = pmu_dev_alloc(pmu);
if (ret)
goto free_idr;
}
skip_type:
pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
if (pmu->pmu_cpu_context)
goto got_cpu_context;
ret = -ENOMEM;
pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
if (!pmu->pmu_cpu_context)
goto free_dev;
for_each_possible_cpu(cpu) {
struct perf_cpu_context *cpuctx;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
__perf_event_init_context(&cpuctx->ctx);
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
cpuctx->jiffies_interval = 1;
INIT_LIST_HEAD(&cpuctx->rotation_list);
cpuctx->unique_pmu = pmu;
}
got_cpu_context:
if (!pmu->start_txn) {
if (pmu->pmu_enable) {
/*
* If we have pmu_enable/pmu_disable calls, install
* transaction stubs that use that to try and batch
* hardware accesses.
*/
pmu->start_txn = perf_pmu_start_txn;
pmu->commit_txn = perf_pmu_commit_txn;
pmu->cancel_txn = perf_pmu_cancel_txn;
} else {
pmu->start_txn = perf_pmu_nop_void;
pmu->commit_txn = perf_pmu_nop_int;
pmu->cancel_txn = perf_pmu_nop_void;
}
}
if (!pmu->pmu_enable) {
pmu->pmu_enable = perf_pmu_nop_void;
pmu->pmu_disable = perf_pmu_nop_void;
}
if (!pmu->event_idx)
pmu->event_idx = perf_event_idx_default;
list_add_rcu(&pmu->entry, &pmus);
ret = 0;
unlock:
mutex_unlock(&pmus_lock);
return ret;
free_dev:
device_del(pmu->dev);
put_device(pmu->dev);
free_idr:
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
free_pdc:
free_percpu(pmu->pmu_disable_count);
goto unlock;
}
void perf_pmu_unregister(struct pmu *pmu)
{
mutex_lock(&pmus_lock);
list_del_rcu(&pmu->entry);
mutex_unlock(&pmus_lock);
/*
* We dereference the pmu list under both SRCU and regular RCU, so
* synchronize against both of those.
*/
synchronize_srcu(&pmus_srcu);
synchronize_rcu();
free_percpu(pmu->pmu_disable_count);
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
device_del(pmu->dev);
put_device(pmu->dev);
free_pmu_context(pmu);
}
struct pmu *perf_init_event(struct perf_event *event)
{
struct pmu *pmu = NULL;
int idx;
int ret;
idx = srcu_read_lock(&pmus_srcu);
rcu_read_lock();
pmu = idr_find(&pmu_idr, event->attr.type);
rcu_read_unlock();
if (pmu) {
event->pmu = pmu;
ret = pmu->event_init(event);
if (ret)
pmu = ERR_PTR(ret);
goto unlock;
}
list_for_each_entry_rcu(pmu, &pmus, entry) {
event->pmu = pmu;
ret = pmu->event_init(event);
if (!ret)
goto unlock;
if (ret != -ENOENT) {
pmu = ERR_PTR(ret);
goto unlock;
}
}
pmu = ERR_PTR(-ENOENT);
unlock:
srcu_read_unlock(&pmus_srcu, idx);
return pmu;
}
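/*
 * Lookup sketch (editor's note): only dynamically allocated types
 * (>= PERF_TYPE_MAX, handed out by idr_alloc() in perf_pmu_register())
 * hit the idr fast path above; the fixed PERF_TYPE_* ids fall through
 * to the list walk, which offers the event to each PMU until one
 * returns something other than -ENOENT.
 */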
/*
* Allocate and initialize an event structure
*/
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
struct perf_event *group_leader,
struct perf_event *parent_event,
perf_overflow_handler_t overflow_handler,
void *context)
{
struct pmu *pmu;
struct perf_event *event;
struct hw_perf_event *hwc;
long err;
if ((unsigned)cpu >= nr_cpu_ids) {
if (!task || cpu != -1)
return ERR_PTR(-EINVAL);
}
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return ERR_PTR(-ENOMEM);
/*
* Single events are their own group leaders, with an
* empty sibling list:
*/
if (!group_leader)
group_leader = event;
mutex_init(&event->child_mutex);
INIT_LIST_HEAD(&event->child_list);
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
INIT_LIST_HEAD(&event->rb_entry);
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
mutex_init(&event->mmap_mutex);
atomic_long_set(&event->refcount, 1);
event->cpu = cpu;
event->attr = *attr;
event->group_leader = group_leader;
event->pmu = NULL;
event->oncpu = -1;
event->parent = parent_event;
event->ns = get_pid_ns(task_active_pid_ns(current));
event->id = atomic64_inc_return(&perf_event_id);
event->state = PERF_EVENT_STATE_INACTIVE;
if (task) {
event->attach_state = PERF_ATTACH_TASK;
if (attr->type == PERF_TYPE_TRACEPOINT)
event->hw.tp_target = task;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* hw_breakpoint is a bit difficult here..
*/
else if (attr->type == PERF_TYPE_BREAKPOINT)
event->hw.bp_target = task;
#endif
}
if (!overflow_handler && parent_event) {
overflow_handler = parent_event->overflow_handler;
context = parent_event->overflow_handler_context;
}
event->overflow_handler = overflow_handler;
event->overflow_handler_context = context;
perf_event__state_init(event);
pmu = NULL;
hwc = &event->hw;
hwc->sample_period = attr->sample_period;
if (attr->freq && attr->sample_freq)
hwc->sample_period = 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
/*
* we currently do not support PERF_FORMAT_GROUP on inherited events
*/
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
goto done;
pmu = perf_init_event(event);
done:
err = 0;
if (!pmu)
err = -EINVAL;
else if (IS_ERR(pmu))
err = PTR_ERR(pmu);
if (err) {
if (event->ns)
put_pid_ns(event->ns);
kfree(event);
return ERR_PTR(err);
}
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
static_key_slow_inc(&perf_sched_events.key);
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
atomic_inc(&nr_comm_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers();
if (err) {
free_event(event);
return ERR_PTR(err);
}
}
if (has_branch_stack(event)) {
static_key_slow_inc(&perf_sched_events.key);
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_inc(&per_cpu(perf_branch_stack_events,
event->cpu));
}
}
return event;
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
struct perf_event_attr *attr)
{
u32 size;
int ret;
if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
return -EFAULT;
/*
* Zero the full structure, so that a short copy leaves the remaining fields zeroed.
*/
memset(attr, 0, sizeof(*attr));
ret = get_user(size, &uattr->size);
if (ret)
return ret;
if (size > PAGE_SIZE) /* silly large */
goto err_size;
if (!size) /* abi compat */
size = PERF_ATTR_SIZE_VER0;
if (size < PERF_ATTR_SIZE_VER0)
goto err_size;
/*
* If we're handed a bigger struct than we know of,
* ensure all the unknown bits are 0 - i.e. new
* user-space does not rely on any kernel feature
* extensions we don't know about yet.
*/
if (size > sizeof(*attr)) {
unsigned char __user *addr;
unsigned char __user *end;
unsigned char val;
addr = (void __user *)uattr + sizeof(*attr);
end = (void __user *)uattr + size;
for (; addr < end; addr++) {
ret = get_user(val, addr);
if (ret)
return ret;
if (val)
goto err_size;
}
size = sizeof(*attr);
}
ret = copy_from_user(attr, uattr, size);
if (ret)
return -EFAULT;
if (attr->__reserved_1)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
return -EINVAL;
if (attr->read_format & ~(PERF_FORMAT_MAX-1))
return -EINVAL;
if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
u64 mask = attr->branch_sample_type;
/* only using defined bits */
if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
return -EINVAL;
/* at least one branch bit must be set */
if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
return -EINVAL;
/* kernel level capture: check permissions */
if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
&& perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
/* propagate priv level, when not set for branch */
if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
/* exclude_kernel checked on syscall entry */
if (!attr->exclude_kernel)
mask |= PERF_SAMPLE_BRANCH_KERNEL;
if (!attr->exclude_user)
mask |= PERF_SAMPLE_BRANCH_USER;
if (!attr->exclude_hv)
mask |= PERF_SAMPLE_BRANCH_HV;
/*
* adjust user setting (for HW filter setup)
*/
attr->branch_sample_type = mask;
}
}
if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
ret = perf_reg_validate(attr->sample_regs_user);
if (ret)
return ret;
}
if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
if (!arch_perf_have_user_stack_dump())
return -ENOSYS;
/*
* We have __u32 type for the size, but so far
* we can only use __u16 as maximum due to the
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
ret = -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
ret = -EINVAL;
}
out:
return ret;
err_size:
put_user(sizeof(*attr), &uattr->size);
ret = -E2BIG;
goto out;
}
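/*
 * ABI sketch (editor's illustration; the _v2 struct is hypothetical):
 * newer userspace may hand an older kernel a larger attr, e.g.
 *
 *	struct perf_event_attr_v2 {
 *		struct perf_event_attr base;
 *		__u64 new_cap;
 *	};
 *
 * The tail-scan above accepts it only while new_cap == 0; a non-zero
 * value the kernel cannot honour fails with E2BIG instead of being
 * silently ignored.
 */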
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
struct ring_buffer *rb = NULL, *old_rb = NULL;
int ret = -EINVAL;
if (!output_event)
goto set;
/* don't allow circular references */
if (event == output_event)
goto out;
/*
* Don't allow cross-cpu buffers
*/
if (output_event->cpu != event->cpu)
goto out;
/*
* If it's not a per-cpu rb, it must be the same task.
*/
if (output_event->cpu == -1 && output_event->ctx != event->ctx)
goto out;
set:
mutex_lock(&event->mmap_mutex);
/* Can't redirect output if we've got an active mmap() */
if (atomic_read(&event->mmap_count))
goto unlock;
if (output_event) {
/* get the rb we want to redirect to */
rb = ring_buffer_get(output_event);
if (!rb)
goto unlock;
}
old_rb = event->rb;
rcu_assign_pointer(event->rb, rb);
if (old_rb)
ring_buffer_detach(event, old_rb);
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
if (old_rb)
ring_buffer_put(old_rb);
out:
return ret;
}
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
* @attr_uptr: event_id type attributes for monitoring/sampling
* @pid: target pid
* @cpu: target cpu
* @group_fd: group leader event fd
*/
SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
struct perf_event_context *ctx;
struct file *event_file = NULL;
struct fd group = {NULL, 0};
struct task_struct *task = NULL;
struct pmu *pmu;
int event_fd;
int move_group = 0;
int err;
/* for future expandability... */
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
if (!attr.exclude_kernel) {
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
if (attr.freq) {
if (attr.sample_freq > sysctl_perf_event_sample_rate)
return -EINVAL;
}
/*
* In cgroup mode, the pid argument is used to pass the fd
* opened to the cgroup directory in cgroupfs. The cpu argument
* designates the cpu on which to monitor threads from that
* cgroup.
*/
if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
return -EINVAL;
event_fd = get_unused_fd();
if (event_fd < 0)
return event_fd;
if (group_fd != -1) {
err = perf_fget_light(group_fd, &group);
if (err)
goto err_fd;
group_leader = group.file->private_data;
if (flags & PERF_FLAG_FD_OUTPUT)
output_event = group_leader;
if (flags & PERF_FLAG_FD_NO_GROUP)
group_leader = NULL;
}
if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
task = find_lively_task_by_vpid(pid);
if (IS_ERR(task)) {
err = PTR_ERR(task);
goto err_group_fd;
}
}
get_online_cpus();
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
NULL, NULL);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_task;
}
if (flags & PERF_FLAG_PID_CGROUP) {
err = perf_cgroup_connect(pid, event, &attr, group_leader);
if (err)
goto err_alloc;
/*
* one more event:
* - that has cgroup constraint on event->cpu
* - that may need work on context switch
*/
atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
static_key_slow_inc(&perf_sched_events.key);
}
/*
* Special case software events and allow them to be part of
* any hardware group.
*/
pmu = event->pmu;
if (group_leader &&
(is_software_event(event) != is_software_event(group_leader))) {
if (is_software_event(event)) {
/*
* event and group_leader differ in kind here, and event is
* a software event, so group_leader is not.
*
* Allow the addition of software events to !software
* groups; this is safe because software events never
* fail to schedule.
*/
pmu = group_leader->pmu;
} else if (is_software_event(group_leader) &&
(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
/*
* In case the group is a pure software group, and we
* try to add a hardware event, move the whole group to
* the hardware context.
*/
move_group = 1;
}
}
/*
* Get the target context (task or percpu):
*/
ctx = find_get_context(pmu, task, event->cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_alloc;
}
if (task) {
put_task_struct(task);
task = NULL;
}
/*
* Look up the group leader (we will attach this event to it):
*/
if (group_leader) {
err = -EINVAL;
/*
* Do not allow a recursive hierarchy (this new sibling
* becoming part of another group-sibling):
*/
if (group_leader->group_leader != group_leader)
goto err_context;
/*
* Do not allow to attach to a group in a different
* task or CPU context:
*/
if (move_group) {
if (group_leader->ctx->type != ctx->type)
goto err_context;
} else {
if (group_leader->ctx != ctx)
goto err_context;
}
/*
* Only a group leader can be exclusive or pinned
*/
if (attr.exclusive || attr.pinned)
goto err_context;
}
if (output_event) {
err = perf_event_set_output(event, output_event);
if (err)
goto err_context;
}
event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
if (IS_ERR(event_file)) {
err = PTR_ERR(event_file);
goto err_context;
}
if (move_group) {
struct perf_event_context *gctx = group_leader->ctx;
mutex_lock(&gctx->mutex);
perf_remove_from_context(group_leader);
/*
* Removing the event from its context leaves it disabled.
* What we want here is the event in its initial startup
* state, ready to be added into the new context.
*/
perf_event__state_init(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_remove_from_context(sibling);
perf_event__state_init(sibling);
put_ctx(gctx);
}
mutex_unlock(&gctx->mutex);
put_ctx(gctx);
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
if (move_group) {
synchronize_rcu();
perf_install_in_context(ctx, group_leader, event->cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_install_in_context(ctx, sibling, event->cpu);
get_ctx(ctx);
}
}
perf_install_in_context(ctx, event, event->cpu);
++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
put_online_cpus();
event->owner = current;
mutex_lock(&current->perf_event_mutex);
list_add_tail(&event->owner_entry, &current->perf_event_list);
mutex_unlock(&current->perf_event_mutex);
/*
* Precalculate sample_data sizes
*/
perf_event__header_size(event);
perf_event__id_header_size(event);
/*
* Drop the reference on the group_event after placing the
* new event on the sibling_list. This ensures destruction
* of the group leader will find the pointer to itself in
* perf_group_detach().
*/
fdput(group);
fd_install(event_fd, event_file);
return event_fd;
err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
free_event(event);
err_task:
put_online_cpus();
if (task)
put_task_struct(task);
err_group_fd:
fdput(group);
err_fd:
put_unused_fd(event_fd);
return err;
}
/**
* perf_event_create_kernel_counter
*
* @attr: attributes of the counter to create
* @cpu: cpu in which the counter is bound
* @task: task to profile (NULL for percpu)
*/
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
perf_overflow_handler_t overflow_handler,
void *context)
{
struct perf_event_context *ctx;
struct perf_event *event;
int err;
/*
* Get the target context (task or percpu):
*/
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
overflow_handler, context);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
}
ctx = find_get_context(event->pmu, task, cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_free;
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
return event;
err_free:
free_event(event);
err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
struct perf_event_context *src_ctx;
struct perf_event_context *dst_ctx;
struct perf_event *event, *tmp;
LIST_HEAD(events);
src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
mutex_lock(&src_ctx->mutex);
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event);
put_ctx(src_ctx);
list_add(&event->event_entry, &events);
}
mutex_unlock(&src_ctx->mutex);
synchronize_rcu();
mutex_lock(&dst_ctx->mutex);
list_for_each_entry_safe(event, tmp, &events, event_entry) {
list_del(&event->event_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
perf_install_in_context(dst_ctx, event, dst_cpu);
get_ctx(dst_ctx);
}
mutex_unlock(&dst_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
static void sync_child_event(struct perf_event *child_event,
struct task_struct *child)
{
struct perf_event *parent_event = child_event->parent;
u64 child_val;
if (child_event->attr.inherit_stat)
perf_event_read_event(child_event, child);
child_val = perf_event_count(child_event);
/*
* Add back the child's count to the parent's count:
*/
atomic64_add(child_val, &parent_event->child_count);
atomic64_add(child_event->total_time_enabled,
&parent_event->child_total_time_enabled);
atomic64_add(child_event->total_time_running,
&parent_event->child_total_time_running);
/*
* Remove this event from the parent's list
*/
WARN_ON_ONCE(parent_event->ctx->parent_ctx);
mutex_lock(&parent_event->child_mutex);
list_del_init(&child_event->child_list);
mutex_unlock(&parent_event->child_mutex);
/*
* Release the parent event, if this was the last
* reference to it.
*/
put_event(parent_event);
}
static void
__perf_event_exit_task(struct perf_event *child_event,
struct perf_event_context *child_ctx,
struct task_struct *child)
{
if (child_event->parent) {
raw_spin_lock_irq(&child_ctx->lock);
perf_group_detach(child_event);
raw_spin_unlock_irq(&child_ctx->lock);
}
perf_remove_from_context(child_event);
/*
* It can happen that the parent exits first, and has events
* that are still around due to the child reference. These
* events need to be zapped.
*/
if (child_event->parent) {
sync_child_event(child_event, child);
free_event(child_event);
}
}
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
struct perf_event *child_event, *tmp;
struct perf_event_context *child_ctx;
unsigned long flags;
if (likely(!child->perf_event_ctxp[ctxn])) {
perf_event_task(child, NULL, 0);
return;
}
local_irq_save(flags);
/*
* We can't reschedule here because interrupts are disabled,
* and either child is current or it is a task that can't be
* scheduled, so we are now safe from rescheduling changing
* our context.
*/
child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
/*
* Take the context lock here so that if find_get_context is
* reading child->perf_event_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
raw_spin_lock(&child_ctx->lock);
task_ctx_sched_out(child_ctx);
child->perf_event_ctxp[ctxn] = NULL;
/*
* If this context is a clone; unclone it so it can't get
* swapped to another process while we're removing all
* the events from it.
*/
unclone_ctx(child_ctx);
update_context_time(child_ctx);
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Report the task dead after unscheduling the events so that we
* won't get any samples after PERF_RECORD_EXIT. We can however still
* get a few PERF_RECORD_READ events.
*/
perf_event_task(child, child_ctx, 0);
/*
* We can recurse on the same lock type through:
*
* __perf_event_exit_task()
* sync_child_event()
* put_event()
* mutex_lock(&ctx->mutex)
*
* But since its the parent context it won't be the same instance.
*/
mutex_lock(&child_ctx->mutex);
again:
list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
group_entry)
__perf_event_exit_task(child_event, child_ctx, child);
list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
group_entry)
__perf_event_exit_task(child_event, child_ctx, child);
/*
* If the last event was a group event, it will have appended all
* its siblings to the list, but we obtained 'tmp' before that which
* will still point to the list head terminating the iteration.
*/
if (!list_empty(&child_ctx->pinned_groups) ||
!list_empty(&child_ctx->flexible_groups))
goto again;
mutex_unlock(&child_ctx->mutex);
put_ctx(child_ctx);
}
/*
* When a child task exits, feed back event values to parent events.
*/
void perf_event_exit_task(struct task_struct *child)
{
struct perf_event *event, *tmp;
int ctxn;
mutex_lock(&child->perf_event_mutex);
list_for_each_entry_safe(event, tmp, &child->perf_event_list,
owner_entry) {
list_del_init(&event->owner_entry);
/*
* Ensure the list deletion is visible before we clear
* the owner, closes a race against perf_release() where
* we need to serialize on the owner->perf_event_mutex.
*/
smp_wmb();
event->owner = NULL;
}
mutex_unlock(&child->perf_event_mutex);
for_each_task_context_nr(ctxn)
perf_event_exit_task_context(child, ctxn);
}
static void perf_free_event(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *parent = event->parent;
if (WARN_ON_ONCE(!parent))
return;
mutex_lock(&parent->child_mutex);
list_del_init(&event->child_list);
mutex_unlock(&parent->child_mutex);
put_event(parent);
perf_group_detach(event);
list_del_event(event, ctx);
free_event(event);
}
/*
* free an unexposed, unused context, as created by inheritance in
* perf_event_init_task below; used by fork() in case of failure.
*/
void perf_event_free_task(struct task_struct *task)
{
struct perf_event_context *ctx;
struct perf_event *event, *tmp;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (!ctx)
continue;
mutex_lock(&ctx->mutex);
again:
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
group_entry)
perf_free_event(event, ctx);
list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
group_entry)
perf_free_event(event, ctx);
if (!list_empty(&ctx->pinned_groups) ||
!list_empty(&ctx->flexible_groups))
goto again;
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
}
}
void perf_event_delayed_put(struct task_struct *task)
{
int ctxn;
for_each_task_context_nr(ctxn)
WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
/*
* inherit an event from parent task to child task:
*/
static struct perf_event *
inherit_event(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event *group_leader,
struct perf_event_context *child_ctx)
{
struct perf_event *child_event;
unsigned long flags;
/*
* Instead of creating recursive hierarchies of events,
* we link inherited events back to the original parent,
* which has a filp for sure, which we use as the reference
* count:
*/
if (parent_event->parent)
parent_event = parent_event->parent;
child_event = perf_event_alloc(&parent_event->attr,
parent_event->cpu,
child,
group_leader, parent_event,
NULL, NULL);
if (IS_ERR(child_event))
return child_event;
if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
free_event(child_event);
return NULL;
}
get_ctx(child_ctx);
/*
* Make the child state follow the state of the parent event,
* not its attr.disabled bit. We hold the parent's mutex,
* so we won't race with perf_event_{en, dis}able_family.
*/
if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
child_event->state = PERF_EVENT_STATE_INACTIVE;
else
child_event->state = PERF_EVENT_STATE_OFF;
if (parent_event->attr.freq) {
u64 sample_period = parent_event->hw.sample_period;
struct hw_perf_event *hwc = &child_event->hw;
hwc->sample_period = sample_period;
hwc->last_period = sample_period;
local64_set(&hwc->period_left, sample_period);
}
child_event->ctx = child_ctx;
child_event->overflow_handler = parent_event->overflow_handler;
child_event->overflow_handler_context
= parent_event->overflow_handler_context;
/*
* Precalculate sample_data sizes
*/
perf_event__header_size(child_event);
perf_event__id_header_size(child_event);
/*
* Link it up in the child's context:
*/
raw_spin_lock_irqsave(&child_ctx->lock, flags);
add_event_to_ctx(child_event, child_ctx);
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Link this into the parent event's child list
*/
WARN_ON_ONCE(parent_event->ctx->parent_ctx);
mutex_lock(&parent_event->child_mutex);
list_add_tail(&child_event->child_list, &parent_event->child_list);
mutex_unlock(&parent_event->child_mutex);
return child_event;
}
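/*
 * Illustration (editor's note): if task A's event E is inherited by
 * child B as E', and B forks C, then C's clone also links back to E
 * (the parent_event->parent hop above), not to E'. Keeping the
 * hierarchy one level deep lets sync_child_event() fold counts
 * straight into the original event.
 */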
static int inherit_group(struct perf_event *parent_event,
struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child,
struct perf_event_context *child_ctx)
{
struct perf_event *leader;
struct perf_event *sub;
struct perf_event *child_ctr;
leader = inherit_event(parent_event, parent, parent_ctx,
child, NULL, child_ctx);
if (IS_ERR(leader))
return PTR_ERR(leader);
list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
child_ctr = inherit_event(sub, parent, parent_ctx,
child, leader, child_ctx);
if (IS_ERR(child_ctr))
return PTR_ERR(child_ctr);
}
return 0;
}
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
struct perf_event_context *parent_ctx,
struct task_struct *child, int ctxn,
int *inherited_all)
{
int ret;
struct perf_event_context *child_ctx;
if (!event->attr.inherit) {
*inherited_all = 0;
return 0;
}
child_ctx = child->perf_event_ctxp[ctxn];
if (!child_ctx) {
/*
* This is executed from the parent task context, so
* inherit events that have been marked for cloning.
* First allocate and initialize a context for the
* child.
*/
child_ctx = alloc_perf_context(event->pmu, child);
if (!child_ctx)
return -ENOMEM;
child->perf_event_ctxp[ctxn] = child_ctx;
}
ret = inherit_group(event, parent, parent_ctx,
child, child_ctx);
if (ret)
*inherited_all = 0;
return ret;
}
/*
* Initialize the perf_event context in task_struct
*/
int perf_event_init_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
int inherited_all = 1;
unsigned long flags;
int ret = 0;
if (likely(!parent->perf_event_ctxp[ctxn]))
return 0;
/*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
parent_ctx = perf_pin_task_context(parent, ctxn);
/*
* No need to check if parent_ctx != NULL here; since we saw
* it non-NULL earlier, the only reason for it to become NULL
* is if we exit, and since we're currently in the middle of
* a fork we can't be exiting at the same time.
*/
/*
* Lock the parent list. No need to lock the child - not PID
* hashed yet and not running, so nobody can access it.
*/
mutex_lock(&parent_ctx->mutex);
/*
* We don't have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
/*
* We can't hold ctx->lock when iterating the ->flexible_groups list due
* to allocations, but we need to prevent rotation because
* rotate_ctx() will change the list from interrupt context.
*/
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 1;
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
parent_ctx->rotate_disable = 0;
child_ctx = child->perf_event_ctxp[ctxn];
if (child_ctx && inherited_all) {
/*
* Mark the child context as a clone of the parent
* context, or of whatever the parent is a clone of.
*
* Note that if the parent is a clone, the holding of
* parent_ctx->lock avoids it from being uncloned.
*/
cloned_ctx = parent_ctx->parent_ctx;
if (cloned_ctx) {
child_ctx->parent_ctx = cloned_ctx;
child_ctx->parent_gen = parent_ctx->parent_gen;
} else {
child_ctx->parent_ctx = parent_ctx;
child_ctx->parent_gen = parent_ctx->generation;
}
get_ctx(child_ctx->parent_ctx);
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
put_ctx(parent_ctx);
return ret;
}
/*
* Initialize the perf_event context in task_struct
*/
int perf_event_init_task(struct task_struct *child)
{
int ctxn, ret;
memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
mutex_init(&child->perf_event_mutex);
INIT_LIST_HEAD(&child->perf_event_list);
for_each_task_context_nr(ctxn) {
ret = perf_event_init_context(child, ctxn);
if (ret)
return ret;
}
return 0;
}
static void __init perf_event_init_all_cpus(void)
{
struct swevent_htable *swhash;
int cpu;
for_each_possible_cpu(cpu) {
swhash = &per_cpu(swevent_htable, cpu);
mutex_init(&swhash->hlist_mutex);
INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
}
}
static void __cpuinit perf_event_init_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
WARN_ON(!hlist);
rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
mutex_unlock(&swhash->hlist_mutex);
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
WARN_ON(!irqs_disabled());
list_del_init(&cpuctx->rotation_list);
}
static void __perf_event_exit_context(void *__info)
{
struct perf_event_context *ctx = __info;
struct perf_event *event, *tmp;
perf_pmu_rotate_stop(ctx->pmu);
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
__perf_remove_from_context(event);
list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
__perf_remove_from_context(event);
}
static void perf_event_exit_cpu_context(int cpu)
{
struct perf_event_context *ctx;
struct pmu *pmu;
int idx;
idx = srcu_read_lock(&pmus_srcu);
list_for_each_entry_rcu(pmu, &pmus, entry) {
ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
mutex_lock(&ctx->mutex);
smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
mutex_unlock(&ctx->mutex);
}
srcu_read_unlock(&pmus_srcu, idx);
}
static void perf_event_exit_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
int cpu;
for_each_online_cpu(cpu)
perf_event_exit_cpu(cpu);
return NOTIFY_OK;
}
/*
* Run the perf reboot notifier at the very last possible moment so that
* the generic watchdog code runs as long as possible.
*/
static struct notifier_block perf_reboot_notifier = {
.notifier_call = perf_reboot,
.priority = INT_MIN,
};
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
case CPU_DOWN_FAILED:
perf_event_init_cpu(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DOWN_PREPARE:
perf_event_exit_cpu(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
void __init perf_event_init(void)
{
int ret;
idr_init(&pmu_idr);
perf_event_init_all_cpus();
init_srcu_struct(&pmus_srcu);
perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
perf_pmu_register(&perf_cpu_clock, NULL, -1);
perf_pmu_register(&perf_task_clock, NULL, -1);
perf_tp_register();
perf_cpu_notifier(perf_cpu_notify);
register_reboot_notifier(&perf_reboot_notifier);
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
/* do not patch jump label more than once per second */
jump_label_rate_limit(&perf_sched_events, HZ);
/*
* Build time assertion that we keep the data_head at the intended
* location. IOW, validation that we got the __reserved[] size right.
*/
BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
!= 1024);
}
static int __init perf_event_sysfs_init(void)
{
struct pmu *pmu;
int ret;
mutex_lock(&pmus_lock);
ret = bus_register(&pmu_bus);
if (ret)
goto unlock;
list_for_each_entry(pmu, &pmus, entry) {
if (!pmu->name || pmu->type < 0)
continue;
ret = pmu_dev_alloc(pmu);
WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
}
pmu_bus_running = 1;
ret = 0;
unlock:
mutex_unlock(&pmus_lock);
return ret;
}
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
{
struct perf_cgroup *jc;
jc = kzalloc(sizeof(*jc), GFP_KERNEL);
if (!jc)
return ERR_PTR(-ENOMEM);
jc->info = alloc_percpu(struct perf_cgroup_info);
if (!jc->info) {
kfree(jc);
return ERR_PTR(-ENOMEM);
}
return &jc->css;
}
static void perf_cgroup_css_free(struct cgroup *cont)
{
struct perf_cgroup *jc;
jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
struct perf_cgroup, css);
free_percpu(jc->info);
kfree(jc);
}
static int __perf_cgroup_move(void *info)
{
struct task_struct *task = info;
perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
return 0;
}
static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *task;
cgroup_taskset_for_each(task, cgrp, tset)
task_function_call(task, __perf_cgroup_move, task);
}
static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
struct task_struct *task)
{
/*
* cgroup_exit() is called in the copy_process() failure path.
* Ignore this case since the task hasn't run yet; this avoids
* trying to poke half-freed task state from generic code.
*/
if (!(task->flags & PF_EXITING))
return;
task_function_call(task, __perf_cgroup_move, task);
}
struct cgroup_subsys perf_subsys = {
.name = "perf_event",
.subsys_id = perf_subsys_id,
.css_alloc = perf_cgroup_css_alloc,
.css_free = perf_cgroup_css_free,
.exit = perf_cgroup_exit,
.attach = perf_cgroup_attach,
/*
* perf_event cgroup doesn't handle nesting correctly.
* ctx->nr_cgroups adjustments should be propagated through the
* cgroup hierarchy. Fix it and remove the following.
*/
.broken_hierarchy = true,
};
#endif /* CONFIG_CGROUP_PERF */
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5640_0 |
crossvul-cpp_data_bad_2154_0 | /*-
* Copyright (c) 2008 Christos Zoulas
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Parse Composite Document Files, the format used in Microsoft Office
* document files before they switched to zipped XML.
* Info from: http://sc.openoffice.org/compdocfileformat.pdf
*
* N.B. This is the "Composite Document File" format, and not the
* "Compound Document Format", nor the "Channel Definition Format".
*/
#include "file.h"
#ifndef lint
FILE_RCSID("@(#)$File: cdf.c,v 1.63 2014/06/09 13:04:37 christos Exp $")
#endif
#include <assert.h>
#ifdef CDF_DEBUG
#include <err.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include <ctype.h>
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef EFTYPE
#define EFTYPE EINVAL
#endif
#include "cdf.h"
#ifdef CDF_DEBUG
#define DPRINTF(a) printf a, fflush(stdout)
#else
#define DPRINTF(a)
#endif
static union {
char s[4];
uint32_t u;
} cdf_bo;
#define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304)
#define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x)))
#define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x)))
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_TOLE(x) (sizeof(x) == 2 ? CDF_TOLE2(x) : (sizeof(x) == 4 ? \
CDF_TOLE4(x) : CDF_TOLE8(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
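/*
 * Example (illustrative): CDF files are little-endian on disk, so on
 * a big-endian host NEED_SWAP is true and CDF_TOLE2(0x3412) yields
 * 0x1234; on a little-endian host the CDF_TOLE* macros pass the
 * value through unchanged.
 */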
/*
* swap a short
*/
static uint16_t
_cdf_tole2(uint16_t sv)
{
uint16_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[1];
d[1] = s[0];
return rv;
}
/*
* swap an int
*/
static uint32_t
_cdf_tole4(uint32_t sv)
{
uint32_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[3];
d[1] = s[2];
d[2] = s[1];
d[3] = s[0];
return rv;
}
/*
* swap a quad
*/
static uint64_t
_cdf_tole8(uint64_t sv)
{
uint64_t rv;
uint8_t *s = (uint8_t *)(void *)&sv;
uint8_t *d = (uint8_t *)(void *)&rv;
d[0] = s[7];
d[1] = s[6];
d[2] = s[5];
d[3] = s[4];
d[4] = s[3];
d[5] = s[2];
d[6] = s[1];
d[7] = s[0];
return rv;
}
/*
* grab a uint32_t from a possibly unaligned address, and return it in
* the native host order.
*/
static uint32_t
cdf_getuint32(const uint8_t *p, size_t offs)
{
uint32_t rv;
(void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv));
return CDF_TOLE4(rv);
}
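/*
* Illustrative sketch (not part of the original file): cdf_getuint32()
* goes through memcpy() because sector data carries no alignment
* guarantee. The cast-and-dereference below is the pattern it avoids;
* that load is undefined behaviour on strict-alignment targets.
*/
#if 0 /* illustrative only -- the unsafe pattern, shown for contrast */
static uint32_t unsafe_getuint32(const uint8_t *p, size_t offs)
{
/* may trap or misread when p + offs * 4 is not 4-byte aligned */
return CDF_TOLE4(*(const uint32_t *)(const void *)(p + offs * sizeof(uint32_t)));
}
#endif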
#define CDF_UNPACK(a) \
(void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a)
#define CDF_UNPACKA(a) \
(void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a)
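/*
* Illustrative sketch (not part of the original file): CDF_UNPACK and
* CDF_UNPACKA copy the on-disk layout one field at a time, advancing a
* caller-provided `len` cursor, so compiler-inserted struct padding can
* never skew the offsets. Expanded by hand for the first two header
* fields, cdf_unpack_header() below reduces to:
*/
#if 0 /* illustrative only */
size_t len = 0;
(void)memcpy(&h->h_magic, &buf[len], sizeof(h->h_magic)), len += sizeof(h->h_magic);
(void)memcpy(h->h_uuid, &buf[len], sizeof(h->h_uuid)), len += sizeof(h->h_uuid);
#endif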
uint16_t
cdf_tole2(uint16_t sv)
{
return CDF_TOLE2(sv);
}
uint32_t
cdf_tole4(uint32_t sv)
{
return CDF_TOLE4(sv);
}
uint64_t
cdf_tole8(uint64_t sv)
{
return CDF_TOLE8(sv);
}
void
cdf_swap_header(cdf_header_t *h)
{
size_t i;
h->h_magic = CDF_TOLE8(h->h_magic);
h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]);
h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]);
h->h_revision = CDF_TOLE2(h->h_revision);
h->h_version = CDF_TOLE2(h->h_version);
h->h_byte_order = CDF_TOLE2(h->h_byte_order);
h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2);
h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2);
h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat);
h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory);
h->h_min_size_standard_stream =
CDF_TOLE4(h->h_min_size_standard_stream);
h->h_secid_first_sector_in_short_sat =
CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat);
h->h_num_sectors_in_short_sat =
CDF_TOLE4(h->h_num_sectors_in_short_sat);
h->h_secid_first_sector_in_master_sat =
CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat);
h->h_num_sectors_in_master_sat =
CDF_TOLE4(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]);
}
void
cdf_unpack_header(cdf_header_t *h, char *buf)
{
size_t i;
size_t len = 0;
CDF_UNPACK(h->h_magic);
CDF_UNPACKA(h->h_uuid);
CDF_UNPACK(h->h_revision);
CDF_UNPACK(h->h_version);
CDF_UNPACK(h->h_byte_order);
CDF_UNPACK(h->h_sec_size_p2);
CDF_UNPACK(h->h_short_sec_size_p2);
CDF_UNPACKA(h->h_unused0);
CDF_UNPACK(h->h_num_sectors_in_sat);
CDF_UNPACK(h->h_secid_first_directory);
CDF_UNPACKA(h->h_unused1);
CDF_UNPACK(h->h_min_size_standard_stream);
CDF_UNPACK(h->h_secid_first_sector_in_short_sat);
CDF_UNPACK(h->h_num_sectors_in_short_sat);
CDF_UNPACK(h->h_secid_first_sector_in_master_sat);
CDF_UNPACK(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
CDF_UNPACK(h->h_master_sat[i]);
}
void
cdf_swap_dir(cdf_directory_t *d)
{
d->d_namelen = CDF_TOLE2(d->d_namelen);
d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child);
d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child);
d->d_storage = CDF_TOLE4((uint32_t)d->d_storage);
d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]);
d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]);
d->d_flags = CDF_TOLE4(d->d_flags);
d->d_created = CDF_TOLE8((uint64_t)d->d_created);
d->d_modified = CDF_TOLE8((uint64_t)d->d_modified);
d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector);
d->d_size = CDF_TOLE4(d->d_size);
}
void
cdf_swap_class(cdf_classid_t *d)
{
d->cl_dword = CDF_TOLE4(d->cl_dword);
d->cl_word[0] = CDF_TOLE2(d->cl_word[0]);
d->cl_word[1] = CDF_TOLE2(d->cl_word[1]);
}
void
cdf_unpack_dir(cdf_directory_t *d, char *buf)
{
size_t len = 0;
CDF_UNPACKA(d->d_name);
CDF_UNPACK(d->d_namelen);
CDF_UNPACK(d->d_type);
CDF_UNPACK(d->d_color);
CDF_UNPACK(d->d_left_child);
CDF_UNPACK(d->d_right_child);
CDF_UNPACK(d->d_storage);
CDF_UNPACKA(d->d_storage_uuid);
CDF_UNPACK(d->d_flags);
CDF_UNPACK(d->d_created);
CDF_UNPACK(d->d_modified);
CDF_UNPACK(d->d_stream_first_sector);
CDF_UNPACK(d->d_size);
CDF_UNPACK(d->d_unused0);
}
static int
cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
const void *p, size_t tail, int line)
{
const char *b = (const char *)sst->sst_tab;
const char *e = ((const char *)p) + tail;
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
(void)&line;
if (e >= b && (size_t)(e - b) <= ss * sst->sst_len)
return 0;
DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
" > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
ss * sst->sst_len, ss, sst->sst_len));
errno = EFTYPE;
return -1;
}
static ssize_t
cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len)
{
size_t siz = (size_t)off + len;
if ((off_t)(off + len) != (off_t)siz) {
errno = EINVAL;
return -1;
}
if (info->i_buf != NULL && info->i_len >= siz) {
(void)memcpy(buf, &info->i_buf[off], len);
return (ssize_t)len;
}
if (info->i_fd == -1)
return -1;
if (pread(info->i_fd, buf, len, off) != (ssize_t)len)
return -1;
return (ssize_t)len;
}
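/*
* Illustrative sketch (not part of the original file): the first test in
* cdf_read() rejects requests where off + len wraps or fails to
* round-trip between off_t and size_t. A more explicit guard in the
* same spirit (assuming <stdint.h> for SIZE_MAX) might look like:
*/
#if 0 /* illustrative only */
static int read_range_ok(off_t off, size_t len)
{
return off >= 0 && (size_t)off <= SIZE_MAX - len;
}
#endif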
int
cdf_read_header(const cdf_info_t *info, cdf_header_t *h)
{
char buf[512];
(void)memcpy(cdf_bo.s, "\01\02\03\04", 4);
if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1)
return -1;
cdf_unpack_header(h, buf);
cdf_swap_header(h);
if (h->h_magic != CDF_MAGIC) {
DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%"
INT64_T_FORMAT "x\n",
(unsigned long long)h->h_magic,
(unsigned long long)CDF_MAGIC));
goto out;
}
if (h->h_sec_size_p2 > 20) {
DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2));
goto out;
}
if (h->h_short_sec_size_p2 > 20) {
DPRINTF(("Bad short sector size 0x%u\n",
h->h_short_sec_size_p2));
goto out;
}
return 0;
out:
errno = EFTYPE;
return -1;
}
ssize_t
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SEC_SIZE(h);
size_t pos = CDF_SEC_POS(h, id);
assert(ss == len);
return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);
}
ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
pos + len, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
}
/*
* Read the sector allocation table.
*/
int
cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat)
{
size_t i, j, k;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t *msa, mid, sec;
size_t nsatpersec = (ss / sizeof(mid)) - 1;
for (i = 0; i < __arraycount(h->h_master_sat); i++)
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
#define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss))
if ((nsatpersec > 0 &&
h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) ||
i > CDF_SEC_LIMIT) {
DPRINTF(("Number of sectors in master SAT too big %u %"
SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i));
errno = EFTYPE;
return -1;
}
sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i;
DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n",
sat->sat_len, ss));
if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss)))
== NULL)
return -1;
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] < 0)
break;
if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
h->h_master_sat[i]) != (ssize_t)ss) {
DPRINTF(("Reading sector %d", h->h_master_sat[i]));
goto out1;
}
}
if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL)
goto out1;
mid = h->h_secid_first_sector_in_master_sat;
for (j = 0; j < h->h_num_sectors_in_master_sat; j++) {
if (mid < 0)
goto out;
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Reading master sector loop limit"));
errno = EFTYPE;
goto out2;
}
if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) {
DPRINTF(("Reading master sector %d", mid));
goto out2;
}
for (k = 0; k < nsatpersec; k++, i++) {
sec = CDF_TOLE4((uint32_t)msa[k]);
if (sec < 0)
goto out;
if (i >= sat->sat_len) {
DPRINTF(("Out of bounds reading MSA %" SIZE_T_FORMAT
"u >= %" SIZE_T_FORMAT "u", i, sat->sat_len));
errno = EFTYPE;
goto out2;
}
if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
sec) != (ssize_t)ss) {
DPRINTF(("Reading sector %d",
CDF_TOLE4(msa[k])));
goto out2;
}
}
mid = CDF_TOLE4((uint32_t)msa[nsatpersec]);
}
out:
sat->sat_len = i;
free(msa);
return 0;
out2:
free(msa);
out1:
free(sat->sat_tab);
return -1;
}
size_t
cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
{
size_t i, j;
cdf_secid_t maxsector = (cdf_secid_t)((sat->sat_len * size)
/ sizeof(maxsector));
DPRINTF(("Chain:"));
for (j = i = 0; sid >= 0; i++, j++) {
DPRINTF((" %d", sid));
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Counting chain loop limit"));
errno = EFTYPE;
return (size_t)-1;
}
if (sid >= maxsector) {
DPRINTF(("Sector %d >= %d\n", sid, maxsector));
errno = EFTYPE;
return (size_t)-1;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (i == 0) {
DPRINTF((" none, sid: %d\n", sid));
return (size_t)-1;
}
DPRINTF(("\n"));
return i;
}
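/*
* Illustrative note (not part of the original file): sector chains come
* from untrusted input, so cdf_count_chain() defends on two fronts: the
* CDF_LOOP_LIMIT cap defuses cyclic chains (A -> B -> A -> ...), and the
* maxsector bound keeps sid from indexing past the end of sat_tab.
*/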
int
cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
size_t ss = CDF_SEC_SIZE(h), i, j;
ssize_t nr;
scn->sst_len = cdf_count_chain(sat, sid, ss);
scn->sst_dirlen = len;
if (scn->sst_len == (size_t)-1)
return -1;
scn->sst_tab = calloc(scn->sst_len, ss);
if (scn->sst_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read long sector chain loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= scn->sst_len) {
DPRINTF(("Out of bounds reading long sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
scn->sst_len));
errno = EFTYPE;
goto out;
}
if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h,
sid)) != (ssize_t)ss) {
if (i == scn->sst_len - 1 && nr > 0) {
/* Last sector might be truncated */
return 0;
}
DPRINTF(("Reading long sector chain %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(scn->sst_tab);
return -1;
}
int
cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1)
return -1;
scn->sst_tab = calloc(scn->sst_len, ss);
if (scn->sst_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sector chain loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= scn->sst_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n",
i, scn->sst_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h,
sid) != (ssize_t)ss) {
DPRINTF(("Reading short sector chain %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]);
}
return 0;
out:
free(scn->sst_tab);
return -1;
}
int
cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL)
return cdf_read_short_sector_chain(h, ssat, sst, sid, len,
scn);
else
return cdf_read_long_sector_chain(info, h, sat, sid, len, scn);
}
int
cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_dir_t *dir)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h), ns, nd;
char *buf;
cdf_secid_t sid = h->h_secid_first_directory;
ns = cdf_count_chain(sat, sid, ss);
if (ns == (size_t)-1)
return -1;
nd = ss / CDF_DIRECTORY_SIZE;
dir->dir_len = ns * nd;
dir->dir_tab = CAST(cdf_directory_t *,
calloc(dir->dir_len, sizeof(dir->dir_tab[0])));
if (dir->dir_tab == NULL)
return -1;
if ((buf = CAST(char *, malloc(ss))) == NULL) {
free(dir->dir_tab);
return -1;
}
for (j = i = 0; i < ns; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read dir loop limit"));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) {
DPRINTF(("Reading directory sector %d", sid));
goto out;
}
for (j = 0; j < nd; j++) {
cdf_unpack_dir(&dir->dir_tab[i * nd + j],
&buf[j * CDF_DIRECTORY_SIZE]);
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (NEED_SWAP)
for (i = 0; i < dir->dir_len; i++)
cdf_swap_dir(&dir->dir_tab[i]);
free(buf);
return 0;
out:
free(dir->dir_tab);
free(buf);
return -1;
}
int
cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_sat_t *ssat)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t sid = h->h_secid_first_sector_in_short_sat;
ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h));
if (ssat->sat_len == (size_t)-1)
return -1;
ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss));
if (ssat->sat_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sat sector loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= ssat->sat_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
ssat->sat_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) !=
(ssize_t)ss) {
DPRINTF(("Reading short sat sector %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(ssat->sat_tab);
return -1;
}
int
cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn,
const cdf_directory_t **root)
{
size_t i;
const cdf_directory_t *d;
*root = NULL;
for (i = 0; i < dir->dir_len; i++)
if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE)
break;
/* If it is not there, just fake it; some docs don't have it */
if (i == dir->dir_len)
goto out;
d = &dir->dir_tab[i];
*root = d;
/* If it is not there, just fake it; some docs don't have it */
if (d->d_stream_first_sector < 0)
goto out;
return cdf_read_long_sector_chain(info, h, sat,
d->d_stream_first_sector, d->d_size, scn);
out:
scn->sst_tab = NULL;
scn->sst_len = 0;
scn->sst_dirlen = 0;
return 0;
}
static int
cdf_namecmp(const char *d, const uint16_t *s, size_t l)
{
for (; l--; d++, s++)
if (*d != CDF_TOLE2(*s))
return (unsigned char)*d - CDF_TOLE2(*s);
return 0;
}
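/*
* Illustrative usage (not part of the original file): directory names
* are stored as little-endian UTF-16 code units, so cdf_namecmp()
* compares an ASCII needle unit by unit, terminating NUL included.
* With d a hypothetical cdf_directory_t pointer, a lookup mirrors what
* cdf_read_user_stream() does below:
*/
#if 0 /* illustrative only */
if (cdf_namecmp("\05SummaryInformation", d->d_name,
sizeof("\05SummaryInformation")) == 0)
/* found the summary information stream */;
#endif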
int
cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, cdf_stream_t *scn)
{
return cdf_read_user_stream(info, h, sat, ssat, sst, dir,
"\05SummaryInformation", scn);
}
int
cdf_read_catalog(cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, cdf_stream_t *scn)
{
return cdf_read_user_stream(info, h, sat, ssat, sst, dir,
"Catalog", scn);
}
int
cdf_read_user_stream(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, const char *name, cdf_stream_t *scn)
{
size_t i;
const cdf_directory_t *d;
size_t name_len = strlen(name) + 1;
for (i = dir->dir_len; i > 0; i--)
if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM &&
cdf_namecmp(name, dir->dir_tab[i - 1].d_name, name_len)
== 0)
break;
if (i == 0) {
DPRINTF(("Cannot find user stream `%s'\n", name));
errno = ESRCH;
return -1;
}
d = &dir->dir_tab[i - 1];
return cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, scn);
}
int
cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t tail = (i << 1) + 1;
if (cdf_check_stream_offset(sst, h, p, tail * sizeof(uint32_t),
__LINE__) == -1)
goto out;
size_t ofs = CDF_GETUINT32(p, tail);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
int
cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE4(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info,
count, &maxcount) == -1)
return -1;
return 0;
}
#define extract_catalog_field(f, l) \
memcpy(&ce[i].f, b + (l), sizeof(ce[i].f)); \
ce[i].f = CDF_TOLE(ce[i].f)
int
cdf_unpack_catalog(const cdf_header_t *h, const cdf_stream_t *sst,
cdf_catalog_t **cat)
{
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
const char *b = CAST(const char *, sst->sst_tab);
const char *eb = b + ss * sst->sst_len;
size_t nr, i, k;
cdf_catalog_entry_t *ce;
uint16_t reclen;
const uint16_t *np;
for (nr = 0; b < eb; nr++) {
memcpy(&reclen, b, sizeof(reclen));
reclen = CDF_TOLE2(reclen);
if (reclen == 0)
break;
b += reclen;
}
*cat = CAST(cdf_catalog_t *,
malloc(sizeof(cdf_catalog_t) + nr * sizeof(*ce)));
(*cat)->cat_num = nr;
ce = (*cat)->cat_e;
b = CAST(const char *, sst->sst_tab);
for (i = 0; i < nr; i++) {
extract_catalog_field(ce_namlen, 0);
extract_catalog_field(ce_num, 2);
extract_catalog_field(ce_timestamp, 6);
reclen = ce[i].ce_namlen;
ce[i].ce_namlen =
sizeof(ce[i].ce_name) / sizeof(ce[i].ce_name[0]) - 1;
if (ce[i].ce_namlen > reclen - 14)
ce[i].ce_namlen = reclen - 14;
np = CAST(const uint16_t *, (b + 16));
for (k = 0; k < ce[i].ce_namlen; k++) {
ce[i].ce_name[k] = np[k];
CDF_TOLE2(ce[i].ce_name[k]);
}
ce[i].ce_name[ce[i].ce_namlen] = 0;
b += reclen;
}
return 0;
}
int
cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id)
{
return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-"
"%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0],
id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0],
id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4],
id->cl_six[5]);
}
static const struct {
uint32_t v;
const char *n;
} vn[] = {
{ CDF_PROPERTY_CODE_PAGE, "Code page" },
{ CDF_PROPERTY_TITLE, "Title" },
{ CDF_PROPERTY_SUBJECT, "Subject" },
{ CDF_PROPERTY_AUTHOR, "Author" },
{ CDF_PROPERTY_KEYWORDS, "Keywords" },
{ CDF_PROPERTY_COMMENTS, "Comments" },
{ CDF_PROPERTY_TEMPLATE, "Template" },
{ CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" },
{ CDF_PROPERTY_REVISION_NUMBER, "Revision Number" },
{ CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" },
{ CDF_PROPERTY_LAST_PRINTED, "Last Printed" },
{ CDF_PROPERTY_CREATE_TIME, "Create Time/Date" },
{ CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" },
{ CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" },
{ CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" },
{ CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" },
{ CDF_PROPERTY_THUMBNAIL, "Thumbnail" },
{ CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" },
{ CDF_PROPERTY_SECURITY, "Security" },
{ CDF_PROPERTY_LOCALE_ID, "Locale ID" },
};
int
cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p)
{
size_t i;
for (i = 0; i < __arraycount(vn); i++)
if (vn[i].v == p)
return snprintf(buf, bufsiz, "%s", vn[i].n);
return snprintf(buf, bufsiz, "0x%x", p);
}
int
cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts)
{
int len = 0;
int days, hours, mins, secs;
ts /= CDF_TIME_PREC;
secs = (int)(ts % 60);
ts /= 60;
mins = (int)(ts % 60);
ts /= 60;
hours = (int)(ts % 24);
ts /= 24;
days = (int)ts;
if (days) {
len += snprintf(buf + len, bufsiz - len, "%dd+", days);
if ((size_t)len >= bufsiz)
return len;
}
if (days || hours) {
len += snprintf(buf + len, bufsiz - len, "%.2d:", hours);
if ((size_t)len >= bufsiz)
return len;
}
len += snprintf(buf + len, bufsiz - len, "%.2d:", mins);
if ((size_t)len >= bufsiz)
return len;
len += snprintf(buf + len, bufsiz - len, "%.2d", secs);
return len;
}
char *
cdf_u16tos8(char *buf, size_t len, const uint16_t *p)
{
size_t i;
for (i = 0; i < len && p[i]; i++)
buf[i] = (char)p[i];
buf[i] = '\0';
return buf;
}
#ifdef CDF_DEBUG
void
cdf_dump_header(const cdf_header_t *h)
{
size_t i;
#define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b)
#define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \
h->h_ ## b, 1 << h->h_ ## b)
DUMP("%d", revision);
DUMP("%d", version);
DUMP("0x%x", byte_order);
DUMP2("%d", sec_size_p2);
DUMP2("%d", short_sec_size_p2);
DUMP("%d", num_sectors_in_sat);
DUMP("%d", secid_first_directory);
DUMP("%d", min_size_standard_stream);
DUMP("%d", secid_first_sector_in_short_sat);
DUMP("%d", num_sectors_in_short_sat);
DUMP("%d", secid_first_sector_in_master_sat);
DUMP("%d", num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
(void)fprintf(stderr, "%35.35s[%.3zu] = %d\n",
"master_sat", i, h->h_master_sat[i]);
}
}
void
cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size)
{
size_t i, j, s = size / sizeof(cdf_secid_t);
for (i = 0; i < sat->sat_len; i++) {
(void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6"
SIZE_T_FORMAT "u: ", prefix, i, i * s);
for (j = 0; j < s; j++) {
(void)fprintf(stderr, "%5d, ",
CDF_TOLE4(sat->sat_tab[s * i + j]));
if ((j + 1) % 10 == 0)
(void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT
"u: ", i * s + j + 1);
}
(void)fprintf(stderr, "\n");
}
}
void
cdf_dump(void *v, size_t len)
{
size_t i, j;
unsigned char *p = v;
char abuf[16];
(void)fprintf(stderr, "%.4x: ", 0);
for (i = 0, j = 0; i < len; i++, p++) {
(void)fprintf(stderr, "%.2x ", *p);
abuf[j++] = isprint(*p) ? *p : '.';
if (j == 16) {
j = 0;
abuf[15] = '\0';
(void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ",
abuf, i + 1);
}
}
(void)fprintf(stderr, "\n");
}
void
cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst)
{
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
cdf_dump(sst->sst_tab, ss * sst->sst_len);
}
void
cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir)
{
size_t i, j;
cdf_directory_t *d;
char name[__arraycount(d->d_name)];
cdf_stream_t scn;
struct timespec ts;
static const char *types[] = { "empty", "user storage",
"user stream", "lockbytes", "property", "root storage" };
for (i = 0; i < dir->dir_len; i++) {
char buf[26];
d = &dir->dir_tab[i];
for (j = 0; j < sizeof(name); j++)
name[j] = (char)CDF_TOLE2(d->d_name[j]);
(void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n",
i, name);
if (d->d_type < __arraycount(types))
(void)fprintf(stderr, "Type: %s\n", types[d->d_type]);
else
(void)fprintf(stderr, "Type: %d\n", d->d_type);
(void)fprintf(stderr, "Color: %s\n",
d->d_color ? "black" : "red");
(void)fprintf(stderr, "Left child: %d\n", d->d_left_child);
(void)fprintf(stderr, "Right child: %d\n", d->d_right_child);
(void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags);
cdf_timestamp_to_timespec(&ts, d->d_created);
(void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec, buf));
cdf_timestamp_to_timespec(&ts, d->d_modified);
(void)fprintf(stderr, "Modified %s",
cdf_ctime(&ts.tv_sec, buf));
(void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector);
(void)fprintf(stderr, "Size %d\n", d->d_size);
switch (d->d_type) {
case CDF_DIR_TYPE_USER_STORAGE:
(void)fprintf(stderr, "Storage: %d\n", d->d_storage);
break;
case CDF_DIR_TYPE_USER_STREAM:
if (sst == NULL)
break;
if (cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, &scn) == -1) {
warn("Can't read stream for %s at %d len %d",
name, d->d_stream_first_sector, d->d_size);
break;
}
cdf_dump_stream(h, &scn);
free(scn.sst_tab);
break;
default:
break;
}
}
}
void
cdf_dump_property_info(const cdf_property_info_t *info, size_t count)
{
cdf_timestamp_t tp;
struct timespec ts;
char buf[64];
size_t i, j;
for (i = 0; i < count; i++) {
cdf_print_property_name(buf, sizeof(buf), info[i].pi_id);
(void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf);
switch (info[i].pi_type) {
case CDF_NULL:
break;
case CDF_SIGNED16:
(void)fprintf(stderr, "signed 16 [%hd]\n",
info[i].pi_s16);
break;
case CDF_SIGNED32:
(void)fprintf(stderr, "signed 32 [%d]\n",
info[i].pi_s32);
break;
case CDF_UNSIGNED32:
(void)fprintf(stderr, "unsigned 32 [%u]\n",
info[i].pi_u32);
break;
case CDF_FLOAT:
(void)fprintf(stderr, "float [%g]\n",
info[i].pi_f);
break;
case CDF_DOUBLE:
(void)fprintf(stderr, "double [%g]\n",
info[i].pi_d);
break;
case CDF_LENGTH32_STRING:
(void)fprintf(stderr, "string %u [%.*s]\n",
info[i].pi_str.s_len,
info[i].pi_str.s_len, info[i].pi_str.s_buf);
break;
case CDF_LENGTH32_WSTRING:
(void)fprintf(stderr, "string %u [",
info[i].pi_str.s_len);
for (j = 0; j < info[i].pi_str.s_len - 1; j++)
(void)fputc(info[i].pi_str.s_buf[j << 1], stderr);
(void)fprintf(stderr, "]\n");
break;
case CDF_FILETIME:
tp = info[i].pi_tp;
if (tp < 1000000000000000LL) {
cdf_print_elapsed_time(buf, sizeof(buf), tp);
(void)fprintf(stderr, "timestamp %s\n", buf);
} else {
char buf[26];
cdf_timestamp_to_timespec(&ts, tp);
(void)fprintf(stderr, "timestamp %s",
cdf_ctime(&ts.tv_sec, buf));
}
break;
case CDF_CLIPBOARD:
(void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32);
break;
default:
DPRINTF(("Don't know how to deal with %x\n",
info[i].pi_type));
break;
}
}
}
void
cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst)
{
char buf[128];
cdf_summary_info_header_t ssi;
cdf_property_info_t *info;
size_t count;
(void)&h;
if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1)
return;
(void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order);
(void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff,
ssi.si_os_version >> 8);
(void)fprintf(stderr, "Os %d\n", ssi.si_os);
cdf_print_classid(buf, sizeof(buf), &ssi.si_class);
(void)fprintf(stderr, "Class %s\n", buf);
(void)fprintf(stderr, "Count %d\n", ssi.si_count);
cdf_dump_property_info(info, count);
free(info);
}
void
cdf_dump_catalog(const cdf_header_t *h, const cdf_stream_t *sst)
{
cdf_catalog_t *cat;
cdf_unpack_catalog(h, sst, &cat);
const cdf_catalog_entry_t *ce = cat->cat_e;
struct timespec ts;
char tbuf[64], sbuf[256];
size_t i;
printf("Catalog:\n");
for (i = 0; i < cat->cat_num; i++) {
cdf_timestamp_to_timespec(&ts, ce[i].ce_timestamp);
printf("\t%d %s %s", ce[i].ce_num,
cdf_u16tos8(sbuf, ce[i].ce_namlen, ce[i].ce_name),
cdf_ctime(&ts.tv_sec, tbuf));
}
free(cat);
}
#endif
#ifdef TEST
int
main(int argc, char *argv[])
{
int i;
cdf_header_t h;
cdf_sat_t sat, ssat;
cdf_stream_t sst, scn;
cdf_dir_t dir;
cdf_info_t info;
const cdf_directory_t *root;
if (argc < 2) {
(void)fprintf(stderr, "Usage: %s <filename>\n", getprogname());
return -1;
}
info.i_buf = NULL;
info.i_len = 0;
for (i = 1; i < argc; i++) {
if ((info.i_fd = open(argv[1], O_RDONLY)) == -1)
err(1, "Cannot open `%s'", argv[1]);
if (cdf_read_header(&info, &h) == -1)
err(1, "Cannot read header");
#ifdef CDF_DEBUG
cdf_dump_header(&h);
#endif
if (cdf_read_sat(&info, &h, &sat) == -1)
err(1, "Cannot read sat");
#ifdef CDF_DEBUG
cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h));
#endif
if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1)
err(1, "Cannot read ssat");
#ifdef CDF_DEBUG
cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h));
#endif
if (cdf_read_dir(&info, &h, &sat, &dir) == -1)
err(1, "Cannot read dir");
if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst, &root)
== -1)
err(1, "Cannot read short stream");
#ifdef CDF_DEBUG
cdf_dump_stream(&h, &sst);
#endif
#ifdef CDF_DEBUG
cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir);
#endif
if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir,
&scn) == -1)
warn("Cannot read summary info");
#ifdef CDF_DEBUG
else
cdf_dump_summary_info(&h, &scn);
#endif
if (cdf_read_catalog(&info, &h, &sat, &ssat, &sst, &dir,
&scn) == -1)
warn("Cannot read catalog");
#ifdef CDF_DEBUG
else
cdf_dump_catalog(&h, &scn);
#endif
(void)close(info.i_fd);
}
return 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2154_0 |
crossvul-cpp_data_good_3475_0 | /*
* mm/mremap.c
*
* (C) Copyright 1996 Linus Torvalds
*
* Address space accounting code <alan@lxorguk.ukuu.org.uk>
* (C) Copyright 2002 Red Hat Inc, All Rights Reserved
*/
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "internal.h"
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
if (pgd_none_or_clear_bad(pgd))
return NULL;
pud = pud_offset(pgd, addr);
if (pud_none_or_clear_bad(pud))
return NULL;
pmd = pmd_offset(pud, addr);
split_huge_page_pmd(mm, pmd);
if (pmd_none_or_clear_bad(pmd))
return NULL;
return pmd;
}
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (!pud)
return NULL;
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return NULL;
VM_BUG_ON(pmd_trans_huge(*pmd));
if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
return NULL;
return pmd;
}
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
unsigned long new_addr)
{
struct address_space *mapping = NULL;
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
spinlock_t *old_ptl, *new_ptl;
unsigned long old_start;
old_start = old_addr;
mmu_notifier_invalidate_range_start(vma->vm_mm,
old_start, old_end);
if (vma->vm_file) {
/*
* Subtle point from Rajesh Venkatasubramanian: before
* moving file-based ptes, we must lock truncate_pagecache
* out, since it might clean the dst vma before the src vma,
* and we propagate stale pages into the dst afterward.
*/
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
new_vma->vm_truncate_count = 0;
}
/*
* We don't have to worry about the ordering of src and dst
* pte locks because exclusive mmap_sem prevents deadlock.
*/
old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
new_pte = pte_offset_map(new_pmd, new_addr);
new_ptl = pte_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
arch_enter_lazy_mmu_mode();
for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
new_pte++, new_addr += PAGE_SIZE) {
if (pte_none(*old_pte))
continue;
pte = ptep_clear_flush(vma, old_addr, old_pte);
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
set_pte_at(mm, new_addr, new_pte, pte);
}
arch_leave_lazy_mmu_mode();
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_pte - 1);
pte_unmap_unlock(old_pte - 1, old_ptl);
if (mapping)
spin_unlock(&mapping->i_mmap_lock);
mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}
#define LATENCY_LIMIT (64 * PAGE_SIZE)
unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len)
{
unsigned long extent, next, old_end;
pmd_t *old_pmd, *new_pmd;
old_end = old_addr + len;
flush_cache_range(vma, old_addr, old_end);
for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
cond_resched();
next = (old_addr + PMD_SIZE) & PMD_MASK;
if (next - 1 > old_end)
next = old_end;
extent = next - old_addr;
old_pmd = get_old_pmd(vma->vm_mm, old_addr);
if (!old_pmd)
continue;
new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
if (!new_pmd)
break;
next = (new_addr + PMD_SIZE) & PMD_MASK;
if (extent > next - new_addr)
extent = next - new_addr;
if (extent > LATENCY_LIMIT)
extent = LATENCY_LIMIT;
move_ptes(vma, old_pmd, old_addr, old_addr + extent,
new_vma, new_pmd, new_addr);
}
return len + old_addr - old_end; /* how much done */
}
static unsigned long move_vma(struct vm_area_struct *vma,
unsigned long old_addr, unsigned long old_len,
unsigned long new_len, unsigned long new_addr)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma;
unsigned long vm_flags = vma->vm_flags;
unsigned long new_pgoff;
unsigned long moved_len;
unsigned long excess = 0;
unsigned long hiwater_vm;
int split = 0;
int err;
/*
* We'd prefer to avoid failure later on in do_munmap:
* which may split one vma into three before unmapping.
*/
if (mm->map_count >= sysctl_max_map_count - 3)
return -ENOMEM;
/*
* Advise KSM to break any KSM pages in the area to be moved:
* it would be confusing if they were to turn up at the new
* location, where they happen to coincide with different KSM
* pages recently unmapped. But leave vma->vm_flags as it was,
* so KSM can come around to merge on vma and new_vma afterwards.
*/
err = ksm_madvise(vma, old_addr, old_addr + old_len,
MADV_UNMERGEABLE, &vm_flags);
if (err)
return err;
new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
if (!new_vma)
return -ENOMEM;
moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
if (moved_len < old_len) {
/*
* On error, move entries back from new area to old,
* which will succeed since page tables still there,
* and then proceed to unmap new area instead of old.
*/
move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
vma = new_vma;
old_len = new_len;
old_addr = new_addr;
new_addr = -ENOMEM;
}
/* Conceal VM_ACCOUNT so old reservation is not undone */
if (vm_flags & VM_ACCOUNT) {
vma->vm_flags &= ~VM_ACCOUNT;
excess = vma->vm_end - vma->vm_start - old_len;
if (old_addr > vma->vm_start &&
old_addr + old_len < vma->vm_end)
split = 1;
}
/*
* If we failed to move page tables we still do total_vm increment
* since do_munmap() will decrement it by old_len == new_len.
*
* Since total_vm is about to be raised artificially high for a
* moment, we need to restore high watermark afterwards: if stats
* are taken meanwhile, total_vm and hiwater_vm appear too high.
* If this were a serious issue, we'd add a flag to do_munmap().
*/
hiwater_vm = mm->hiwater_vm;
mm->total_vm += new_len >> PAGE_SHIFT;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
if (do_munmap(mm, old_addr, old_len) < 0) {
/* OOM: unable to split vma, just get accounts right */
vm_unacct_memory(excess >> PAGE_SHIFT);
excess = 0;
}
mm->hiwater_vm = hiwater_vm;
/* Restore VM_ACCOUNT if one or two pieces of vma left */
if (excess) {
vma->vm_flags |= VM_ACCOUNT;
if (split)
vma->vm_next->vm_flags |= VM_ACCOUNT;
}
if (vm_flags & VM_LOCKED) {
mm->locked_vm += new_len >> PAGE_SHIFT;
if (new_len > old_len)
mlock_vma_pages_range(new_vma, new_addr + old_len,
new_addr + new_len);
}
return new_addr;
}
static struct vm_area_struct *vma_to_resize(unsigned long addr,
unsigned long old_len, unsigned long new_len, unsigned long *p)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = find_vma(mm, addr);
if (!vma || vma->vm_start > addr)
goto Efault;
if (is_vm_hugetlb_page(vma))
goto Einval;
/* We can't remap across vm area boundaries */
if (old_len > vma->vm_end - addr)
goto Efault;
/* Need to be careful about a growing mapping */
if (new_len > old_len) {
unsigned long pgoff;
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
goto Efault;
pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
goto Einval;
}
if (vma->vm_flags & VM_LOCKED) {
unsigned long locked, lock_limit;
locked = mm->locked_vm << PAGE_SHIFT;
lock_limit = rlimit(RLIMIT_MEMLOCK);
locked += new_len - old_len;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
goto Eagain;
}
if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
goto Enomem;
if (vma->vm_flags & VM_ACCOUNT) {
unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
if (security_vm_enough_memory(charged))
goto Efault;
*p = charged;
}
return vma;
Efault: /* very odd choice for most of the cases, but... */
return ERR_PTR(-EFAULT);
Einval:
return ERR_PTR(-EINVAL);
Enomem:
return ERR_PTR(-ENOMEM);
Eagain:
return ERR_PTR(-EAGAIN);
}
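/*
* Illustrative note (not part of the original file): vma_to_resize()
* reports failure through the kernel's ERR_PTR()/IS_ERR()/PTR_ERR()
* convention, encoding a small negative errno in the pointer value
* itself. Callers unwrap it the way mremap_to() does below:
*/
#if 0 /* illustrative only */
vma = vma_to_resize(addr, old_len, new_len, &charged);
if (IS_ERR(vma))
return PTR_ERR(vma); /* -EFAULT, -EINVAL, -ENOMEM or -EAGAIN */
#endif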
static unsigned long mremap_to(unsigned long addr,
unsigned long old_len, unsigned long new_addr,
unsigned long new_len)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
unsigned long charged = 0;
unsigned long map_flags;
if (new_addr & ~PAGE_MASK)
goto out;
if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
goto out;
/* Check if the location we're moving into overlaps the
* old location at all, and fail if it does.
*/
if ((new_addr <= addr) && (new_addr+new_len) > addr)
goto out;
if ((addr <= new_addr) && (addr+old_len) > new_addr)
goto out;
ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
if (ret)
goto out;
ret = do_munmap(mm, new_addr, new_len);
if (ret)
goto out;
if (old_len >= new_len) {
ret = do_munmap(mm, addr+new_len, old_len - new_len);
if (ret && old_len != new_len)
goto out;
old_len = new_len;
}
vma = vma_to_resize(addr, old_len, new_len, &charged);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
map_flags = MAP_FIXED;
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
((addr - vma->vm_start) >> PAGE_SHIFT),
map_flags);
if (ret & ~PAGE_MASK)
goto out1;
ret = move_vma(vma, addr, old_len, new_len, new_addr);
if (!(ret & ~PAGE_MASK))
goto out;
out1:
vm_unacct_memory(charged);
out:
return ret;
}
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
unsigned long end = vma->vm_end + delta;
if (end < vma->vm_end) /* overflow */
return 0;
if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
return 0;
if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
0, MAP_FIXED) & ~PAGE_MASK)
return 0;
return 1;
}
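/*
* Illustrative sketch (not part of the original file): the first test in
* vma_expandable() is the standard unsigned wraparound check -- if the
* sum came out smaller than an operand, the addition overflowed:
*/
#if 0 /* illustrative only */
static int add_overflows(unsigned long base, unsigned long delta)
{
return base + delta < base; /* true exactly when the sum wrapped */
}
#endif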
/*
* Expand (or shrink) an existing mapping, potentially moving it at the
* same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
*
* MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
* This option implies MREMAP_MAYMOVE.
*/
unsigned long do_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
unsigned long charged = 0;
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
goto out;
if (addr & ~PAGE_MASK)
goto out;
old_len = PAGE_ALIGN(old_len);
new_len = PAGE_ALIGN(new_len);
/*
* We allow a zero old-len as a special case
* for DOS-emu "duplicate shm area" thing. But
* a zero new-len is nonsensical.
*/
if (!new_len)
goto out;
if (flags & MREMAP_FIXED) {
if (flags & MREMAP_MAYMOVE)
ret = mremap_to(addr, old_len, new_addr, new_len);
goto out;
}
/*
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
* do_munmap does all the needed commit accounting
*/
if (old_len >= new_len) {
ret = do_munmap(mm, addr+new_len, old_len - new_len);
if (ret && old_len != new_len)
goto out;
ret = addr;
goto out;
}
/*
* Ok, we need to grow..
*/
vma = vma_to_resize(addr, old_len, new_len, &charged);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
/* old_len exactly to the end of the area..
*/
if (old_len == vma->vm_end - addr) {
/* can we just expand the current mapping? */
if (vma_expandable(vma, new_len - old_len)) {
int pages = (new_len - old_len) >> PAGE_SHIFT;
if (vma_adjust(vma, vma->vm_start, addr + new_len,
vma->vm_pgoff, NULL)) {
ret = -ENOMEM;
goto out;
}
mm->total_vm += pages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
if (vma->vm_flags & VM_LOCKED) {
mm->locked_vm += pages;
mlock_vma_pages_range(vma, addr + old_len,
addr + new_len);
}
ret = addr;
goto out;
}
}
/*
* We weren't able to just expand or shrink the area,
* we need to create a new one and move it..
*/
ret = -ENOMEM;
if (flags & MREMAP_MAYMOVE) {
unsigned long map_flags = 0;
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
vma->vm_pgoff +
((addr - vma->vm_start) >> PAGE_SHIFT),
map_flags);
if (new_addr & ~PAGE_MASK) {
ret = new_addr;
goto out;
}
ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
if (ret)
goto out;
ret = move_vma(vma, addr, old_len, new_len, new_addr);
}
out:
if (ret & ~PAGE_MASK)
vm_unacct_memory(charged);
return ret;
}
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
unsigned long, new_len, unsigned long, flags,
unsigned long, new_addr)
{
unsigned long ret;
down_write(&current->mm->mmap_sem);
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
up_write(&current->mm->mmap_sem);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3475_0 |
crossvul-cpp_data_bad_3653_0 | /*
* Copyright © 2008,2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
uint32_t flips;
};
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invaliding though,
* as that may want to be batched with other set_domain operations
*
* This is (we hope) the only really tricky part of gem. The goal
* is fairly simple -- track which caches hold bits of the object
* and make sure they remain coherent. A few concrete examples may
* help to explain how it works. For shorthand, we use the notation
* (read_domains, write_domain), e.g. (CPU, CPU) to indicate
* a pair of read and write domain masks.
*
* Case 1: the batch buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Mapped to GTT
* 4. Read by GPU
* 5. Unmapped from GTT
* 6. Freed
*
* Let's take these a step at a time
*
* 1. Allocated
* Pages allocated from the kernel may still have
* cache contents, so we set them to (CPU, CPU) always.
* 2. Written by CPU (using pwrite)
* The pwrite function calls set_domain (CPU, CPU) and
* this function does nothing (as nothing changes)
* 3. Mapped by GTT
* This function asserts that the object is not
* currently in any GPU-based read or write domains
* 4. Read by GPU
* i915_gem_execbuffer calls set_domain (COMMAND, 0).
* As write_domain is zero, this function adds in the
* current read domains (CPU+COMMAND, 0).
* flush_domains is set to CPU.
* invalidate_domains is set to COMMAND
* clflush is run to get data out of the CPU caches
* then i915_dev_set_domain calls i915_gem_flush to
* emit an MI_FLUSH and drm_agp_chipset_flush
* 5. Unmapped from GTT
* i915_gem_object_unbind calls set_domain (CPU, CPU)
* flush_domains and invalidate_domains end up both zero
* so no flushing/invalidating happens
* 6. Freed
* yay, done
*
* Case 2: The shared render buffer
*
* 1. Allocated
* 2. Mapped to GTT
* 3. Read/written by GPU
* 4. set_domain to (CPU,CPU)
* 5. Read/written by CPU
* 6. Read/written by GPU
*
* 1. Allocated
* Same as last example, (CPU, CPU)
* 2. Mapped to GTT
* Nothing changes (assertions find that it is not in the GPU)
* 3. Read/written by GPU
* execbuffer calls set_domain (RENDER, RENDER)
* flush_domains gets CPU
* invalidate_domains gets GPU
* clflush (obj)
* MI_FLUSH and drm_agp_chipset_flush
* 4. set_domain (CPU, CPU)
* flush_domains gets GPU
* invalidate_domains gets CPU
* wait_rendering (obj) to make sure all drawing is complete.
* This will include an MI_FLUSH to get the data from GPU
* to memory
* clflush (obj) to invalidate the CPU cache
* Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
* 5. Read/written by CPU
* cache lines are loaded and dirtied
* 6. Read written by GPU
* Same as last GPU access
*
* Case 3: The constant buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Read by GPU
* 4. Updated (written) by CPU again
* 5. Read by GPU
*
* 1. Allocated
* (CPU, CPU)
* 2. Written by CPU
* (CPU, CPU)
* 3. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
* 4. Updated (written) by CPU again
* (CPU, CPU)
* flush_domains = 0 (no previous write domain)
* invalidate_domains = 0 (no new read domains)
* 5. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct change_domains *cd)
{
uint32_t invalidate_domains = 0, flush_domains = 0;
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
*/
if (obj->base.pending_write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
/*
* Flush the current write domain if
* the new read domains don't match. Invalidate
* any read domains which differ from the old
* write domain
*/
if (obj->base.write_domain &&
(((obj->base.write_domain != obj->base.pending_read_domains ||
obj->ring != ring)) ||
(obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
flush_domains |= obj->base.write_domain;
invalidate_domains |=
obj->base.pending_read_domains & ~obj->base.write_domain;
}
/*
* Invalidate any read caches which may have
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
* write_domains). So if we have a current write domain that we
* aren't changing, set pending_write_domain to that.
*/
if (flush_domains == 0 && obj->base.pending_write_domain == 0)
obj->base.pending_write_domain = obj->base.write_domain;
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(ring);
}
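/*
* Illustrative note (not part of the original file): walking Case 1 from
* the comment above through this function, a batch freshly written by
* the CPU and about to be read by the GPU leaves flush_domains holding
* CPU (dirty cachelines to clflush) and invalidate_domains holding the
* new GPU read domain, matching the step-4 transition described there.
*/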
struct eb_objects {
int and;
struct hlist_head buckets[0];
};
static struct eb_objects *
eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
GFP_KERNEL);
if (eb == NULL)
return eb;
eb->and = count - 1;
return eb;
}
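/*
* Illustrative note (not part of the original file): eb_create() settles
* on a power-of-two bucket count no larger than the object count (capped
* at half a page of hlist_heads), so eb->and doubles as a bit mask and
* bucket selection is a single AND, as eb_add_object() and
* eb_get_object() do below:
*/
#if 0 /* illustrative only */
bucket = obj->exec_handle & eb->and; /* == handle % (eb->and + 1) */
#endif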
static void
eb_reset(struct eb_objects *eb)
{
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
hlist_add_head(&obj->exec_node,
&eb->buckets[obj->exec_handle & eb->and]);
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
struct hlist_head *head;
struct hlist_node *node;
struct drm_i915_gem_object *obj;
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}
return NULL;
}
static void
eb_destroy(struct eb_objects *eb)
{
kfree(eb);
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
uint32_t target_offset;
int ret = -EINVAL;
/* we've already hold a reference to all valid objects */
target_obj = &eb_get_object(eb, reloc->target_handle)->base;
if (unlikely(target_obj == NULL))
return -ENOENT;
target_offset = to_intel_bo(target_obj)->gtt_offset;
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
DRM_DEBUG("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->write_domain,
target_obj->pending_write_domain);
return ret;
}
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
if (target_offset == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
(int) obj->base.size);
return ret;
}
if (unlikely(reloc->offset & 3)) {
DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
return ret;
}
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
char *vaddr;
vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
/* We can't wait for rendering with pagefaults disabled */
if (obj->active && in_atomic())
return -EFAULT;
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
return ret;
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
iowrite32(reloc->delta, reloc_entry);
io_mapping_unmap_atomic(reloc_page);
}
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
return 0;
}
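/* Worked example of the patching above (illustrative numbers only):
 * userspace emitted the batch assuming the target bo lived at GTT
 * offset 0x10000 and recorded that as reloc->presumed_offset. If the
 * target actually bound at target_offset = 0x40000, the presumed check
 * fails, the dword at reloc->offset is rewritten with
 * reloc->delta + 0x40000, and 0x40000 is written back as the new
 * presumed_offset so an unchanged binding lets the next execbuf skip
 * the rewrite entirely.
 */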
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
for (i = 0; i < entry->relocation_count; i++) {
struct drm_i915_gem_relocation_entry reloc;
if (__copy_from_user_inatomic(&reloc,
user_relocs+i,
sizeof(reloc)))
return -EFAULT;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
if (ret)
return ret;
if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
&reloc.presumed_offset,
sizeof(reloc.presumed_offset)))
return -EFAULT;
}
return 0;
}
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
if (ret)
return ret;
}
return 0;
}
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
* holding the struct mutex lest the user pass in the relocations
* contained within a mmapped bo. In such a case, the page
* fault handler would call i915_gem_fault() and we would try to
* acquire the struct mutex again. Obviously this is bad and so
* lockdep complains vehemently.
*/
pagefault_disable();
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
}
pagefault_enable();
return ret;
}
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
int ret;
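/* Placement heuristic used below: an entry that still carries
 * relocations must be CPU-writable through the mappable aperture (the
 * relocation writes go through dev_priv->mm.gtt_mapping), while a
 * tiled object needing a fence on pre-gen4 hardware must likewise sit
 * in the mappable, fenceable region; so need_mappable only falls back
 * to need_fence when there are no relocations left to process.
 */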
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
return ret;
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
if (obj->tiling_mode) {
ret = i915_gem_object_get_fence(obj, ring);
if (ret)
goto err_unpin;
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
i915_gem_object_pin_fence(obj);
} else {
ret = i915_gem_object_put_fence(obj);
if (ret)
goto err_unpin;
}
obj->pending_fenced_gpu_access = true;
}
}
entry->offset = obj->gtt_offset;
return 0;
err_unpin:
i915_gem_object_unpin(obj);
return ret;
}
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
struct list_head ordered_objects;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
entry = obj->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
else
list_move_tail(&obj->exec_list, &ordered_objects);
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
}
list_splice(&ordered_objects, objects);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
*
* 1a. Unbind all objects that do not match the GTT constraints for
* the execbuffer (fenceable, mappable, alignment etc).
* 1b. Increment pin count for already bound objects.
* 2. Bind new objects.
* 3. Decrement pin count.
*
* This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
*/
retry = 0;
do {
ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
if (!obj->gtt_space)
continue;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
ret = pin_and_fence_object(obj, ring);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (obj->gtt_space)
continue;
ret = pin_and_fence_object(obj, ring);
if (ret) {
int ret_ignore;
/* This can potentially raise a harmless
* -EINVAL if we failed to bind in the above
* call. It cannot raise -EINTR since we know
* that the bo is freshly bound and so will
* not need to be flushed or waited upon.
*/
ret_ignore = i915_gem_object_unbind(obj);
(void)ret_ignore;
WARN_ON(obj->gtt_space);
break;
}
}
/* Decrement pin count for bound objects */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
continue;
entry = obj->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
i915_gem_object_unpin_fence(obj);
entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);
/* ... and ensure ppgtt mapping exist if needed. */
if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, obj->cache_level);
obj->has_aliasing_ppgtt_mapping = 1;
}
}
if (ret != -ENOSPC || retry > 1)
return ret;
/* First attempt, just clear anything that is purgeable.
* Second attempt, clear the entire GTT.
*/
ret = i915_gem_evict_everything(ring->dev, retry == 0);
if (ret)
return ret;
retry++;
} while (1);
err:
list_for_each_entry_continue_reverse(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
continue;
entry = obj->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
i915_gem_object_unpin_fence(obj);
entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);
}
return ret;
}
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct list_head *objects,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
int count)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
int *reloc_offset;
int i, total, ret;
/* We may process another execbuffer during the unlock... */
while (!list_empty(objects)) {
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
total = 0;
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
reloc = drm_malloc_ab(total, sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) {
drm_free_large(reloc);
drm_free_large(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
total = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
}
reloc_offset[i] = total;
total += exec[i].relocation_count;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret) {
mutex_lock(&dev->struct_mutex);
goto err;
}
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
}
list_add_tail(&obj->exec_list, objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
ret = i915_gem_execbuffer_reserve(ring, file, objects);
if (ret)
goto err;
list_for_each_entry(obj, objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
if (ret)
goto err;
}
/* Leave the user relocations as are, this is the painfully slow path,
* and we want to avoid the complication of dropping the lock whilst
* having buffers reserved in the aperture and so causing spurious
* ENOSPC for random operations.
*/
err:
drm_free_large(reloc);
drm_free_large(reloc_offset);
return ret;
}
static int
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains,
uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, ret;
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
return ret;
}
}
return 0;
}
static bool
intel_enable_semaphores(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return false;
if (i915_semaphores >= 0)
return i915_semaphores;
/* Disable semaphores on SNB */
if (INTEL_INFO(dev)->gen == 6)
return false;
return true;
}
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
{
struct intel_ring_buffer *from = obj->ring;
u32 seqno;
int ret, idx;
if (from == NULL || to == from)
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
seqno = obj->last_rendering_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
if (seqno == from->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
ret = i915_add_request(from, NULL, request);
if (ret) {
kfree(request);
return ret;
}
seqno = request->seqno;
}
from->sync_seqno[idx] = seqno;
return to->sync_to(to, from, seqno - 1);
}
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
u32 plane, flip_mask;
int ret;
/* Check for any pending flips. As we only maintain a flip queue depth
* of 1, we can simply insert a WAIT for the next display flip prior
* to executing the batch and avoid stalling the CPU.
*/
for (plane = 0; flips >> plane; plane++) {
if (((flips >> plane) & 1) == 0)
continue;
if (plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
return 0;
}
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct change_domains cd;
int ret;
memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
cd.flush_rings);
if (ret)
return ret;
}
if (cd.flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
if (ret)
return ret;
}
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
if (ret)
return ret;
}
return 0;
}
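/* The offset/length validation below relies on ((a | b) & 7) == 0
 * holding iff both values are 8-byte aligned: ORing merges the low
 * bits, so a stray bit in either operand survives the mask. For
 * example batch_start_offset = 0x100 with batch_len = 0x40 passes,
 * while batch_len = 0x41 fails.
 */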
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int relocs_total = 0;
int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
int i;
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
/* First check for malicious input causing overflow in the worst
 * case where the slow path needs the entire relocation tree as a
 * single array, so bound the running total and not just each
 * individual count. */
if (exec[i].relocation_count > relocs_max - relocs_total)
return -EINVAL;
relocs_total += exec[i].relocation_count;
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
/* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
if (fault_in_pages_readable(ptr, length))
return -EFAULT;
}
return 0;
}
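/* A sketch of the overflow guarded against above: each per-entry
 * count may individually fit under INT_MAX / sizeof(reloc entry), yet
 * the slow path sums all the counts into one int. Sixty-four entries
 * each claiming 0x03000000 relocations sum to 0xC0000000, which wraps
 * negative, so the slow-path allocation would be far smaller than the
 * amount later copied in. Bounding the running total closes the hole.
 */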
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
u32 seqno)
{
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
u32 invalidate;
/*
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires.
*
* The sampler always gets flushed on i965 (sigh).
*/
invalidate = I915_GEM_DOMAIN_COMMAND;
if (INTEL_INFO(dev)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
i915_gem_next_request_seqno(ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(ring, file, request)) {
i915_gem_next_request_seqno(ring);
kfree(request);
}
}
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
ret = intel_ring_begin(ring, 4 * 3);
if (ret)
return ret;
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
}
intel_ring_advance(ring);
return 0;
}
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
return ret;
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4)
return -EINVAL;
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
/* The HW changed the meaning of this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
/* Guard the multiplication below against integer overflow from a
 * huge user-supplied num_cliprects. */
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
}
if (copy_from_user(cliprects,
(struct drm_clip_rect __user *)(uintptr_t)
args->cliprects_ptr,
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
}
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto pre_mutex_err;
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
eb = eb_create(args->buffer_count);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto pre_mutex_err;
}
/* Look up object handles */
INIT_LIST_HEAD(&objects);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
goto err;
}
if (!list_empty(&obj->exec_list)) {
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
}
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
&objects, eb,
exec,
args->buffer_count);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU cannot handle its semaphore value wrapping,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
ret = i915_gpu_idle(dev, true);
if (ret)
goto err;
BUG_ON(ring->sync_seqno[i]);
}
}
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, mask << 16 | mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto err;
}
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
if (ret)
goto err;
}
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
kfree(cliprects);
return ret;
}
/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
*/
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -EFAULT;
}
for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].handle = exec_list[i].handle;
exec2_list[i].relocation_count = exec_list[i].relocation_count;
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
}
exec2.buffers_ptr = args->buffers_ptr;
exec2.buffer_count = args->buffer_count;
exec2.batch_start_offset = args->batch_start_offset;
exec2.batch_len = args->batch_len;
exec2.DR1 = args->DR1;
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec_list);
drm_free_large(exec2_list);
return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
if (args->buffer_count < 1 ||
args->buffer_count > INT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
}
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec2_list);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3653_0 |
crossvul-cpp_data_good_1831_0 | /* fat.c - Read/write access to the FAT
Copyright (C) 1993 Werner Almesberger <werner.almesberger@lrc.di.epfl.ch>
Copyright (C) 1998 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
Copyright (C) 2008-2014 Daniel Baumann <mail@daniel-baumann.ch>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The complete text of the GNU General Public License
can be found in /usr/share/common-licenses/GPL-3 file.
*/
/* FAT32, VFAT, Atari format support, and various fixes additions May 1998
* by Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "common.h"
#include "fsck.fat.h"
#include "io.h"
#include "check.h"
#include "fat.h"
/**
* Fetch the FAT entry for a specified cluster.
*
* @param[out] entry Cluster to which cluster of interest is linked
* @param[in] fat FAT table for the partition
* @param[in] cluster Cluster of interest
* @param[in] fs Information from the FAT boot sectors (bits per FAT entry)
*/
void get_fat(FAT_ENTRY * entry, void *fat, uint32_t cluster, DOS_FS * fs)
{
unsigned char *ptr;
switch (fs->fat_bits) {
case 12:
ptr = &((unsigned char *)fat)[cluster * 3 / 2];
entry->value = 0xfff & (cluster & 1 ? (ptr[0] >> 4) | (ptr[1] << 4) :
(ptr[0] | ptr[1] << 8));
break;
case 16:
entry->value = le16toh(((unsigned short *)fat)[cluster]);
break;
case 32:
/* According to M$, the high 4 bits of a FAT32 entry are reserved and
* are not part of the cluster number. So we cut them off. */
{
uint32_t e = le32toh(((unsigned int *)fat)[cluster]);
entry->value = e & 0xfffffff;
entry->reserved = e >> 28;
}
break;
default:
die("Bad FAT entry size: %d bits.", fs->fat_bits);
}
}
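/* Worked example for the 12-bit case (illustrative bytes): the entries
 * for clusters 2 and 3 share bytes fat[3..5]. With fat[3..5] =
 * 0x34 0xA2 0x01, cluster 2 (even, ptr = &fat[3]) yields
 * 0xfff & (0x34 | 0xA2 << 8) = 0x234, and cluster 3 (odd, ptr =
 * &fat[4]) yields 0xfff & ((0xA2 >> 4) | (0x01 << 4)) = 0x01a; the odd
 * entry's low nibble is packed into the high nibble of the byte it
 * shares with the even entry.
 */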
/**
* Build a bookkeeping structure from the partition's FAT table.
* If the partition has multiple FATs and they don't agree, try to pick a winner,
* and queue a command to overwrite the loser.
* One error that is fixed here is a cluster that links to something out of range.
*
* @param[inout] fs Information about the filesystem
*/
void read_fat(DOS_FS * fs)
{
int eff_size, alloc_size;
uint32_t i;
void *first, *second = NULL;
int first_ok, second_ok;
uint32_t total_num_clusters;
/* Clean up from previous pass */
if (fs->fat)
free(fs->fat);
if (fs->cluster_owner)
free(fs->cluster_owner);
fs->fat = NULL;
fs->cluster_owner = NULL;
total_num_clusters = fs->clusters + 2UL;
eff_size = (total_num_clusters * fs->fat_bits + 7) / 8ULL;
if (fs->fat_bits != 12)
alloc_size = eff_size;
else
/* round up to an even number of FAT entries to avoid special
* casing the last entry in get_fat() */
alloc_size = (total_num_clusters * 12 + 23) / 24 * 3;
first = alloc(alloc_size);
fs_read(fs->fat_start, eff_size, first);
if (fs->nfats > 1) {
second = alloc(alloc_size);
fs_read(fs->fat_start + fs->fat_size, eff_size, second);
}
if (second && memcmp(first, second, eff_size) != 0) {
FAT_ENTRY first_media, second_media;
get_fat(&first_media, first, 0, fs);
get_fat(&second_media, second, 0, fs);
first_ok = (first_media.value & FAT_EXTD(fs)) == FAT_EXTD(fs);
second_ok = (second_media.value & FAT_EXTD(fs)) == FAT_EXTD(fs);
if (first_ok && !second_ok) {
printf("FATs differ - using first FAT.\n");
fs_write(fs->fat_start + fs->fat_size, eff_size, first);
}
if (!first_ok && second_ok) {
printf("FATs differ - using second FAT.\n");
fs_write(fs->fat_start, eff_size, second);
memcpy(first, second, eff_size);
}
if (first_ok && second_ok) {
if (interactive) {
printf("FATs differ but appear to be intact. Use which FAT ?\n"
"1) Use first FAT\n2) Use second FAT\n");
if (get_key("12", "?") == '1') {
fs_write(fs->fat_start + fs->fat_size, eff_size, first);
} else {
fs_write(fs->fat_start, eff_size, second);
memcpy(first, second, eff_size);
}
} else {
printf("FATs differ but appear to be intact. Using first "
"FAT.\n");
fs_write(fs->fat_start + fs->fat_size, eff_size, first);
}
}
if (!first_ok && !second_ok) {
printf("Both FATs appear to be corrupt. Giving up.\n");
exit(1);
}
}
if (second) {
free(second);
}
fs->fat = (unsigned char *)first;
fs->cluster_owner = alloc(total_num_clusters * sizeof(DOS_FILE *));
memset(fs->cluster_owner, 0, (total_num_clusters * sizeof(DOS_FILE *)));
/* Truncate any cluster chains that link to something out of range */
for (i = 2; i < fs->clusters + 2; i++) {
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, i, fs);
if (curEntry.value == 1) {
printf("Cluster %ld out of range (1). Setting to EOF.\n", (long)(i - 2));
set_fat(fs, i, -1);
}
if (curEntry.value >= fs->clusters + 2 &&
(curEntry.value < FAT_MIN_BAD(fs))) {
printf("Cluster %ld out of range (%ld > %ld). Setting to EOF.\n",
(long)(i - 2), (long)curEntry.value, (long)(fs->clusters + 2 - 1));
set_fat(fs, i, -1);
}
}
}
/**
* Update the FAT entry for a specified cluster
* (i.e., change the cluster it links to).
* Queue a command to write out this change.
*
* @param[in,out] fs Information about the filesystem
* @param[in] cluster Cluster to change
* @param[in] new Cluster to link to
* Special values:
* 0 == free cluster
* -1 == end-of-chain
* -2 == bad cluster
*/
void set_fat(DOS_FS * fs, uint32_t cluster, int32_t new)
{
unsigned char *data = NULL;
int size;
loff_t offs;
if (new == -1)
new = FAT_EOF(fs);
else if ((long)new == -2)
new = FAT_BAD(fs);
switch (fs->fat_bits) {
case 12:
data = fs->fat + cluster * 3 / 2;
offs = fs->fat_start + cluster * 3 / 2;
if (cluster & 1) {
FAT_ENTRY prevEntry;
get_fat(&prevEntry, fs->fat, cluster - 1, fs);
data[0] = ((new & 0xf) << 4) | (prevEntry.value >> 8);
data[1] = new >> 4;
} else {
FAT_ENTRY subseqEntry;
if (cluster != fs->clusters + 1)
get_fat(&subseqEntry, fs->fat, cluster + 1, fs);
else
subseqEntry.value = 0;
data[0] = new & 0xff;
data[1] = (new >> 8) | ((0xff & subseqEntry.value) << 4);
}
size = 2;
break;
case 16:
data = fs->fat + cluster * 2;
offs = fs->fat_start + cluster * 2;
*(unsigned short *)data = htole16(new);
size = 2;
break;
case 32:
{
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, cluster, fs);
data = fs->fat + cluster * 4;
offs = fs->fat_start + cluster * 4;
/* According to M$, the high 4 bits of a FAT32 entry are reserved and
* are not part of the cluster number. So we never touch them. */
*(uint32_t *)data = htole32((new & 0xfffffff) |
(curEntry.reserved << 28));
size = 4;
}
break;
default:
die("Bad FAT entry size: %d bits.", fs->fat_bits);
}
fs_write(offs, size, data);
if (fs->nfats > 1) {
fs_write(offs + fs->fat_size, size, data);
}
}
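/* Typical uses elsewhere in this file, matching the special values in
 * the header comment: set_fat(fs, i, -1) terminates a chain at cluster
 * i (read_fat, reclaim_file), set_fat(fs, i, -2) marks it bad
 * (fix_bad), and set_fat(fs, i, 0) frees it (reclaim_free).
 */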
int bad_cluster(DOS_FS * fs, uint32_t cluster)
{
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, cluster, fs);
return FAT_IS_BAD(fs, curEntry.value);
}
/**
* Get the cluster to which the specified cluster is linked.
* If the linked cluster is marked bad, abort.
*
* @param[in] fs Information about the filesystem
* @param[in] cluster Cluster to follow
*
* @return -1 'cluster' is at the end of the chain
* @return Other values Next cluster in this chain
*/
uint32_t next_cluster(DOS_FS * fs, uint32_t cluster)
{
uint32_t value;
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, cluster, fs);
value = curEntry.value;
if (FAT_IS_BAD(fs, value))
die("Internal error: next_cluster on bad cluster");
return FAT_IS_EOF(fs, value) ? -1 : value;
}
loff_t cluster_start(DOS_FS * fs, uint32_t cluster)
{
return fs->data_start + ((loff_t) cluster -
2) * (uint64_t)fs->cluster_size;
}
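/* For instance (hypothetical geometry): with data_start = 0x4200 and
 * cluster_size = 4096, cluster 2, the first data cluster, starts at
 * byte 0x4200 and cluster 10 starts at 0x4200 + (10 - 2) * 4096 =
 * 0xc200. The 64-bit casts keep the product from wrapping on large
 * volumes.
 */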
/**
* Update internal bookkeeping to show that the specified cluster belongs
* to the specified dentry.
*
* @param[in,out] fs Information about the filesystem
* @param[in] cluster Cluster being assigned
* @param[in] owner Information on dentry that owns this cluster
* (may be NULL)
*/
void set_owner(DOS_FS * fs, uint32_t cluster, DOS_FILE * owner)
{
if (fs->cluster_owner == NULL)
die("Internal error: attempt to set owner in non-existent table");
if (owner && fs->cluster_owner[cluster]
&& (fs->cluster_owner[cluster] != owner))
die("Internal error: attempt to change file owner");
fs->cluster_owner[cluster] = owner;
}
DOS_FILE *get_owner(DOS_FS * fs, uint32_t cluster)
{
if (fs->cluster_owner == NULL)
return NULL;
else
return fs->cluster_owner[cluster];
}
void fix_bad(DOS_FS * fs)
{
uint32_t i;
if (verbose)
printf("Checking for bad clusters.\n");
for (i = 2; i < fs->clusters + 2; i++) {
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, i, fs);
if (!get_owner(fs, i) && !FAT_IS_BAD(fs, curEntry.value))
if (!fs_test(cluster_start(fs, i), fs->cluster_size)) {
printf("Cluster %lu is unreadable.\n", (unsigned long)i);
set_fat(fs, i, -2);
}
}
}
void reclaim_free(DOS_FS * fs)
{
int reclaimed;
uint32_t i;
if (verbose)
printf("Checking for unused clusters.\n");
reclaimed = 0;
for (i = 2; i < fs->clusters + 2; i++) {
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, i, fs);
if (!get_owner(fs, i) && curEntry.value &&
!FAT_IS_BAD(fs, curEntry.value)) {
set_fat(fs, i, 0);
reclaimed++;
}
}
if (reclaimed)
printf("Reclaimed %d unused cluster%s (%llu bytes).\n", (int)reclaimed,
reclaimed == 1 ? "" : "s",
(unsigned long long)reclaimed * fs->cluster_size);
}
/**
* Assign the specified owner to all orphan chains (except cycles).
* Break cross-links between orphan chains.
*
* @param[in,out] fs Information about the filesystem
* @param[in] owner dentry to be assigned ownership of orphans
* @param[in,out] num_refs For each orphan cluster [index], how many
* clusters link to it.
* @param[in] start_cluster Where to start scanning for orphans
*/
static void tag_free(DOS_FS * fs, DOS_FILE * owner, uint32_t *num_refs,
uint32_t start_cluster)
{
int prev;
uint32_t i, walk;
if (start_cluster == 0)
start_cluster = 2;
for (i = start_cluster; i < fs->clusters + 2; i++) {
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, i, fs);
/* If the current entry is the head of an un-owned chain... */
if (curEntry.value && !FAT_IS_BAD(fs, curEntry.value) &&
!get_owner(fs, i) && !num_refs[i]) {
prev = 0;
/* Walk the chain, claiming ownership as we go */
for (walk = i; walk != -1; walk = next_cluster(fs, walk)) {
if (!get_owner(fs, walk)) {
set_owner(fs, walk, owner);
} else {
/* We've run into cross-links between orphaned chains,
* or a cycle with a tail.
* Terminate this orphan chain (break the link)
*/
set_fat(fs, prev, -1);
/* This is not necessary because 'walk' is owned and thus
* will never become the head of a chain (the only case
* that would matter during reclaim to files).
* It's easier to decrement than to prove that it's
* unnecessary.
*/
num_refs[walk]--;
break;
}
prev = walk;
}
}
}
}
/**
* Recover orphan chains to files, handling any cycles or cross-links.
*
* @param[in,out] fs Information about the filesystem
*/
void reclaim_file(DOS_FS * fs)
{
DOS_FILE orphan;
int reclaimed, files;
int changed = 0;
uint32_t i, next, walk;
uint32_t *num_refs = NULL; /* Only for orphaned clusters */
uint32_t total_num_clusters;
if (verbose)
printf("Reclaiming unconnected clusters.\n");
total_num_clusters = fs->clusters + 2UL;
num_refs = alloc(total_num_clusters * sizeof(uint32_t));
memset(num_refs, 0, (total_num_clusters * sizeof(uint32_t)));
/* Guarantee that all orphan chains (except cycles) end cleanly
* with an end-of-chain mark.
*/
for (i = 2; i < total_num_clusters; i++) {
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, i, fs);
next = curEntry.value;
if (!get_owner(fs, i) && next && next < fs->clusters + 2) {
/* Cluster is linked, but not owned (orphan) */
FAT_ENTRY nextEntry;
get_fat(&nextEntry, fs->fat, next, fs);
/* Mark it end-of-chain if it links into an owned cluster,
* a free cluster, or a bad cluster.
*/
if (get_owner(fs, next) || !nextEntry.value ||
FAT_IS_BAD(fs, nextEntry.value))
set_fat(fs, i, -1);
else
num_refs[next]++;
}
}
/* Scan until all the orphans are accounted for,
* and all cycles and cross-links are broken
*/
do {
tag_free(fs, &orphan, num_refs, changed);
changed = 0;
/* Any unaccounted-for orphans must be part of a cycle */
for (i = 2; i < total_num_clusters; i++) {
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, i, fs);
if (curEntry.value && !FAT_IS_BAD(fs, curEntry.value) &&
!get_owner(fs, i)) {
if (!num_refs[curEntry.value]--)
die("Internal error: num_refs going below zero");
set_fat(fs, i, -1);
changed = curEntry.value;
printf("Broke cycle at cluster %lu in free chain.\n", (unsigned long)i);
/* If we've created a new chain head,
* tag_free() can claim it
*/
if (num_refs[curEntry.value] == 0)
break;
}
}
}
while (changed);
/* Now we can start recovery */
files = reclaimed = 0;
for (i = 2; i < total_num_clusters; i++)
/* If this cluster is the head of an orphan chain... */
if (get_owner(fs, i) == &orphan && !num_refs[i]) {
DIR_ENT de;
loff_t offset;
files++;
offset = alloc_rootdir_entry(fs, &de, "FSCK%04dREC");
de.start = htole16(i & 0xffff);
if (fs->fat_bits == 32)
de.starthi = htole16(i >> 16);
for (walk = i; walk > 0 && walk != -1;
walk = next_cluster(fs, walk)) {
de.size = htole32(le32toh(de.size) + fs->cluster_size);
reclaimed++;
}
fs_write(offset, sizeof(DIR_ENT), &de);
}
if (reclaimed)
printf("Reclaimed %d unused cluster%s (%llu bytes) in %d chain%s.\n",
reclaimed, reclaimed == 1 ? "" : "s",
(unsigned long long)reclaimed * fs->cluster_size, files,
files == 1 ? "" : "s");
free(num_refs);
}
uint32_t update_free(DOS_FS * fs)
{
uint32_t i;
uint32_t free = 0;
int do_set = 0;
for (i = 2; i < fs->clusters + 2; i++) {
FAT_ENTRY curEntry;
get_fat(&curEntry, fs->fat, i, fs);
if (!get_owner(fs, i) && !FAT_IS_BAD(fs, curEntry.value))
++free;
}
if (!fs->fsinfo_start)
return free;
if (verbose)
printf("Checking free cluster summary.\n");
if (fs->free_clusters != 0xFFFFFFFF) {
if (free != fs->free_clusters) {
printf("Free cluster summary wrong (%ld vs. really %ld)\n",
(long)fs->free_clusters, (long)free);
if (interactive)
printf("1) Correct\n2) Don't correct\n");
else
printf(" Auto-correcting.\n");
if (!interactive || get_key("12", "?") == '1')
do_set = 1;
}
} else {
printf("Free cluster summary uninitialized (should be %ld)\n", (long)free);
if (rw) {
if (interactive)
printf("1) Set it\n2) Leave it uninitialized\n");
else
printf(" Auto-setting.\n");
if (!interactive || get_key("12", "?") == '1')
do_set = 1;
}
}
if (do_set) {
uint32_t le_free = htole32(free);
fs->free_clusters = free;
fs_write(fs->fsinfo_start + offsetof(struct info_sector, free_clusters),
sizeof(le_free), &le_free);
}
return free;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_1831_0 |
crossvul-cpp_data_good_1697_0 | /*
* History:
* Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
* to allow user process control of SCSI devices.
* Development Sponsored by Killy Corp. NY NY
*
* Original driver (sg.c):
* Copyright (C) 1992 Lawrence Foard
* Version 2 and 3 extensions to driver:
* Copyright (C) 1998 - 2014 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
*/
static int sg_version_num = 30536; /* 2 digits for each component */
#define SG_VERSION_STR "3.5.36"
/*
* D. P. Gilbert (dgilbert@interlog.com), notes:
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
* (otherwise the macros compile to empty statements).
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
#include "scsi_logging.h"
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20140603";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0
#define SG_MAX_DEVS 32768
/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type
* of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
* than 16 bytes are "variable length" whose length is a multiple of 4
*/
#define SG_MAX_CDB_SIZE 252
/*
* Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
* Then when using 32 bit integers x * m may overflow during the calculation.
* Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
* calculates the same, but prevents the overflow when both m and d
* are "small" numbers (like HZ and USER_HZ).
* Of course an overflow is unavoidable if the result of muldiv doesn't fit
* in 32 bits.
*/
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
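/* A worked instance of the rearrangement (assuming HZ = 250 and
 * USER_HZ = 100): MULDIV(60000, 250, 100)
 * = ((60000 % 100) * 250) / 100 + (60000 / 100) * 250
 * = 0 + 600 * 250 = 150000 jiffies,
 * matching int(60000 * 250 / 100) while keeping every intermediate
 * product near (X / DIV) * MUL rather than X * MUL, which is what
 * would overflow 32 bits once X approaches INT_MAX / MUL.
 */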
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
of this size (or less if there is not enough memory) will be reserved
for use by this file descriptor. [Deprecated usage: this variable is also
readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1; /* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;
static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;
#define SG_SECTOR_SZ 512
static int sg_add_device(struct device *, struct class_interface *);
static void sg_remove_device(struct device *, struct class_interface *);
static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
file descriptor list for device */
static struct class_interface sg_interface = {
.add_dev = sg_add_device,
.remove_dev = sg_remove_device,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
unsigned bufflen; /* Size of (aggregate) data buffer */
struct page **pages;
int page_order;
char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
struct sg_request *nextrp; /* NULL -> tail request (slist) */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
char orphan; /* 1 -> drop on sight, 0 -> normal */
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
/* done protected by rq_list_lock */
char done; /* 0->before bh, 1->before read, 2->read */
struct request *rq;
struct bio *bio;
struct execute_work ew;
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
struct list_head sfd_siblings; /* protected by device's sfd_lock */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
unsigned save_scat_len; /* original length of trunc. scat. element */
Sg_request *headrp; /* head of request slist, NULL->empty */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
struct mutex open_rel_lock; /* held when in open() or release() */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
struct list_head sfds;
rwlock_t sfd_lock; /* protect access to sfd list */
atomic_t detaching; /* 0->device usable, 1->device detaching */
bool exclude; /* 1->open(O_EXCL) succeeded and is active */
int open_cnt; /* count of opens (perhaps < num(sfds) ) */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
struct kref d_ref;
} Sg_device;
/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, int uptodate);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
const char __user *buf, size_t count, int blocking,
int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
#define sg_printk(prefix, sdp, fmt, a...) \
sdev_prefix_printk(prefix, (sdp)->device, \
(sdp)->disk->disk_name, fmt, ##a)
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
static int
open_wait(Sg_device *sdp, int flags)
{
int retval = 0;
if (flags & O_EXCL) {
while (sdp->open_cnt > 0) {
mutex_unlock(&sdp->open_rel_lock);
retval = wait_event_interruptible(sdp->open_wait,
(atomic_read(&sdp->detaching) ||
!sdp->open_cnt));
mutex_lock(&sdp->open_rel_lock);
if (retval) /* -ERESTARTSYS */
return retval;
if (atomic_read(&sdp->detaching))
return -ENODEV;
}
} else {
while (sdp->exclude) {
mutex_unlock(&sdp->open_rel_lock);
retval = wait_event_interruptible(sdp->open_wait,
(atomic_read(&sdp->detaching) ||
!sdp->exclude));
mutex_lock(&sdp->open_rel_lock);
if (retval) /* -ERESTARTSYS */
return retval;
if (atomic_read(&sdp->detaching))
return -ENODEV;
}
}
return retval;
}
/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
int dev = iminor(inode);
int flags = filp->f_flags;
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
int retval;
nonseekable_open(inode, filp);
if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
return -EPERM; /* Can't lock it with read only access */
sdp = sg_get_dev(dev);
if (IS_ERR(sdp))
return PTR_ERR(sdp);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_open: flags=0x%x\n", flags));
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
retval = scsi_device_get(sdp->device);
if (retval)
goto sg_put;
retval = scsi_autopm_get_device(sdp->device);
if (retval)
goto sdp_put;
/* scsi_block_when_processing_errors() may block so bypass
* check if O_NONBLOCK. Permits SCSI commands to be issued
* during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device))) {
retval = -ENXIO;
/* we are in error recovery for this device */
goto error_out;
}
mutex_lock(&sdp->open_rel_lock);
if (flags & O_NONBLOCK) {
if (flags & O_EXCL) {
if (sdp->open_cnt > 0) {
retval = -EBUSY;
goto error_mutex_locked;
}
} else {
if (sdp->exclude) {
retval = -EBUSY;
goto error_mutex_locked;
}
}
} else {
retval = open_wait(sdp, flags);
if (retval) /* -ERESTARTSYS or -ENODEV */
goto error_mutex_locked;
}
/* N.B. at this point we are holding the open_rel_lock */
if (flags & O_EXCL)
sdp->exclude = true;
if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
sfp = sg_add_sfp(sdp);
if (IS_ERR(sfp)) {
retval = PTR_ERR(sfp);
goto out_undo;
}
filp->private_data = sfp;
sdp->open_cnt++;
mutex_unlock(&sdp->open_rel_lock);
retval = 0;
sg_put:
kref_put(&sdp->d_ref, sg_device_destroy);
return retval;
out_undo:
if (flags & O_EXCL) {
sdp->exclude = false; /* undo if error */
wake_up_interruptible(&sdp->open_wait);
}
error_mutex_locked:
mutex_unlock(&sdp->open_rel_lock);
error_out:
scsi_autopm_put_device(sdp->device);
sdp_put:
scsi_device_put(sdp->device);
goto sg_put;
}
/* Release resources associated with a successful sg_open()
* Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
Sg_device *sdp;
Sg_fd *sfp;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
mutex_lock(&sdp->open_rel_lock);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
sdp->open_cnt--;
/* possibly many open()s waiting on exclude clearing, start many;
* only open(O_EXCL)s wait on 0==open_cnt so only start one */
if (sdp->exclude) {
sdp->exclude = false;
wake_up_interruptible_all(&sdp->open_wait);
} else if (0 == sdp->open_cnt) {
wake_up_interruptible(&sdp->open_wait);
}
mutex_unlock(&sdp->open_rel_lock);
return 0;
}
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
sg_io_hdr_t *hp;
struct sg_header *old_hdr = NULL;
int retval = 0;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
if (!old_hdr)
return -ENOMEM;
if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
if (old_hdr->reply_len < 0) {
if (count >= SZ_SG_IO_HDR) {
sg_io_hdr_t *new_hdr;
new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
if (!new_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
retval =__copy_from_user
(new_hdr, buf, SZ_SG_IO_HDR);
req_pack_id = new_hdr->pack_id;
kfree(new_hdr);
if (retval) {
retval = -EFAULT;
goto free_old_hdr;
}
}
} else
req_pack_id = old_hdr->pack_id;
}
srp = sg_get_rq_mark(sfp, req_pack_id);
if (!srp) { /* now wait on packet to arrive */
if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
if (filp->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto free_old_hdr;
}
retval = wait_event_interruptible(sfp->read_wait,
(atomic_read(&sdp->detaching) ||
(srp = sg_get_rq_mark(sfp, req_pack_id))));
if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
if (retval) {
/* -ERESTARTSYS as signal hit process */
goto free_old_hdr;
}
}
if (srp->header.interface_id != '\0') {
retval = sg_new_read(sfp, buf, count, srp);
goto free_old_hdr;
}
hp = &srp->header;
if (old_hdr == NULL) {
old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
if (! old_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
}
memset(old_hdr, 0, SZ_SG_HEADER);
old_hdr->reply_len = (int) hp->timeout;
old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
old_hdr->pack_id = hp->pack_id;
old_hdr->twelve_byte =
((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
old_hdr->target_status = hp->masked_status;
old_hdr->host_status = hp->host_status;
old_hdr->driver_status = hp->driver_status;
if ((CHECK_CONDITION & hp->masked_status) ||
(DRIVER_SENSE & hp->driver_status))
memcpy(old_hdr->sense_buffer, srp->sense_b,
sizeof (old_hdr->sense_buffer));
switch (hp->host_status) {
/* This setup of 'result' is for backward compatibility and is best
ignored by the user who should use target, host + driver status */
case DID_OK:
case DID_PASSTHROUGH:
case DID_SOFT_ERROR:
old_hdr->result = 0;
break;
case DID_NO_CONNECT:
case DID_BUS_BUSY:
case DID_TIME_OUT:
old_hdr->result = EBUSY;
break;
case DID_BAD_TARGET:
case DID_ABORT:
case DID_PARITY:
case DID_RESET:
case DID_BAD_INTR:
old_hdr->result = EIO;
break;
case DID_ERROR:
old_hdr->result = (srp->sense_b[0] == 0 &&
hp->masked_status == GOOD) ? 0 : EIO;
break;
default:
old_hdr->result = EIO;
break;
}
/* Now copy the result back to the user buffer. */
if (count >= SZ_SG_HEADER) {
if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
buf += SZ_SG_HEADER;
if (count > old_hdr->reply_len)
count = old_hdr->reply_len;
if (count > SZ_SG_HEADER) {
if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
}
} else
count = (old_hdr->result == 0) ? 0 : -EIO;
sg_finish_rem_req(srp);
retval = count;
free_old_hdr:
kfree(old_hdr);
return retval;
}
static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
sg_io_hdr_t *hp = &srp->header;
int err = 0, err2;
int len;
if (count < SZ_SG_IO_HDR) {
err = -EINVAL;
goto err_out;
}
hp->sb_len_wr = 0;
if ((hp->mx_sb_len > 0) && hp->sbp) {
if ((CHECK_CONDITION & hp->masked_status) ||
(DRIVER_SENSE & hp->driver_status)) {
int sb_len = SCSI_SENSE_BUFFERSIZE;
sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
len = (len > sb_len) ? sb_len : len;
if (copy_to_user(hp->sbp, srp->sense_b, len)) {
err = -EFAULT;
goto err_out;
}
hp->sb_len_wr = len;
}
}
if (hp->masked_status || hp->host_status || hp->driver_status)
hp->info |= SG_INFO_CHECK;
if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
err = -EFAULT;
goto err_out;
}
err_out:
err2 = sg_finish_rem_req(srp);
return err ? : err2 ? : count;
}
static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
int mxsize, cmd_size, k;
int input_size, blocking;
unsigned char opcode;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_write: count=%d\n", (int) count));
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
return sg_new_write(sfp, filp, buf, count,
blocking, 0, 0, NULL);
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so that only this write() is affected */
} else {
cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
mxsize -= SZ_SG_HEADER;
input_size -= SZ_SG_HEADER;
if (input_size < 0) {
sg_remove_request(sfp, srp);
return -EIO; /* User did not pass enough bytes for this command. */
}
hp = &srp->header;
hp->interface_id = '\0'; /* indicator of old interface tunnelled */
hp->cmd_len = (unsigned char) cmd_size;
hp->iovec_count = 0;
hp->mx_sb_len = 0;
if (input_size > 0)
hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
if (hp->dxfer_direction == SG_DXFER_TO_DEV)
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
hp->sbp = NULL;
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
if (__copy_from_user(cmnd, buf, cmd_size)) {
sg_remove_request(sfp, srp); /* match the other error paths: drop the queued request */
return -EFAULT;
}
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but it is possible that the app intended SG_DXFER_TO_DEV, because there
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
static char cmd[TASK_COMM_LEN];
if (strcmp(current->comm, cmd)) {
printk_ratelimited(KERN_WARNING
"sg_write: data in/out %d/%d bytes "
"for SCSI command 0x%x-- guessing "
"data in;\n program %s not setting "
"count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
strcpy(cmd, current->comm);
}
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
}
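/*
 * Common entry for v3 sg_io_hdr requests, used by both write(2) and
 * the synchronous SG_IO ioctl (sg_io_owned tells them apart). An
 * MMAP_IO request must fit in the reserve buffer, may not also ask
 * for DIRECT_IO, and requires that the reserve buffer be free.
 */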
static ssize_t
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
size_t count, int blocking, int read_only, int sg_io_owned,
Sg_request **o_srp)
{
int k;
Sg_request *srp;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
int timeout;
unsigned long ul_timeout;
if (count < SZ_SG_IO_HDR)
return -EINVAL;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_new_write: queue full\n"));
return -EDOM;
}
srp->sg_io_owned = sg_io_owned;
hp = &srp->header;
if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (hp->interface_id != 'S') {
sg_remove_request(sfp, srp);
return -ENOSYS;
}
if (hp->flags & SG_FLAG_MMAP_IO) {
if (hp->dxfer_len > sfp->reserve.bufflen) {
sg_remove_request(sfp, srp);
return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
}
if (hp->flags & SG_FLAG_DIRECT_IO) {
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
if (sg_res_in_use(sfp)) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
}
ul_timeout = msecs_to_jiffies(srp->header.timeout);
timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
sg_remove_request(sfp, srp);
return -EMSGSIZE;
}
if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
}
if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
if (read_only && sg_allow_access(file, cmnd)) {
sg_remove_request(sfp, srp);
return -EPERM;
}
k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
if (k < 0)
return k;
if (o_srp)
*o_srp = srp;
return count;
}
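/*
 * Final leg shared by both interfaces: reset the header status
 * fields, build the block-layer request, timestamp the header and
 * fire the command off asynchronously. Completion runs in
 * sg_rq_end_io(), which drops the fd reference taken here.
 */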
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
{
int k, at_head;
Sg_device *sdp = sfp->parentdp;
sg_io_hdr_t *hp = &srp->header;
srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
hp->status = 0;
hp->masked_status = 0;
hp->msg_status = 0;
hp->info = 0;
hp->host_status = 0;
hp->driver_status = 0;
hp->resid = 0;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio)
blk_end_request_all(srp->rq, -EIO);
sg_finish_rem_req(srp);
return -ENODEV;
}
hp->duration = jiffies_to_msecs(jiffies);
if (hp->interface_id != '\0' && /* v3 (or later) interface */
(SG_FLAG_Q_AT_TAIL & hp->flags))
at_head = 0;
else
at_head = 1;
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
srp->rq, at_head, sg_rq_end_io);
return 0;
}
static int srp_done(Sg_fd *sfp, Sg_request *srp)
{
unsigned long flags;
int ret;
read_lock_irqsave(&sfp->rq_list_lock, flags);
ret = srp->done;
read_unlock_irqrestore(&sfp->rq_list_lock, flags);
return ret;
}
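/*
 * Convert a queue's max_sectors limit to bytes, clamping first so
 * that the shift by 9 (512-byte sectors) cannot overflow an int.
 */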
static int max_sectors_bytes(struct request_queue *q)
{
unsigned int max_sectors = queue_max_sectors(q);
max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
return max_sectors << 9;
}
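/*
 * Per-fd ioctl multiplexer. SG_IO issues a v3 request through
 * sg_new_write() and sleeps until it completes (an interrupted waiter
 * leaves an orphan behind). An illustrative userspace sketch of the
 * synchronous path -- an assumption for documentation, not code from
 * this driver, with error handling omitted:
 *
 *	unsigned char cdb[6] = { 0 };	// TEST UNIT READY, as an example
 *	unsigned char sense[32];
 *	sg_io_hdr_t io = { 0 };
 *	io.interface_id = 'S';		// mandatory, see sg_new_write()
 *	io.cmd_len = sizeof(cdb);
 *	io.cmdp = cdb;
 *	io.mx_sb_len = sizeof(sense);
 *	io.sbp = sense;
 *	io.dxfer_direction = SG_DXFER_NONE;
 *	io.timeout = 5000;		// milliseconds
 *	if (ioctl(fd, SG_IO, &io) < 0)
 *		perror("SG_IO");
 */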
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
void __user *p = (void __user *)arg;
int __user *ip = p;
int result, val, read_only;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
unsigned long iflags;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
switch (cmd_in) {
case SG_IO:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
(srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
if (atomic_read(&sdp->detaching))
return -ENODEV;
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
write_unlock_irq(&sfp->rq_list_lock);
result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
return (result < 0) ? result : 0;
}
srp->orphan = 1;
write_unlock_irq(&sfp->rq_list_lock);
return result; /* -ERESTARTSYS because signal hit process */
case SG_SET_TIMEOUT:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EIO;
if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
val = MULDIV (INT_MAX, USER_HZ, HZ);
sfp->timeout_user = val;
sfp->timeout = MULDIV (val, HZ, USER_HZ);
return 0;
case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
/* strange ..., for backward compatibility */
return sfp->timeout_user;
case SG_SET_FORCE_LOW_DMA:
result = get_user(val, ip);
if (result)
return result;
if (val) {
sfp->low_dma = 1;
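/* note: low_dma was set to 1 just above, so this branch can never be taken */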
if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
val = (int) sfp->reserve.bufflen;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
} else {
if (atomic_read(&sdp->detaching))
return -ENODEV;
sfp->low_dma = sdp->device->host->unchecked_isa_dma;
}
return 0;
case SG_GET_LOW_DMA:
return put_user((int) sfp->low_dma, ip);
case SG_GET_SCSI_ID:
if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
return -EFAULT;
else {
sg_scsi_id_t __user *sg_idp = p;
if (atomic_read(&sdp->detaching))
return -ENODEV;
__put_user((int) sdp->device->host->host_no,
&sg_idp->host_no);
__put_user((int) sdp->device->channel,
&sg_idp->channel);
__put_user((int) sdp->device->id, &sg_idp->scsi_id);
__put_user((int) sdp->device->lun, &sg_idp->lun);
__put_user((int) sdp->device->type, &sg_idp->scsi_type);
__put_user((short) sdp->device->host->cmd_per_lun,
&sg_idp->h_cmd_per_lun);
__put_user((short) sdp->device->queue_depth,
&sg_idp->d_queue_depth);
__put_user(0, &sg_idp->unused[0]);
__put_user(0, &sg_idp->unused[1]);
return 0;
}
case SG_SET_FORCE_PACK_ID:
result = get_user(val, ip);
if (result)
return result;
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
__put_user(srp->header.pack_id, ip);
return 0;
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
__put_user(-1, ip);
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return put_user(val, ip);
case SG_GET_SG_TABLESIZE:
return put_user(sdp->sg_tablesize, ip);
case SG_SET_RESERVED_SIZE:
result = get_user(val, ip);
if (result)
return result;
if (val < 0)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
max_sectors_bytes(sdp->device->request_queue));
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
if (result)
return result;
sfp->cmd_q = val ? 1 : 0;
return 0;
case SG_GET_COMMAND_Q:
return put_user((int) sfp->cmd_q, ip);
case SG_SET_KEEP_ORPHAN:
result = get_user(val, ip);
if (result)
return result;
sfp->keep_orphan = val;
return 0;
case SG_GET_KEEP_ORPHAN:
return put_user((int) sfp->keep_orphan, ip);
case SG_NEXT_CMD_LEN:
result = get_user(val, ip);
if (result)
return result;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
return put_user(sg_version_num, ip);
case SG_GET_ACCESS_COUNT:
/* faked - we don't have a real access count anymore */
val = (sdp->device ? 1 : 0);
return put_user(val, ip);
case SG_GET_REQUEST_TABLE:
if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
return -EFAULT;
else {
sg_req_info_t *rinfo;
unsigned int ms;
rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
++val, srp = srp ? srp->nextrp : srp) {
memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
if (srp) {
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
srp->header.masked_status &
srp->header.host_status &
srp->header.driver_status;
if (srp->done)
rinfo[val].duration =
srp->header.duration;
else {
ms = jiffies_to_msecs(jiffies);
rinfo[val].duration =
(ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
}
rinfo[val].orphan = srp->orphan;
rinfo[val].sg_io_owned =
srp->sg_io_owned;
rinfo[val].pack_id =
srp->header.pack_id;
rinfo[val].usr_ptr =
srp->header.usr_ptr;
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
return result;
}
case SG_EMULATED_HOST:
if (atomic_read(&sdp->detaching))
return -ENODEV;
return put_user(sdp->device->host->hostt->emulated, ip);
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (read_only) {
unsigned char opcode = WRITE_6;
Scsi_Ioctl_Command __user *siocp = p;
if (copy_from_user(&opcode, siocp->data, 1))
return -EFAULT;
if (sg_allow_access(filp, &opcode))
return -EPERM;
}
return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
return result;
sdp->sgdebug = (char) val;
return 0;
case BLKSECTGET:
return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
sdp->disk->disk_name,
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
NULL,
(char *)arg);
case BLKTRACESTART:
return blk_trace_startstop(sdp->device->request_queue, 1);
case BLKTRACESTOP:
return blk_trace_startstop(sdp->device->request_queue, 0);
case BLKTRACETEARDOWN:
return blk_trace_remove(sdp->device->request_queue);
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_TRANSFORM:
case SG_SCSI_RESET:
if (atomic_read(&sdp->detaching))
return -ENODEV;
break;
default:
if (read_only)
return -EPERM; /* don't know so take safe approach */
break;
}
result = scsi_ioctl_block_when_processing_errors(sdp->device,
cmd_in, filp->f_flags & O_NDELAY);
if (result)
return result;
return scsi_ioctl(sdp->device, cmd_in, p);
}
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
Sg_device *sdp;
Sg_fd *sfp;
struct scsi_device *sdev;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
sdev = sdp->device;
if (sdev->host->hostt->compat_ioctl) {
int ret;
ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
return ret;
}
return -ENOIOCTLCMD;
}
#endif
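/*
 * poll(2) support: readable once a completed, non-SG_IO-owned request
 * is waiting; writable while the fixed request queue has room (or,
 * with command queuing off, only while it is empty).
 */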
static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
unsigned int res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
int count = 0;
unsigned long iflags;
sfp = filp->private_data;
if (!sfp)
return POLLERR;
sdp = sfp->parentdp;
if (!sdp)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
++count;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (atomic_read(&sdp->detaching))
res |= POLLHUP;
else if (!sfp->cmd_q) {
if (0 == count)
res |= POLLOUT | POLLWRNORM;
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_poll: res=0x%x\n", (int) res));
return res;
}
static int
sg_fasync(int fd, struct file *filp, int mode)
{
Sg_device *sdp;
Sg_fd *sfp;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_fasync: mode=%d\n", mode));
return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
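/*
 * Page fault handler for mappings of the reserve buffer: walk the
 * scatter list element by element until the faulting offset falls
 * inside one, then hand back (and take a reference on) that page.
 */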
static int
sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
Sg_fd *sfp;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
return VM_FAULT_SIGBUS;
rsv_schp = &sfp->reserve;
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= rsv_schp->bufflen)
return VM_FAULT_SIGBUS;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
"sg_vma_fault: offset=%lu, scatg=%d\n",
offset, rsv_schp->k_use_sg));
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
len = (len < length) ? len : length;
if (offset < len) {
struct page *page = nth_page(rsv_schp->pages[k],
offset >> PAGE_SHIFT);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
}
sa += len;
offset -= len;
}
return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct sg_mmap_vm_ops = {
.fault = sg_vma_fault,
};
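/*
 * mmap(2) maps the per-fd reserve buffer (the offset must be 0 and
 * the length may not exceed the reserved size); pages are supplied
 * lazily by sg_vma_fault(). Illustrative userspace sketch -- an
 * assumption for documentation, not code from this driver:
 *
 *	int rsz = 0;
 *	ioctl(fd, SG_GET_RESERVED_SIZE, &rsz);
 *	void *p = mmap(NULL, rsz, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0);
 *	// later v3 requests set SG_FLAG_MMAP_IO in sg_io_hdr_t.flags
 */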
static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
Sg_fd *sfp;
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
req_sz = vma->vm_end - vma->vm_start;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
"sg_mmap starting, vm_start=%p, len=%d\n",
(void *) vma->vm_start, (int) req_sz));
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
if (req_sz > rsv_schp->bufflen)
return -ENOMEM; /* cannot map more than reserved buffer */
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
len = (len < length) ? len : length;
sa += len;
}
sfp->mmap_called = 1;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
return 0;
}
static void
sg_rq_end_io_usercontext(struct work_struct *work)
{
struct sg_request *srp = container_of(work, struct sg_request, ew.work);
struct sg_fd *sfp = srp->parentfp;
sg_finish_rem_req(srp);
kref_put(&sfp->f_ref, sg_remove_sfp);
}
/*
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
static void
sg_rq_end_io(struct request *rq, int uptodate)
{
struct sg_request *srp = rq->end_io_data;
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
unsigned int ms;
char *sense;
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
return;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
return;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
sense = rq->sense;
result = rq->errors;
resid = rq->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
srp->header.pack_id, result));
srp->header.resid = resid;
ms = jiffies_to_msecs(jiffies);
srp->header.duration = (ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
if (0 != result) {
struct scsi_sense_hdr sshdr;
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
srp->header.msg_status = msg_byte(result);
srp->header.host_status = host_byte(result);
srp->header.driver_status = driver_byte(result);
if ((sdp->sgdebug > 0) &&
((CHECK_CONDITION == srp->header.masked_status) ||
(COMMAND_TERMINATED == srp->header.masked_status)))
__scsi_print_sense(sdp->device, __func__, sense,
SCSI_SENSE_BUFFERSIZE);
/* Following if statement is a patch supplied by Eric Youngdale */
if (driver_byte(result) != 0
&& scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
&& !scsi_sense_is_deferred(&sshdr)
&& sshdr.sense_key == UNIT_ATTENTION
&& sdp->device->removable) {
/* Detected possible disc change. Set the bit - this */
/* may be used if there are filesystems using this device */
sdp->device->changed = 1;
}
}
/* Rely on write phase to clean out srp status values, so no "else" */
/*
* Free the request as soon as it is complete so that its resources
* can be reused without waiting for userspace to read() the
* result. But keep the associated bio (if any) around until
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
__blk_put_request(rq->q, rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
if (sfp->keep_orphan)
srp->sg_io_owned = 0;
else
done = 0;
}
srp->done = done;
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (likely(done)) {
/* Now wake up any sg_read() that is waiting for this
* packet.
*/
wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
kref_put(&sfp->f_ref, sg_remove_sfp);
} else {
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
}
static const struct file_operations sg_fops = {
.owner = THIS_MODULE,
.read = sg_read,
.write = sg_write,
.poll = sg_poll,
.unlocked_ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sg_compat_ioctl,
#endif
.open = sg_open,
.mmap = sg_mmap,
.release = sg_release,
.fasync = sg_fasync,
.llseek = no_llseek,
};
static struct class *sg_sysfs_class;
static int sg_sysfs_valid = 0;
static Sg_device *
sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
unsigned long iflags;
int error;
u32 k;
sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
if (!sdp) {
sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
"failure\n", __func__);
return ERR_PTR(-ENOMEM);
}
idr_preload(GFP_KERNEL);
write_lock_irqsave(&sg_index_lock, iflags);
error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
if (error < 0) {
if (error == -ENOSPC) {
sdev_printk(KERN_WARNING, scsidp,
"Unable to attach sg device type=%d, minor number exceeds %d\n",
scsidp->type, SG_MAX_DEVS - 1);
error = -ENODEV;
} else {
sdev_printk(KERN_WARNING, scsidp, "%s: idr "
"allocation Sg_device failure: %d\n",
__func__, error);
}
goto out_unlock;
}
k = error;
SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
"sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
mutex_init(&sdp->open_rel_lock);
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->open_wait);
atomic_set(&sdp->detaching, 0);
rwlock_init(&sdp->sfd_lock);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
error = 0;
out_unlock:
write_unlock_irqrestore(&sg_index_lock, iflags);
idr_preload_end();
if (error) {
kfree(sdp);
return ERR_PTR(error);
}
return sdp;
}
static int
sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
struct gendisk *disk;
Sg_device *sdp = NULL;
struct cdev * cdev = NULL;
int error;
unsigned long iflags;
disk = alloc_disk(1);
if (!disk) {
pr_warn("%s: alloc_disk failed\n", __func__);
return -ENOMEM;
}
disk->major = SCSI_GENERIC_MAJOR;
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
pr_warn("%s: cdev_alloc failed\n", __func__);
goto out;
}
cdev->owner = THIS_MODULE;
cdev->ops = &sg_fops;
sdp = sg_alloc(disk, scsidp);
if (IS_ERR(sdp)) {
pr_warn("%s: sg_alloc failed\n", __func__);
error = PTR_ERR(sdp);
goto out;
}
error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
if (error)
goto cdev_add_err;
sdp->cdev = cdev;
if (sg_sysfs_valid) {
struct device *sg_class_member;
sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
MKDEV(SCSI_GENERIC_MAJOR,
sdp->index),
sdp, "%s", disk->disk_name);
if (IS_ERR(sg_class_member)) {
pr_err("%s: device_create failed\n", __func__);
error = PTR_ERR(sg_class_member);
goto cdev_add_err;
}
error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
&sg_class_member->kobj, "generic");
if (error)
pr_err("%s: unable to make symlink 'generic' back "
"to sg%d\n", __func__, sdp->index);
} else
pr_warn("%s: sg_sys Invalid\n", __func__);
sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
"type %d\n", sdp->index, scsidp->type);
dev_set_drvdata(cl_dev, sdp);
return 0;
cdev_add_err:
write_lock_irqsave(&sg_index_lock, iflags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, iflags);
kfree(sdp);
out:
put_disk(disk);
if (cdev)
cdev_del(cdev);
return error;
}
static void
sg_device_destroy(struct kref *kref)
{
struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
unsigned long flags;
/* CAUTION! Note that the device can still be found via idr_find()
* even though the refcount is 0. Therefore, do idr_remove() BEFORE
* any other cleanup.
*/
write_lock_irqsave(&sg_index_lock, flags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, flags);
SCSI_LOG_TIMEOUT(3,
sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
put_disk(sdp->disk);
kfree(sdp);
}
static void
sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = dev_get_drvdata(cl_dev);
unsigned long iflags;
Sg_fd *sfp;
int val;
if (!sdp)
return;
/* want sdp->detaching non-zero as soon as possible */
val = atomic_inc_return(&sdp->detaching);
if (val > 1)
return; /* only want to do following once per device */
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"%s\n", __func__));
read_lock_irqsave(&sdp->sfd_lock, iflags);
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible_all(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
wake_up_interruptible_all(&sdp->open_wait);
read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
kref_put(&sdp->d_ref, sg_device_destroy);
}
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
"size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
static int __init
init_sg(void)
{
int rc;
if (scatter_elem_sz < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = scatter_elem_sz;
}
if (def_reserved_size >= 0)
sg_big_buff = def_reserved_size;
else
def_reserved_size = sg_big_buff;
rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS, "sg");
if (rc)
return rc;
sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
if ( IS_ERR(sg_sysfs_class) ) {
rc = PTR_ERR(sg_sysfs_class);
goto err_out;
}
sg_sysfs_valid = 1;
rc = scsi_register_interface(&sg_interface);
if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
sg_proc_init();
#endif /* CONFIG_SCSI_PROC_FS */
return 0;
}
class_destroy(sg_sysfs_class);
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
return rc;
}
static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
sg_proc_cleanup();
#endif /* CONFIG_SCSI_PROC_FS */
scsi_unregister_interface(&sg_interface);
class_destroy(sg_sysfs_class);
sg_sysfs_valid = 0;
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS);
idr_destroy(&sg_index_idr);
}
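/*
 * Build and map the block-layer request for one sg command. When
 * allowed and properly aligned, direct I/O maps the user buffer
 * itself; otherwise the transfer is staged through the reserve
 * buffer (if free and large enough) or a freshly built indirect
 * scatter list.
 */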
static int
sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
unsigned char *long_cmdp = NULL;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
if (hp->cmd_len > BLK_MAX_CDB) {
long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
if (!long_cmdp)
return -ENOMEM;
}
/*
* NOTE
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
* preallocated requests are already in use, then using GFP_ATOMIC with
* blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
* will cause blk_get_request() to sleep until an active command
* completes, freeing up a request. Neither option is ideal, but
* GFP_KERNEL is the better choice to prevent userspace from getting an
* unexpected EWOULDBLOCK.
*
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
blk_rq_set_block_pc(rq);
if (hp->cmd_len > BLK_MAX_CDB)
rq->cmd = long_cmdp;
memcpy(rq->cmd, cmd, hp->cmd_len);
rq->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
rq->sense = srp->sense_b;
rq->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
!sfp->parentdp->device->host->unchecked_isa_dma &&
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
md = NULL;
else
md = &map_data;
if (md) {
if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
sg_link_reserve(sfp, srp, dxfer_len);
else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
if (res)
return res;
}
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
md->nr_entries = req_schp->k_use_sg;
md->offset = 0;
md->null_mapped = hp->dxferp ? 0 : 1;
if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
md->from_user = 1;
else
md->from_user = 0;
}
if (iov_count) {
struct iovec *iov = NULL;
struct iov_iter i;
res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
if (res < 0)
return res;
iov_iter_truncate(&i, hp->dxfer_len);
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
hp->dxfer_len, GFP_ATOMIC);
if (!res) {
srp->bio = rq->bio;
if (!md) {
req_schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
}
}
return res;
}
static int
sg_finish_rem_req(Sg_request *srp)
{
int ret = 0;
Sg_fd *sfp = srp->parentfp;
Sg_scatter_hold *req_schp = &srp->data;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_finish_rem_req: res_used=%d\n",
(int) srp->res_used));
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
if (srp->rq) {
if (srp->rq->cmd != srp->rq->__cmd)
kfree(srp->rq->cmd);
blk_put_request(srp->rq);
}
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
else
sg_remove_scat(sfp, req_schp);
sg_remove_request(sfp, srp);
return ret;
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
int sg_bufflen = tablesize * sizeof(struct page *);
gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
schp->pages = kzalloc(sg_bufflen, gfp_flags);
if (!schp->pages)
return -ENOMEM;
schp->sglist_len = sg_bufflen;
return tablesize; /* number of scat_gath elements allocated */
}
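/*
 * Allocate the indirect buffer as up to mx_sc_elems chunks of
 * 2^order pages each. If an allocation fails, everything gathered so
 * far is freed and the loop retries with a halved order until the
 * order drops below zero.
 */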
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
if (blk_size < 0)
return -EFAULT;
if (0 == blk_size)
++blk_size; /* avoid a zero-length buffer */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: buff_size=%d, blk_size=%d\n",
buff_size, blk_size));
/* N.B. ret_sz carried into this block ... */
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
if (mx_sc_elems < 0)
return mx_sc_elems; /* most likely -ENOMEM */
num = scatter_elem_sz;
if (unlikely(num != scatter_elem_sz_prev)) {
if (num < PAGE_SIZE) {
scatter_elem_sz = PAGE_SIZE;
scatter_elem_sz_prev = PAGE_SIZE;
} else
scatter_elem_sz_prev = num;
}
if (sfp->low_dma)
gfp_mask |= GFP_DMA;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
gfp_mask |= __GFP_ZERO;
order = get_order(num);
retry:
ret_sz = 1 << (PAGE_SHIFT + order);
for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
k++, rem_sz -= ret_sz) {
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
schp->pages[k] = alloc_pages(gfp_mask, order);
if (!schp->pages[k])
goto out;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
scatter_elem_sz = ret_sz;
scatter_elem_sz_prev = ret_sz;
}
}
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
k, num, ret_sz));
} /* end of for loop */
schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
k, rem_sz));
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
return 0;
out:
for (i = 0; i < k; i++)
__free_pages(schp->pages[i], order);
if (--order >= 0)
goto retry;
return -ENOMEM;
}
static void
sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
{
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
if (schp->pages && schp->sglist_len > 0) {
if (!schp->dio_in_use) {
int k;
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
SCSI_LOG_TIMEOUT(5,
sg_printk(KERN_INFO, sfp->parentdp,
"sg_remove_scat: k=%d, pg=0x%p\n",
k, schp->pages[k]));
__free_pages(schp->pages[k], schp->page_order);
}
kfree(schp->pages);
}
}
memset(schp, 0, sizeof (*schp));
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
Sg_scatter_hold *schp = &srp->data;
int k, num;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
"sg_read_oxfer: num_read_xfer=%d\n",
num_read_xfer));
if ((!outp) || (num_read_xfer <= 0))
return 0;
num = 1 << (PAGE_SHIFT + schp->page_order);
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
if (__copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
if (__copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
if (num_read_xfer <= 0)
break;
outp += num;
}
}
return 0;
}
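/*
 * Size the per-fd reserve buffer: start from the requested size and
 * keep halving (down to one page) until sg_build_indirect() succeeds.
 */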
static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
Sg_scatter_hold *schp = &sfp->reserve;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_build_reserve: req_size=%d\n", req_size));
do {
if (req_size < PAGE_SIZE)
req_size = PAGE_SIZE;
if (0 == sg_build_indirect(schp, sfp, req_size))
return;
else
sg_remove_scat(sfp, schp);
req_size >>= 1; /* divide by 2 */
} while (req_size > (PAGE_SIZE / 2));
}
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
int k, num, rem;
srp->res_used = 1;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_link_reserve: size=%d\n", size));
rem = size;
num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg; k++) {
if (rem <= num) {
req_schp->k_use_sg = k + 1;
req_schp->sglist_len = rsv_schp->sglist_len;
req_schp->pages = rsv_schp->pages;
req_schp->bufflen = size;
req_schp->page_order = rsv_schp->page_order;
break;
} else
rem -= num;
}
if (k >= rsv_schp->k_use_sg)
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_link_reserve: BAD size\n"));
}
static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
"sg_unlink_reserve: req->k_use_sg=%d\n",
(int) req_schp->k_use_sg));
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
}
static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
Sg_request *resp;
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
for (resp = sfp->headrp; resp; resp = resp->nextrp) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
resp->done = 2; /* guard against other readers */
break;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
}
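/*
 * Requests live in the fixed per-fd array req_arr[SG_MAX_QUEUE]; a
 * free slot is one whose parentfp is NULL. With command queuing off,
 * only a single outstanding request is permitted.
 */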
/* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
resp = sfp->headrp;
if (!resp) {
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
resp = rp;
sfp->headrp = resp;
} else {
if (0 == sfp->cmd_q)
resp = NULL; /* command queuing disallowed */
else {
for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
if (!rp->parentfp)
break;
}
if (k < SG_MAX_QUEUE) {
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
while (resp->nextrp)
resp = resp->nextrp;
resp->nextrp = rp;
resp = rp;
} else
resp = NULL;
}
}
if (resp) {
resp->nextrp = NULL;
resp->header.duration = jiffies_to_msecs(jiffies);
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
Sg_request *prev_rp;
Sg_request *rp;
unsigned long iflags;
int res = 0;
if ((!sfp) || (!srp) || (!sfp->headrp))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
prev_rp = sfp->headrp;
if (srp == prev_rp) {
sfp->headrp = prev_rp->nextrp;
prev_rp->parentfp = NULL;
res = 1;
} else {
while ((rp = prev_rp->nextrp)) {
if (srp == rp) {
prev_rp->nextrp = rp->nextrp;
rp->parentfp = NULL;
res = 1;
break;
}
prev_rp = rp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
}
static Sg_fd *
sg_add_sfp(Sg_device * sdp)
{
Sg_fd *sfp;
unsigned long iflags;
int bufflen;
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
kref_init(&sfp->f_ref);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
sdp->device->host->unchecked_isa_dma : 1;
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
write_lock_irqsave(&sdp->sfd_lock, iflags);
if (atomic_read(&sdp->detaching)) {
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
return ERR_PTR(-ENODEV);
}
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
max_sectors_bytes(sdp->device->request_queue));
sg_build_reserve(sfp, bufflen);
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
sfp->reserve.bufflen,
sfp->reserve.k_use_sg));
kref_get(&sdp->d_ref);
__module_get(THIS_MODULE);
return sfp;
}
static void
sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
/* Cleanup any responses which were never read(). */
while (sfp->headrp)
sg_finish_rem_req(sfp->headrp);
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
"sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen,
(int) sfp->reserve.k_use_sg));
sg_remove_scat(sfp, &sfp->reserve);
}
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
"sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
scsi_device_put(sdp->device);
kref_put(&sdp->d_ref, sg_device_destroy);
module_put(THIS_MODULE);
}
static void
sg_remove_sfp(struct kref *kref)
{
struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
write_lock_irqsave(&sdp->sfd_lock, iflags);
list_del(&sfp->sfd_siblings);
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
}
static int
sg_res_in_use(Sg_fd * sfp)
{
const Sg_request *srp;
unsigned long iflags;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp)
if (srp->res_used)
break;
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return srp ? 1 : 0;
}
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
int *k = data;
if (*k < id)
*k = id;
return 0;
}
static int
sg_last_dev(void)
{
int k = -1;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
read_unlock_irqrestore(&sg_index_lock, iflags);
return k + 1; /* origin 1 */
}
#endif
/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
return idr_find(&sg_index_idr, dev);
}
static Sg_device *
sg_get_dev(int dev)
{
struct sg_device *sdp;
unsigned long flags;
read_lock_irqsave(&sg_index_lock, flags);
sdp = sg_lookup_dev(dev);
if (!sdp)
sdp = ERR_PTR(-ENXIO);
else if (atomic_read(&sdp->detaching)) {
/* If sdp->detaching, then the refcount may already be 0, in
* which case it would be a bug to do kref_get().
*/
sdp = ERR_PTR(-ENODEV);
} else
kref_get(&sdp->d_ref);
read_unlock_irqrestore(&sg_index_lock, flags);
return sdp;
}
#ifdef CONFIG_SCSI_PROC_FS
static struct proc_dir_entry *sg_proc_sgp = NULL;
static char sg_proc_sg_dirname[] = "scsi/sg";
static int sg_proc_seq_show_int(struct seq_file *s, void *v);
static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off);
static const struct file_operations adio_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_adio,
.read = seq_read,
.llseek = seq_lseek,
.write = sg_proc_write_adio,
.release = single_release,
};
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_dressz,
.read = seq_read,
.llseek = seq_lseek,
.write = sg_proc_write_dressz,
.release = single_release,
};
static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_version,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
.owner = THIS_MODULE,
.open = sg_proc_single_open_devhdr,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_dev,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_dev,
};
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_devstrs,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_devstrs,
};
static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
.owner = THIS_MODULE,
.open = sg_proc_open_debug,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = sg_proc_seq_show_debug,
};
struct sg_proc_leaf {
const char * name;
const struct file_operations * fops;
};
static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
{"allow_dio", &adio_fops},
{"debug", &debug_fops},
{"def_reserved_size", &dressz_fops},
{"device_hdr", &devhdr_fops},
{"devices", &dev_fops},
{"device_strs", &devstrs_fops},
{"version", &version_fops}
};
static int
sg_proc_init(void)
{
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
int k;
sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
}
return 0;
}
static void
sg_proc_cleanup(void)
{
int k;
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
if (!sg_proc_sgp)
return;
for (k = 0; k < num_leaves; ++k)
remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
remove_proc_entry(sg_proc_sg_dirname, NULL);
}
static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
seq_printf(s, "%d\n", *((int *)s->private));
return 0;
}
static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}
static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
int err;
unsigned long num;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
err = kstrtoul_from_user(buffer, count, 0, &num);
if (err)
return err;
sg_allow_dio = num ? 1 : 0;
return count;
}
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}
static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
int err;
unsigned long k = ULONG_MAX;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
err = kstrtoul_from_user(buffer, count, 0, &k);
if (err)
return err;
if (k <= 1048576) { /* limit "big buff" to 1 MB */
sg_big_buff = k;
return count;
}
return -ERANGE;
}
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
sg_version_date);
return 0;
}
static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_version, NULL);
}
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
return 0;
}
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
return single_open(file, sg_proc_seq_show_devhdr, NULL);
}
struct sg_proc_deviter {
loff_t index;
size_t max;
};
static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
s->private = it;
if (! it)
return NULL;
it->index = *pos;
it->max = sg_last_dev();
if (it->index >= it->max)
return NULL;
return it;
}
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct sg_proc_deviter * it = s->private;
*pos = ++it->index;
return (it->index < it->max) ? it : NULL;
}
static void dev_seq_stop(struct seq_file *s, void *v)
{
kfree(s->private);
}
static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
return seq_open(file, &dev_seq_ops);
}
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if ((NULL == sdp) || (NULL == sdp->device) ||
(atomic_read(&sdp->detaching)))
seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
else {
scsidp = sdp->device;
seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
(int) atomic_read(&scsidp->device_busy),
(int) scsi_device_online(scsidp));
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
return seq_open(file, &devstrs_seq_ops);
}
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
scsidp = sdp ? sdp->device : NULL;
if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
scsidp->vendor, scsidp->model, scsidp->rev);
else
seq_puts(s, "<no active device>\n");
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, m, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
const char * cp;
unsigned int ms;
k = 0;
list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
k++;
read_lock(&fp->rq_list_lock); /* irqs already disabled */
seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
"(res)sgat=%d low_dma=%d\n", k,
jiffies_to_msecs(fp->timeout),
fp->reserve.bufflen,
(int) fp->reserve.k_use_sg,
(int) fp->low_dma);
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
for (m = 0, srp = fp->headrp;
srp != NULL;
++m, srp = srp->nextrp) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
cp = " rb>> ";
} else {
if (SG_INFO_DIRECT_IO_MASK & hp->info)
cp = " dio>> ";
else
cp = " ";
}
seq_puts(s, cp);
blen = srp->data.bufflen;
usg = srp->data.k_use_sg;
seq_puts(s, srp->done ?
((1 == srp->done) ? "rcv:" : "fin:")
: "act:");
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
seq_printf(s, " dur=%d", hp->duration);
else {
ms = jiffies_to_msecs(jiffies);
seq_printf(s, " t_o/elap=%d/%d",
(new_interface ? hp->timeout :
jiffies_to_msecs(fp->timeout)),
(ms > hp->duration ? ms - hp->duration : 0));
}
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
if (0 == m)
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
}
static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
return seq_open(file, &debug_seq_ops);
}
static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
unsigned long iflags;
if (it && (0 == it->index))
seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
(int)it->max, sg_big_buff);
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if (NULL == sdp)
goto skip;
read_lock(&sdp->sfd_lock);
if (!list_empty(&sdp->sfds)) {
seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
if (atomic_read(&sdp->detaching))
seq_puts(s, "detaching pending close ");
else if (sdp->device) {
struct scsi_device *scsidp = sdp->device;
seq_printf(s, "%d:%d:%d:%llu em=%d",
scsidp->host->host_no,
scsidp->channel, scsidp->id,
scsidp->lun,
scsidp->host->hostt->emulated);
}
seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
sg_proc_debug_helper(s, sdp);
}
read_unlock(&sdp->sfd_lock);
skip:
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
#endif /* CONFIG_SCSI_PROC_FS */
module_init(init_sg);
module_exit(exit_sg);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_1697_0 |
crossvul-cpp_data_bad_3527_1 | /*
* fs/nfs/nfs4xdr.c
*
* Client-side XDR for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/param.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/kdev_t.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include "nfs4_fs.h"
#include "internal.h"
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_XDR
/* Mapping from NFS error code to "errno" error code. */
#define errno_NFSERR_IO EIO
static int nfs4_stat_to_errno(int);
/* NFSv4 COMPOUND tags are only wanted for debugging purposes */
#ifdef DEBUG
#define NFS4_MAXTAGLEN 20
#else
#define NFS4_MAXTAGLEN 0
#endif
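/*
 * The *_maxsz values below are worst-case encode/decode sizes counted
 * in 32-bit XDR words (hence XDR_QUADLEN() and the ">> 2" arithmetic);
 * they are used to size the RPC send and receive buffers.
 */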
/* lock, open owner id:
* we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
*/
#define open_owner_id_maxsz (1 + 1 + 4)
#define lock_owner_id_maxsz (1 + 1 + 4)
#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define op_encode_hdr_maxsz (1)
#define op_decode_hdr_maxsz (2)
#define encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
#define encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define decode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define encode_putfh_maxsz (op_encode_hdr_maxsz + 1 + \
(NFS4_FHSIZE >> 2))
#define decode_putfh_maxsz (op_decode_hdr_maxsz)
#define encode_putrootfh_maxsz (op_encode_hdr_maxsz)
#define decode_putrootfh_maxsz (op_decode_hdr_maxsz)
#define encode_getfh_maxsz (op_encode_hdr_maxsz)
#define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \
((3+NFS4_FHSIZE) >> 2))
#define nfs4_fattr_bitmap_maxsz 4
#define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
#define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
/* This is based on getfattr, which uses the most attributes: */
#define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz))
#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \
nfs4_fattr_value_maxsz)
#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
#define encode_attrs_maxsz (nfs4_fattr_bitmap_maxsz + \
1 + 2 + 1 + \
nfs4_owner_maxsz + \
nfs4_group_maxsz + \
4 + 4)
#define encode_savefh_maxsz (op_encode_hdr_maxsz)
#define decode_savefh_maxsz (op_decode_hdr_maxsz)
#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
#define decode_restorefh_maxsz (op_decode_hdr_maxsz)
#define encode_fsinfo_maxsz (encode_getattr_maxsz)
/* The 5 accounts for the PNFS attributes, and assumes that at most three
* layout types will be returned.
*/
#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + \
nfs4_fattr_bitmap_maxsz + 4 + 8 + 5)
#define encode_renew_maxsz (op_encode_hdr_maxsz + 3)
#define decode_renew_maxsz (op_decode_hdr_maxsz)
#define encode_setclientid_maxsz \
(op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_VERIFIER_SIZE) + \
XDR_QUADLEN(NFS4_SETCLIENTID_NAMELEN) + \
1 /* sc_prog */ + \
XDR_QUADLEN(RPCBIND_MAXNETIDLEN) + \
XDR_QUADLEN(RPCBIND_MAXUADDRLEN) + \
1) /* sc_cb_ident */
#define decode_setclientid_maxsz \
(op_decode_hdr_maxsz + \
2 + \
1024) /* large value for CLID_INUSE */
#define encode_setclientid_confirm_maxsz \
(op_encode_hdr_maxsz + \
3 + (NFS4_VERIFIER_SIZE >> 2))
#define decode_setclientid_confirm_maxsz \
(op_decode_hdr_maxsz)
#define encode_lookup_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
#define decode_lookup_maxsz (op_decode_hdr_maxsz)
#define encode_share_access_maxsz \
(2)
#define encode_createmode_maxsz (1 + encode_attrs_maxsz + encode_verifier_maxsz)
#define encode_opentype_maxsz (1 + encode_createmode_maxsz)
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
2 + encode_share_access_maxsz + 2 + \
open_owner_id_maxsz + \
encode_opentype_maxsz + \
encode_claim_null_maxsz)
#define decode_ace_maxsz (3 + nfs4_owner_maxsz)
#define decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \
decode_ace_maxsz)
#define decode_change_info_maxsz (5)
#define decode_open_maxsz (op_decode_hdr_maxsz + \
decode_stateid_maxsz + \
decode_change_info_maxsz + 1 + \
nfs4_fattr_bitmap_maxsz + \
decode_delegation_maxsz)
#define encode_open_confirm_maxsz \
(op_encode_hdr_maxsz + \
encode_stateid_maxsz + 1)
#define decode_open_confirm_maxsz \
(op_decode_hdr_maxsz + \
decode_stateid_maxsz)
#define encode_open_downgrade_maxsz \
(op_encode_hdr_maxsz + \
encode_stateid_maxsz + 1 + \
encode_share_access_maxsz)
#define decode_open_downgrade_maxsz \
(op_decode_hdr_maxsz + \
decode_stateid_maxsz)
#define encode_close_maxsz (op_encode_hdr_maxsz + \
1 + encode_stateid_maxsz)
#define decode_close_maxsz (op_decode_hdr_maxsz + \
decode_stateid_maxsz)
#define encode_setattr_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + \
encode_attrs_maxsz)
#define decode_setattr_maxsz (op_decode_hdr_maxsz + \
nfs4_fattr_bitmap_maxsz)
#define encode_read_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 3)
#define decode_read_maxsz (op_decode_hdr_maxsz + 2)
#define encode_readdir_maxsz (op_encode_hdr_maxsz + \
2 + encode_verifier_maxsz + 5)
#define decode_readdir_maxsz (op_decode_hdr_maxsz + \
decode_verifier_maxsz)
#define encode_readlink_maxsz (op_encode_hdr_maxsz)
#define decode_readlink_maxsz (op_decode_hdr_maxsz + 1)
#define encode_write_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 4)
#define decode_write_maxsz (op_decode_hdr_maxsz + \
2 + decode_verifier_maxsz)
#define encode_commit_maxsz (op_encode_hdr_maxsz + 3)
#define decode_commit_maxsz (op_decode_hdr_maxsz + \
decode_verifier_maxsz)
#define encode_remove_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_remove_maxsz (op_decode_hdr_maxsz + \
decode_change_info_maxsz)
#define encode_rename_maxsz (op_encode_hdr_maxsz + \
2 * nfs4_name_maxsz)
#define decode_rename_maxsz (op_decode_hdr_maxsz + \
decode_change_info_maxsz + \
decode_change_info_maxsz)
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
#define encode_lockowner_maxsz (7)
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 1 + \
encode_lockowner_maxsz)
#define decode_lock_denied_maxsz \
(8 + decode_lockowner_maxsz)
#define decode_lock_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
encode_lockowner_maxsz)
#define decode_lockt_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
#define encode_locku_maxsz (op_encode_hdr_maxsz + 3 + \
encode_stateid_maxsz + \
4)
#define decode_locku_maxsz (op_decode_hdr_maxsz + \
decode_stateid_maxsz)
#define encode_release_lockowner_maxsz \
(op_encode_hdr_maxsz + \
encode_lockowner_maxsz)
#define decode_release_lockowner_maxsz \
(op_decode_hdr_maxsz)
#define encode_access_maxsz (op_encode_hdr_maxsz + 1)
#define decode_access_maxsz (op_decode_hdr_maxsz + 2)
#define encode_symlink_maxsz (op_encode_hdr_maxsz + \
1 + nfs4_name_maxsz + \
1 + \
nfs4_fattr_maxsz)
#define decode_symlink_maxsz (op_decode_hdr_maxsz + 8)
#define encode_create_maxsz (op_encode_hdr_maxsz + \
1 + 2 + nfs4_name_maxsz + \
encode_attrs_maxsz)
#define decode_create_maxsz (op_decode_hdr_maxsz + \
decode_change_info_maxsz + \
nfs4_fattr_bitmap_maxsz)
#define encode_statfs_maxsz (encode_getattr_maxsz)
#define decode_statfs_maxsz (decode_getattr_maxsz)
#define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4)
#define decode_delegreturn_maxsz (op_decode_hdr_maxsz)
#define encode_getacl_maxsz (encode_getattr_maxsz)
#define decode_getacl_maxsz (op_decode_hdr_maxsz + \
nfs4_fattr_bitmap_maxsz + 1)
#define encode_setacl_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + 3)
#define decode_setacl_maxsz (decode_setattr_maxsz)
#define encode_fs_locations_maxsz \
(encode_getattr_maxsz)
#define decode_fs_locations_maxsz \
(0)
#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
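/* The decode_secinfo_maxsz estimate above allows roughly 16 bytes of
 * fixed per-flavor data plus a maximal GSS OID for each of up to
 * NFS_MAX_SECFLAVORS flavors; dividing by 4 converts bytes to words. */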
#if defined(CONFIG_NFS_V4_1)
#define NFS4_MAX_MACHINE_NAME_LEN (64)
#define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \
encode_verifier_maxsz + \
1 /* co_ownerid.len */ + \
XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \
1 /* flags */ + \
1 /* spa_how */ + \
0 /* SP4_NONE (for now) */ + \
1 /* zero implementation id array */)
#define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \
2 /* eir_clientid */ + \
1 /* eir_sequenceid */ + \
1 /* eir_flags */ + \
1 /* spr_how */ + \
0 /* SP4_NONE (for now) */ + \
2 /* eir_server_owner.so_minor_id */ + \
/* eir_server_owner.so_major_id<> */ \
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
/* eir_server_scope<> */ \
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
1 /* eir_server_impl_id array length */ + \
0 /* ignored eir_server_impl_id contents */)
#define encode_channel_attrs_maxsz (6 + 1 /* ca_rdma_ird.len (0) */)
#define decode_channel_attrs_maxsz (6 + \
1 /* ca_rdma_ird.len */ + \
1 /* ca_rdma_ird */)
#define encode_create_session_maxsz (op_encode_hdr_maxsz + \
2 /* csa_clientid */ + \
1 /* csa_sequence */ + \
1 /* csa_flags */ + \
encode_channel_attrs_maxsz + \
encode_channel_attrs_maxsz + \
1 /* csa_cb_program */ + \
1 /* csa_sec_parms.len (1) */ + \
1 /* cb_secflavor (AUTH_SYS) */ + \
1 /* stamp */ + \
1 /* machinename.len */ + \
XDR_QUADLEN(NFS4_MAX_MACHINE_NAME_LEN) + \
1 /* uid */ + \
1 /* gid */ + \
1 /* gids.len (0) */)
#define decode_create_session_maxsz (op_decode_hdr_maxsz + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1 /* csr_sequence */ + \
1 /* csr_flags */ + \
decode_channel_attrs_maxsz + \
decode_channel_attrs_maxsz)
#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4)
#define decode_destroy_session_maxsz (op_decode_hdr_maxsz)
#define encode_sequence_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
#define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4)
#define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4)
#define encode_getdevicelist_maxsz (op_encode_hdr_maxsz + 4 + \
encode_verifier_maxsz)
#define decode_getdevicelist_maxsz (op_decode_hdr_maxsz + \
2 /* nfs_cookie4 gdlr_cookie */ + \
decode_verifier_maxsz \
/* verifier4 gdlr_verifier */ + \
1 /* gdlr_deviceid_list count */ + \
XDR_QUADLEN(NFS4_PNFS_GETDEVLIST_MAXNUM * \
NFS4_DEVICEID4_SIZE) \
/* gdlr_deviceid_list */ + \
1 /* bool gdlr_eof */)
#define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + 4 + \
XDR_QUADLEN(NFS4_DEVICEID4_SIZE))
#define decode_getdeviceinfo_maxsz (op_decode_hdr_maxsz + \
1 /* layout type */ + \
1 /* opaque devaddr4 length */ + \
/* devaddr4 payload is read into page */ \
1 /* notification bitmap length */ + \
1 /* notification bitmap */)
#define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \
encode_stateid_maxsz)
#define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \
decode_stateid_maxsz + \
XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE))
#define encode_layoutcommit_maxsz (op_encode_hdr_maxsz + \
2 /* offset */ + \
2 /* length */ + \
1 /* reclaim */ + \
encode_stateid_maxsz + \
1 /* new offset (true) */ + \
2 /* last byte written */ + \
1 /* nt_timechanged (false) */ + \
1 /* layoutupdate4 layout type */ + \
1 /* NULL filelayout layoutupdate4 payload */)
#define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3)
#define encode_layoutreturn_maxsz (8 + op_encode_hdr_maxsz + \
encode_stateid_maxsz + \
1 /* FIXME: opaque lrf_body always empty at the moment */)
#define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \
1 + decode_stateid_maxsz)
#define encode_secinfo_no_name_maxsz (op_encode_hdr_maxsz + 1)
#define decode_secinfo_no_name_maxsz decode_secinfo_maxsz
#define encode_test_stateid_maxsz (op_encode_hdr_maxsz + 2 + \
XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_test_stateid_maxsz (op_decode_hdr_maxsz + 2 + 1)
#define encode_free_stateid_maxsz (op_encode_hdr_maxsz + 1 + \
XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_free_stateid_maxsz (op_decode_hdr_maxsz + 1)
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
#endif /* CONFIG_NFS_V4_1 */
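/*
 * The NFS4_enc_*_sz/NFS4_dec_*_sz values below size whole COMPOUND
 * requests and replies by summing the compound header and each
 * operation. Because encode/decode_sequence_maxsz collapse to 0 when
 * CONFIG_NFS_V4_1 is not set, the same templates serve both minor
 * versions.
 */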
#define NFS4_enc_compound_sz (1024) /* XXX: large enough? */
#define NFS4_dec_compound_sz (1024) /* XXX: large enough? */
#define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_read_maxsz)
#define NFS4_dec_read_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_read_maxsz)
#define NFS4_enc_readlink_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_readlink_maxsz)
#define NFS4_dec_readlink_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_readlink_maxsz)
#define NFS4_enc_readdir_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_readdir_maxsz)
#define NFS4_dec_readdir_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_readdir_maxsz)
#define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_write_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_write_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_commit_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_commit_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_open_maxsz + \
encode_getfh_maxsz + \
encode_getattr_maxsz + \
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_open_maxsz + \
decode_getfh_maxsz + \
decode_getattr_maxsz + \
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_confirm_sz \
(compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_open_confirm_maxsz)
#define NFS4_dec_open_confirm_sz \
(compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
decode_open_confirm_maxsz)
#define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_downgrade_sz \
(compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_downgrade_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_downgrade_sz \
(compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_downgrade_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_close_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_close_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_setattr_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_setattr_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_fsinfo_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_fsinfo_maxsz)
#define NFS4_dec_fsinfo_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_renew_sz (compound_encode_hdr_maxsz + \
encode_renew_maxsz)
#define NFS4_dec_renew_sz (compound_decode_hdr_maxsz + \
decode_renew_maxsz)
#define NFS4_enc_setclientid_sz (compound_encode_hdr_maxsz + \
encode_setclientid_maxsz)
#define NFS4_dec_setclientid_sz (compound_decode_hdr_maxsz + \
decode_setclientid_maxsz)
#define NFS4_enc_setclientid_confirm_sz \
(compound_encode_hdr_maxsz + \
encode_setclientid_confirm_maxsz + \
encode_putrootfh_maxsz + \
encode_fsinfo_maxsz)
#define NFS4_dec_setclientid_confirm_sz \
(compound_decode_hdr_maxsz + \
decode_setclientid_confirm_maxsz + \
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_lock_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lock_maxsz)
#define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lock_maxsz)
#define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lockt_maxsz)
#define NFS4_dec_lockt_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lockt_maxsz)
#define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_locku_maxsz)
#define NFS4_dec_locku_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_locku_maxsz)
#define NFS4_enc_release_lockowner_sz \
(compound_encode_hdr_maxsz + \
encode_lockowner_maxsz)
#define NFS4_dec_release_lockowner_sz \
(compound_decode_hdr_maxsz + \
decode_release_lockowner_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_access_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_access_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_access_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putrootfh_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_lookup_root_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_remove_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_remove_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
encode_rename_maxsz + \
encode_getattr_maxsz + \
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
decode_rename_maxsz + \
decode_getattr_maxsz + \
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
encode_link_maxsz + \
encode_getattr_maxsz + \
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
decode_link_maxsz + \
decode_getattr_maxsz + \
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_symlink_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_symlink_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_create_maxsz + \
encode_getfh_maxsz + \
encode_getattr_maxsz + \
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_create_maxsz + \
decode_getfh_maxsz + \
decode_getattr_maxsz + \
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_pathconf_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_statfs_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_statfs_maxsz)
#define NFS4_dec_statfs_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_statfs_maxsz)
#define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_delegreturn_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_delegreturn_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getacl_maxsz)
#define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getacl_maxsz)
#define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_setacl_maxsz)
#define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_setacl_maxsz)
#define NFS4_enc_fs_locations_sz \
(compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
encode_fs_locations_maxsz)
#define NFS4_dec_fs_locations_sz \
(compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_fs_locations_maxsz)
#define NFS4_enc_secinfo_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_secinfo_maxsz)
#define NFS4_dec_secinfo_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_secinfo_maxsz)
#if defined(CONFIG_NFS_V4_1)
#define NFS4_enc_exchange_id_sz \
(compound_encode_hdr_maxsz + \
encode_exchange_id_maxsz)
#define NFS4_dec_exchange_id_sz \
(compound_decode_hdr_maxsz + \
decode_exchange_id_maxsz)
#define NFS4_enc_create_session_sz \
(compound_encode_hdr_maxsz + \
encode_create_session_maxsz)
#define NFS4_dec_create_session_sz \
(compound_decode_hdr_maxsz + \
decode_create_session_maxsz)
#define NFS4_enc_destroy_session_sz (compound_encode_hdr_maxsz + \
encode_destroy_session_maxsz)
#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \
decode_destroy_session_maxsz)
#define NFS4_enc_sequence_sz \
(compound_encode_hdr_maxsz + \
encode_sequence_maxsz)
#define NFS4_dec_sequence_sz \
(compound_decode_hdr_maxsz + \
decode_sequence_maxsz)
#define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putrootfh_maxsz + \
encode_fsinfo_maxsz)
#define NFS4_dec_get_lease_time_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_reclaim_complete_maxsz)
#define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_reclaim_complete_maxsz)
#define NFS4_enc_getdevicelist_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getdevicelist_maxsz)
#define NFS4_dec_getdevicelist_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getdevicelist_maxsz)
#define NFS4_enc_getdeviceinfo_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz +\
encode_getdeviceinfo_maxsz)
#define NFS4_dec_getdeviceinfo_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_getdeviceinfo_maxsz)
#define NFS4_enc_layoutget_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_layoutget_maxsz)
#define NFS4_dec_layoutget_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_layoutget_maxsz)
#define NFS4_enc_layoutcommit_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz +\
encode_putfh_maxsz + \
encode_layoutcommit_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_layoutcommit_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_layoutcommit_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_layoutreturn_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_layoutreturn_maxsz)
#define NFS4_dec_layoutreturn_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_layoutreturn_maxsz)
#define NFS4_enc_secinfo_no_name_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putrootfh_maxsz +\
encode_secinfo_no_name_maxsz)
#define NFS4_dec_secinfo_no_name_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_secinfo_no_name_maxsz)
#define NFS4_enc_test_stateid_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_test_stateid_maxsz)
#define NFS4_dec_test_stateid_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_test_stateid_maxsz)
#define NFS4_enc_free_stateid_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_free_stateid_maxsz)
#define NFS4_dec_free_stateid_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_free_stateid_maxsz)
const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
compound_encode_hdr_maxsz +
encode_sequence_maxsz +
encode_putfh_maxsz +
encode_getattr_maxsz) *
XDR_UNIT);
const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
compound_decode_hdr_maxsz +
decode_sequence_maxsz +
decode_putfh_maxsz) *
XDR_UNIT);
#endif /* CONFIG_NFS_V4_1 */
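/*
 * The overhead constants above are in bytes (hence the XDR_UNIT
 * multiply); they are presumably subtracted from the session's
 * negotiated maximum request/response sizes to bound the usable
 * read/write payload.
 */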
static const umode_t nfs_type2fmt[] = {
[NF4BAD] = 0,
[NF4REG] = S_IFREG,
[NF4DIR] = S_IFDIR,
[NF4BLK] = S_IFBLK,
[NF4CHR] = S_IFCHR,
[NF4LNK] = S_IFLNK,
[NF4SOCK] = S_IFSOCK,
[NF4FIFO] = S_IFIFO,
[NF4ATTRDIR] = 0,
[NF4NAMEDATTR] = 0,
};
struct compound_hdr {
int32_t status;
uint32_t nops;
__be32 * nops_p;
uint32_t taglen;
char * tag;
uint32_t replen; /* expected reply words */
u32 minorversion;
};
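/*
 * The stream helpers below never fail: the caller is expected to have
 * sized the send buffer from the *_maxsz estimates above, so running
 * out of stream space is treated as a programming error (BUG).
 */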
static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p = xdr_reserve_space(xdr, nbytes);
BUG_ON(!p);
return p;
}
static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
__be32 *p;
p = xdr_reserve_space(xdr, 4 + len);
BUG_ON(p == NULL);
xdr_encode_opaque(p, str, len);
}
static void encode_compound_hdr(struct xdr_stream *xdr,
struct rpc_rqst *req,
struct compound_hdr *hdr)
{
__be32 *p;
struct rpc_auth *auth = req->rq_cred->cr_auth;
/* initialize running count of expected bytes in reply.
 * NOTE: the replied tag SHOULD be the same as the one sent,
 * but the server is not required to honour that. */
hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen;
dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
p = reserve_space(xdr, 4 + hdr->taglen + 8);
p = xdr_encode_opaque(p, hdr->tag, hdr->taglen);
*p++ = cpu_to_be32(hdr->minorversion);
hdr->nops_p = p;
*p = cpu_to_be32(hdr->nops);
}
static void encode_nops(struct compound_hdr *hdr)
{
BUG_ON(hdr->nops > NFS4_MAX_OPS);
*hdr->nops_p = htonl(hdr->nops);
}
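/*
 * Operation-count pattern: encode_compound_hdr() remembers the location
 * of the nops word (hdr->nops_p) and writes a provisional count; each
 * encode_*() helper increments hdr->nops, and encode_nops() backfills
 * the final value once the whole compound has been encoded.
 */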
static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf)
{
__be32 *p;
p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
BUG_ON(p == NULL);
xdr_encode_opaque_fixed(p, verf->data, NFS4_VERIFIER_SIZE);
}
static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server)
{
char owner_name[IDMAP_NAMESZ];
char owner_group[IDMAP_NAMESZ];
int owner_namelen = 0;
int owner_grouplen = 0;
__be32 *p;
__be32 *q;
int len;
uint32_t bmval0 = 0;
uint32_t bmval1 = 0;
/*
 * We reserve enough space to write the entire attribute buffer at once.
 * In the common case this is
 * 12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
 * = 36 bytes; the client-set time cases (ATTR_ATIME_SET/ATTR_MTIME_SET)
 * take 16 bytes each instead of 4, and variable-length fields such as
 * owner/group add their own contribution.
 */
len = 16;
/* Sigh */
if (iap->ia_valid & ATTR_SIZE)
len += 8;
if (iap->ia_valid & ATTR_MODE)
len += 4;
if (iap->ia_valid & ATTR_UID) {
owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ);
if (owner_namelen < 0) {
dprintk("nfs: couldn't resolve uid %d to string\n",
iap->ia_uid);
/* XXX */
strcpy(owner_name, "nobody");
owner_namelen = sizeof("nobody") - 1;
/* goto out; */
}
len += 4 + (XDR_QUADLEN(owner_namelen) << 2);
}
if (iap->ia_valid & ATTR_GID) {
owner_grouplen = nfs_map_gid_to_group(server, iap->ia_gid, owner_group, IDMAP_NAMESZ);
if (owner_grouplen < 0) {
dprintk("nfs: couldn't resolve gid %d to string\n",
iap->ia_gid);
strcpy(owner_group, "nobody");
owner_grouplen = sizeof("nobody") - 1;
/* goto out; */
}
len += 4 + (XDR_QUADLEN(owner_grouplen) << 2);
}
if (iap->ia_valid & ATTR_ATIME_SET)
len += 16;
else if (iap->ia_valid & ATTR_ATIME)
len += 4;
if (iap->ia_valid & ATTR_MTIME_SET)
len += 16;
else if (iap->ia_valid & ATTR_MTIME)
len += 4;
p = reserve_space(xdr, len);
/*
* We write the bitmap length now, but leave the bitmap and the attribute
* buffer length to be backfilled at the end of this routine.
*/
*p++ = cpu_to_be32(2);
q = p;
p += 3;
if (iap->ia_valid & ATTR_SIZE) {
bmval0 |= FATTR4_WORD0_SIZE;
p = xdr_encode_hyper(p, iap->ia_size);
}
if (iap->ia_valid & ATTR_MODE) {
bmval1 |= FATTR4_WORD1_MODE;
*p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
}
if (iap->ia_valid & ATTR_UID) {
bmval1 |= FATTR4_WORD1_OWNER;
p = xdr_encode_opaque(p, owner_name, owner_namelen);
}
if (iap->ia_valid & ATTR_GID) {
bmval1 |= FATTR4_WORD1_OWNER_GROUP;
p = xdr_encode_opaque(p, owner_group, owner_grouplen);
}
if (iap->ia_valid & ATTR_ATIME_SET) {
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(iap->ia_atime.tv_sec);
*p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
}
else if (iap->ia_valid & ATTR_ATIME) {
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
if (iap->ia_valid & ATTR_MTIME_SET) {
bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
*p++ = cpu_to_be32(0);
*p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
*p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
}
else if (iap->ia_valid & ATTR_MTIME) {
bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
/*
* Now we backfill the bitmap and the attribute buffer length.
*/
if (len != ((char *)p - (char *)q) + 4) {
printk(KERN_ERR "nfs: Attr length error, %u != %Zu\n",
len, ((char *)p - (char *)q) + 4);
BUG();
}
len = (char *)p - (char *)q - 12;
*q++ = htonl(bmval0);
*q++ = htonl(bmval1);
*q = htonl(len);
/* out: */
}
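/*
 * For illustration, a bare chmod encodes as: bitmap length 2,
 * bmval0 = 0, bmval1 = FATTR4_WORD1_MODE, attrlen = 4, followed by the
 * single mode word -- 20 bytes in all, matching len = 16 + 4 above.
 */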
static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(OP_ACCESS);
*p = cpu_to_be32(access);
hdr->nops++;
hdr->replen += decode_access_maxsz;
}
static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8+NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_CLOSE);
*p++ = cpu_to_be32(arg->seqid->sequence->counter);
xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_close_maxsz;
}
static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 16);
*p++ = cpu_to_be32(OP_COMMIT);
p = xdr_encode_hyper(p, args->offset);
*p = cpu_to_be32(args->count);
hdr->nops++;
hdr->replen += decode_commit_maxsz;
}
static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(OP_CREATE);
*p = cpu_to_be32(create->ftype);
switch (create->ftype) {
case NF4LNK:
p = reserve_space(xdr, 4);
*p = cpu_to_be32(create->u.symlink.len);
xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len);
break;
case NF4BLK: case NF4CHR:
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(create->u.device.specdata1);
*p = cpu_to_be32(create->u.device.specdata2);
break;
default:
break;
}
encode_string(xdr, create->name->len, create->name->name);
hdr->nops++;
hdr->replen += decode_create_maxsz;
encode_attrs(xdr, create->attrs, create->server);
}
static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 12);
*p++ = cpu_to_be32(OP_GETATTR);
*p++ = cpu_to_be32(1);
*p = cpu_to_be32(bitmap);
hdr->nops++;
hdr->replen += decode_getattr_maxsz;
}
static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 16);
*p++ = cpu_to_be32(OP_GETATTR);
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bm0);
*p = cpu_to_be32(bm1);
hdr->nops++;
hdr->replen += decode_getattr_maxsz;
}
static void
encode_getattr_three(struct xdr_stream *xdr,
uint32_t bm0, uint32_t bm1, uint32_t bm2,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_GETATTR);
if (bm2) {
p = reserve_space(xdr, 16);
*p++ = cpu_to_be32(3);
*p++ = cpu_to_be32(bm0);
*p++ = cpu_to_be32(bm1);
*p = cpu_to_be32(bm2);
} else if (bm1) {
p = reserve_space(xdr, 12);
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bm0);
*p = cpu_to_be32(bm1);
} else {
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(1);
*p = cpu_to_be32(bm0);
}
hdr->nops++;
hdr->replen += decode_getattr_maxsz;
}
static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
encode_getattr_two(xdr, bitmask[0] & nfs4_fattr_bitmap[0],
bitmask[1] & nfs4_fattr_bitmap[1], hdr);
}
static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
encode_getattr_three(xdr,
bitmask[0] & nfs4_fsinfo_bitmap[0],
bitmask[1] & nfs4_fsinfo_bitmap[1],
bitmask[2] & nfs4_fsinfo_bitmap[2],
hdr);
}
static void encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
encode_getattr_two(xdr, bitmask[0] & nfs4_fs_locations_bitmap[0],
bitmask[1] & nfs4_fs_locations_bitmap[1], hdr);
}
static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_GETFH);
hdr->nops++;
hdr->replen += decode_getfh_maxsz;
}
static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8 + name->len);
*p++ = cpu_to_be32(OP_LINK);
xdr_encode_opaque(p, name->name, name->len);
hdr->nops++;
hdr->replen += decode_link_maxsz;
}
static inline int nfs4_lock_type(struct file_lock *fl, int block)
{
if ((fl->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) == F_RDLCK)
return block ? NFS4_READW_LT : NFS4_READ_LT;
return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
}
static inline uint64_t nfs4_lock_length(struct file_lock *fl)
{
if (fl->fl_end == OFFSET_MAX)
return ~(uint64_t)0;
return fl->fl_end - fl->fl_start + 1;
}
static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner)
{
__be32 *p;
p = reserve_space(xdr, 32);
p = xdr_encode_hyper(p, lowner->clientid);
*p++ = cpu_to_be32(20);
p = xdr_encode_opaque_fixed(p, "lock id:", 8);
*p++ = cpu_to_be32(lowner->s_dev);
xdr_encode_hyper(p, lowner->id);
}
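/*
 * The 20-byte owner written above is "lock id:" (8) + s_dev (4) + the
 * 64-bit id (8), which is what the cpu_to_be32(20) length word
 * announces; together with the clientid and the length word that
 * accounts for the 32 bytes reserved.
 */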
/*
 * opcode,type,reclaim,offset,length,new_lock_owner = 32
 * if new_lock_owner: open_seqid 4 + open_stateid 16 + lock_seqid 4,
 * followed by the lock_owner (see encode_lockowner above)
 */
static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 32);
*p++ = cpu_to_be32(OP_LOCK);
*p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block));
*p++ = cpu_to_be32(args->reclaim);
p = xdr_encode_hyper(p, args->fl->fl_start);
p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
*p = cpu_to_be32(args->new_lock_owner);
if (args->new_lock_owner){
p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
*p++ = cpu_to_be32(args->open_seqid->sequence->counter);
p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(args->lock_seqid->sequence->counter);
encode_lockowner(xdr, &args->lock_owner);
}
else {
p = reserve_space(xdr, NFS4_STATEID_SIZE+4);
p = xdr_encode_opaque_fixed(p, args->lock_stateid->data, NFS4_STATEID_SIZE);
*p = cpu_to_be32(args->lock_seqid->sequence->counter);
}
hdr->nops++;
hdr->replen += decode_lock_maxsz;
}
static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 24);
*p++ = cpu_to_be32(OP_LOCKT);
*p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
p = xdr_encode_hyper(p, args->fl->fl_start);
p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
encode_lockowner(xdr, &args->lock_owner);
hdr->nops++;
hdr->replen += decode_lockt_maxsz;
}
static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 12+NFS4_STATEID_SIZE+16);
*p++ = cpu_to_be32(OP_LOCKU);
*p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
*p++ = cpu_to_be32(args->seqid->sequence->counter);
p = xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE);
p = xdr_encode_hyper(p, args->fl->fl_start);
xdr_encode_hyper(p, nfs4_lock_length(args->fl));
hdr->nops++;
hdr->replen += decode_locku_maxsz;
}
static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_RELEASE_LOCKOWNER);
encode_lockowner(xdr, lowner);
hdr->nops++;
hdr->replen += decode_release_lockowner_maxsz;
}
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
{
int len = name->len;
__be32 *p;
p = reserve_space(xdr, 8 + len);
*p++ = cpu_to_be32(OP_LOOKUP);
xdr_encode_opaque(p, name->name, len);
hdr->nops++;
hdr->replen += decode_lookup_maxsz;
}
static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
{
__be32 *p;
p = reserve_space(xdr, 8);
switch (fmode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
*p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ);
break;
case FMODE_WRITE:
*p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE);
break;
case FMODE_READ|FMODE_WRITE:
*p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH);
break;
default:
*p++ = cpu_to_be32(0);
}
*p = cpu_to_be32(0); /* for linux, share_deny = 0 always */
}
static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
/*
 * opcode 4 + seqid 4 are reserved here; after share_access/share_deny,
 * a second reservation holds clientid 8 + owner length 4 + the
 * 20-byte owner = 32
 */
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(OP_OPEN);
*p = cpu_to_be32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
p = reserve_space(xdr, 32);
p = xdr_encode_hyper(p, arg->clientid);
*p++ = cpu_to_be32(20);
p = xdr_encode_opaque_fixed(p, "open id:", 8);
*p++ = cpu_to_be32(arg->server->s_dev);
xdr_encode_hyper(p, arg->id);
}
static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
struct nfs_client *clp;
p = reserve_space(xdr, 4);
switch(arg->open_flags & O_EXCL) {
case 0:
*p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
encode_attrs(xdr, arg->u.attrs, arg->server);
break;
default:
clp = arg->server->nfs_client;
if (clp->cl_mvops->minor_version > 0) {
if (nfs4_has_persistent_session(clp)) {
*p = cpu_to_be32(NFS4_CREATE_GUARDED);
encode_attrs(xdr, arg->u.attrs, arg->server);
} else {
struct iattr dummy;
*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
encode_nfs4_verifier(xdr, &arg->u.verifier);
dummy.ia_valid = 0;
encode_attrs(xdr, &dummy, arg->server);
}
} else {
*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
encode_nfs4_verifier(xdr, &arg->u.verifier);
}
}
}
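/*
 * Summary of the createmode choices above: without O_EXCL the create is
 * UNCHECKED and carries the requested attributes; with O_EXCL, v4.0
 * uses EXCLUSIVE (verifier only), while v4.1 uses GUARDED on a
 * persistent session and EXCLUSIVE4_1 (verifier plus an empty attribute
 * list) otherwise.
 */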
static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
p = reserve_space(xdr, 4);
switch (arg->open_flags & O_CREAT) {
case 0:
*p = cpu_to_be32(NFS4_OPEN_NOCREATE);
break;
default:
BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
*p = cpu_to_be32(NFS4_OPEN_CREATE);
encode_createmode(xdr, arg);
}
}
static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delegation_type)
{
__be32 *p;
p = reserve_space(xdr, 4);
switch (delegation_type) {
case 0:
*p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE);
break;
case FMODE_READ:
*p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ);
break;
case FMODE_WRITE|FMODE_READ:
*p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE);
break;
default:
BUG();
}
}
static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *name)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(NFS4_OPEN_CLAIM_NULL);
encode_string(xdr, name->len, name->name);
}
static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(NFS4_OPEN_CLAIM_PREVIOUS);
encode_delegation_type(xdr, type);
}
static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid)
{
__be32 *p;
p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
encode_string(xdr, name->len, name->name);
}
static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr)
{
encode_openhdr(xdr, arg);
encode_opentype(xdr, arg);
switch (arg->claim) {
case NFS4_OPEN_CLAIM_NULL:
encode_claim_null(xdr, arg->name);
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
encode_claim_previous(xdr, arg->u.delegation_type);
break;
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation);
break;
default:
BUG();
}
hdr->nops++;
hdr->replen += decode_open_maxsz;
}
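/*
 * The claim variants encoded above correspond to a plain open by name
 * (CLAIM_NULL), state recovery after a server reboot (CLAIM_PREVIOUS)
 * and conversion of an existing delegation (CLAIM_DELEGATE_CUR).
 */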
static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
*p++ = cpu_to_be32(OP_OPEN_CONFIRM);
p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
*p = cpu_to_be32(arg->seqid->sequence->counter);
hdr->nops++;
hdr->replen += decode_open_confirm_maxsz;
}
static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
*p++ = cpu_to_be32(OP_OPEN_DOWNGRADE);
p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
*p = cpu_to_be32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
hdr->nops++;
hdr->replen += decode_open_downgrade_maxsz;
}
static void
encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hdr *hdr)
{
int len = fh->size;
__be32 *p;
p = reserve_space(xdr, 8 + len);
*p++ = cpu_to_be32(OP_PUTFH);
xdr_encode_opaque(p, fh->data, len);
hdr->nops++;
hdr->replen += decode_putfh_maxsz;
}
static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_PUTROOTFH);
hdr->nops++;
hdr->replen += decode_putrootfh_maxsz;
}
static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid)
{
nfs4_stateid stateid;
__be32 *p;
p = reserve_space(xdr, NFS4_STATEID_SIZE);
if (ctx->state != NULL) {
nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid);
if (zero_seqid)
stateid.stateid.seqid = 0;
xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
} else
xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
}
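/*
 * Note: when the open context carries state, the stateid encoded above
 * comes from nfs4_copy_stateid(); otherwise the all-zero "anonymous"
 * stateid is used. Callers pass the minor version as zero_seqid, so
 * NFSv4.1 requests carry a zeroed seqid field.
 */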
static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_READ);
encode_stateid(xdr, args->context, args->lock_context,
hdr->minorversion);
p = reserve_space(xdr, 12);
p = xdr_encode_hyper(p, args->offset);
*p = cpu_to_be32(args->count);
hdr->nops++;
hdr->replen += decode_read_maxsz;
}
static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr)
{
uint32_t attrs[2] = {
FATTR4_WORD0_RDATTR_ERROR,
FATTR4_WORD1_MOUNTED_ON_FILEID,
};
uint32_t dircount = readdir->count >> 1;
__be32 *p;
if (readdir->plus) {
attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE|
FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE|FATTR4_WORD0_FILEID;
attrs[1] |= FATTR4_WORD1_MODE|FATTR4_WORD1_NUMLINKS|FATTR4_WORD1_OWNER|
FATTR4_WORD1_OWNER_GROUP|FATTR4_WORD1_RAWDEV|
FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS|
FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
dircount >>= 1;
}
/* Use mounted_on_fileid only if the server supports it */
if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID))
attrs[0] |= FATTR4_WORD0_FILEID;
p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20);
*p++ = cpu_to_be32(OP_READDIR);
p = xdr_encode_hyper(p, readdir->cookie);
p = xdr_encode_opaque_fixed(p, readdir->verifier.data, NFS4_VERIFIER_SIZE);
*p++ = cpu_to_be32(dircount);
*p++ = cpu_to_be32(readdir->count);
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]);
*p = cpu_to_be32(attrs[1] & readdir->bitmask[1]);
hdr->nops++;
hdr->replen += decode_readdir_maxsz;
dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
__func__,
(unsigned long long)readdir->cookie,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1],
attrs[0] & readdir->bitmask[0],
attrs[1] & readdir->bitmask[1]);
}
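/*
 * The dircount hint above is a heuristic: half of the caller's buffer
 * size, halved again for readdirplus to allow for the larger per-entry
 * attribute payload (e.g. count = 4096 yields dircount 2048, or 1024
 * with plus).
 */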
static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_READLINK);
hdr->nops++;
hdr->replen += decode_readlink_maxsz;
}
static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8 + name->len);
*p++ = cpu_to_be32(OP_REMOVE);
xdr_encode_opaque(p, name->name, name->len);
hdr->nops++;
hdr->replen += decode_remove_maxsz;
}
static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_RENAME);
encode_string(xdr, oldname->len, oldname->name);
encode_string(xdr, newname->len, newname->name);
hdr->nops++;
hdr->replen += decode_rename_maxsz;
}
static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 12);
*p++ = cpu_to_be32(OP_RENEW);
xdr_encode_hyper(p, client_stateid->cl_clientid);
hdr->nops++;
hdr->replen += decode_renew_maxsz;
}
static void
encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_RESTOREFH);
hdr->nops++;
hdr->replen += decode_restorefh_maxsz;
}
static void
encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_SETATTR);
xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
p = reserve_space(xdr, 2*4);
*p++ = cpu_to_be32(1);
*p = cpu_to_be32(FATTR4_WORD0_ACL);
BUG_ON(arg->acl_len % 4);
p = reserve_space(xdr, 4);
*p = cpu_to_be32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
hdr->nops++;
hdr->replen += decode_setacl_maxsz;
}
static void
encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_SAVEFH);
hdr->nops++;
hdr->replen += decode_savefh_maxsz;
}
static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_SETATTR);
xdr_encode_opaque_fixed(p, arg->stateid.data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_setattr_maxsz;
encode_attrs(xdr, arg->iap, server);
}
static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4 + NFS4_VERIFIER_SIZE);
*p++ = cpu_to_be32(OP_SETCLIENTID);
xdr_encode_opaque_fixed(p, setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE);
encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
p = reserve_space(xdr, 4);
*p = cpu_to_be32(setclientid->sc_prog);
encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid);
encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr);
p = reserve_space(xdr, 4);
*p = cpu_to_be32(setclientid->sc_cb_ident);
hdr->nops++;
hdr->replen += decode_setclientid_maxsz;
}
static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE);
*p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM);
p = xdr_encode_hyper(p, arg->clientid);
xdr_encode_opaque_fixed(p, arg->confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
hdr->replen += decode_setclientid_confirm_maxsz;
}
static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_WRITE);
encode_stateid(xdr, args->context, args->lock_context,
hdr->minorversion);
p = reserve_space(xdr, 16);
p = xdr_encode_hyper(p, args->offset);
*p++ = cpu_to_be32(args->stable);
*p = cpu_to_be32(args->count);
xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
hdr->nops++;
hdr->replen += decode_write_maxsz;
}
static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_DELEGRETURN);
xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_delegreturn_maxsz;
}
static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
{
int len = name->len;
__be32 *p;
p = reserve_space(xdr, 8 + len);
*p++ = cpu_to_be32(OP_SECINFO);
xdr_encode_opaque(p, name->name, len);
hdr->nops++;
hdr->replen += decode_secinfo_maxsz;
}
#if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 operations */
static void encode_exchange_id(struct xdr_stream *xdr,
struct nfs41_exchange_id_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4 + sizeof(args->verifier->data));
*p++ = cpu_to_be32(OP_EXCHANGE_ID);
xdr_encode_opaque_fixed(p, args->verifier->data, sizeof(args->verifier->data));
encode_string(xdr, args->id_len, args->id);
p = reserve_space(xdr, 12);
*p++ = cpu_to_be32(args->flags);
*p++ = cpu_to_be32(0); /* zero length state_protect4_a */
*p = cpu_to_be32(0); /* zero length implementation id array */
hdr->nops++;
hdr->replen += decode_exchange_id_maxsz;
}
static void encode_create_session(struct xdr_stream *xdr,
struct nfs41_create_session_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
uint32_t len;
struct nfs_client *clp = args->client;
u32 max_resp_sz_cached;
/*
* Assumes OPEN is the biggest non-idempotent compound.
* 2 is the verifier.
*/
max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
len = scnprintf(machine_name, sizeof(machine_name), "%s",
clp->cl_ipaddr);
p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
*p++ = cpu_to_be32(OP_CREATE_SESSION);
p = xdr_encode_hyper(p, clp->cl_clientid);
*p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
*p++ = cpu_to_be32(args->flags); /*flags */
/* Fore Channel */
*p++ = cpu_to_be32(0); /* header padding size */
*p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
*p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */
*p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */
*p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */
*p++ = cpu_to_be32(0); /* rdmachannel_attrs */
/* Back Channel */
*p++ = cpu_to_be32(0); /* header padding size */
*p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
*p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
*p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
*p++ = cpu_to_be32(args->bc_attrs.max_ops); /* max operations */
*p++ = cpu_to_be32(args->bc_attrs.max_reqs); /* max requests */
*p++ = cpu_to_be32(0); /* rdmachannel_attrs */
*p++ = cpu_to_be32(args->cb_program); /* cb_program */
*p++ = cpu_to_be32(1); /* csa_sec_parms.len: one element */
*p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */
/* authsys_parms rfc1831 */
*p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
p = xdr_encode_opaque(p, machine_name, len);
*p++ = cpu_to_be32(0); /* UID */
*p++ = cpu_to_be32(0); /* GID */
*p = cpu_to_be32(0); /* No more gids */
hdr->nops++;
hdr->replen += decode_create_session_maxsz;
}
static void encode_destroy_session(struct xdr_stream *xdr,
struct nfs4_session *session,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(OP_DESTROY_SESSION);
xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
hdr->nops++;
hdr->replen += decode_destroy_session_maxsz;
}
static void encode_reclaim_complete(struct xdr_stream *xdr,
struct nfs41_reclaim_complete_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(OP_RECLAIM_COMPLETE);
*p++ = cpu_to_be32(args->one_fs);
hdr->nops++;
hdr->replen += decode_reclaim_complete_maxsz;
}
#endif /* CONFIG_NFS_V4_1 */
static void encode_sequence(struct xdr_stream *xdr,
const struct nfs4_sequence_args *args,
struct compound_hdr *hdr)
{
#if defined(CONFIG_NFS_V4_1)
struct nfs4_session *session = args->sa_session;
struct nfs4_slot_table *tp;
struct nfs4_slot *slot;
__be32 *p;
if (!session)
return;
tp = &session->fc_slot_table;
WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
slot = tp->slots + args->sa_slotid;
p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN + 16);
*p++ = cpu_to_be32(OP_SEQUENCE);
/*
* Sessionid + seqid + slotid + max slotid + cache_this
*/
dprintk("%s: sessionid=%u:%u:%u:%u seqid=%d slotid=%d "
"max_slotid=%d cache_this=%d\n",
__func__,
((u32 *)session->sess_id.data)[0],
((u32 *)session->sess_id.data)[1],
((u32 *)session->sess_id.data)[2],
((u32 *)session->sess_id.data)[3],
slot->seq_nr, args->sa_slotid,
tp->highest_used_slotid, args->sa_cache_this);
p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(slot->seq_nr);
*p++ = cpu_to_be32(args->sa_slotid);
*p++ = cpu_to_be32(tp->highest_used_slotid);
*p = cpu_to_be32(args->sa_cache_this);
hdr->nops++;
hdr->replen += decode_sequence_maxsz;
#endif /* CONFIG_NFS_V4_1 */
}
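/*
 * SEQUENCE must be the first operation in every NFSv4.1 compound; for
 * v4.0 callers sa_session is NULL and encode_sequence() emits nothing,
 * which is why the nfs4_xdr_enc_* routines below can call it
 * unconditionally.
 */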
#ifdef CONFIG_NFS_V4_1
static void
encode_getdevicelist(struct xdr_stream *xdr,
const struct nfs4_getdevicelist_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
nfs4_verifier dummy = {
.data = "dummmmmy",
};
p = reserve_space(xdr, 20);
*p++ = cpu_to_be32(OP_GETDEVICELIST);
*p++ = cpu_to_be32(args->layoutclass);
*p++ = cpu_to_be32(NFS4_PNFS_GETDEVLIST_MAXNUM);
xdr_encode_hyper(p, 0ULL); /* cookie */
encode_nfs4_verifier(xdr, &dummy);
hdr->nops++;
hdr->replen += decode_getdevicelist_maxsz;
}
static void
encode_getdeviceinfo(struct xdr_stream *xdr,
const struct nfs4_getdeviceinfo_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 16 + NFS4_DEVICEID4_SIZE);
*p++ = cpu_to_be32(OP_GETDEVICEINFO);
p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data,
NFS4_DEVICEID4_SIZE);
*p++ = cpu_to_be32(args->pdev->layout_type);
*p++ = cpu_to_be32(args->pdev->pglen); /* gdia_maxcount */
*p++ = cpu_to_be32(0); /* bitmap length 0 */
hdr->nops++;
hdr->replen += decode_getdeviceinfo_maxsz;
}
static void
encode_layoutget(struct xdr_stream *xdr,
const struct nfs4_layoutget_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_LAYOUTGET);
*p++ = cpu_to_be32(0); /* Signal layout available */
*p++ = cpu_to_be32(args->type);
*p++ = cpu_to_be32(args->range.iomode);
p = xdr_encode_hyper(p, args->range.offset);
p = xdr_encode_hyper(p, args->range.length);
p = xdr_encode_hyper(p, args->minlength);
p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE);
*p = cpu_to_be32(args->maxcount);
dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
__func__,
args->type,
args->range.iomode,
(unsigned long)args->range.offset,
(unsigned long)args->range.length,
args->maxcount);
hdr->nops++;
hdr->replen += decode_layoutget_maxsz;
}
static int
encode_layoutcommit(struct xdr_stream *xdr,
struct inode *inode,
const struct nfs4_layoutcommit_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten,
NFS_SERVER(args->inode)->pnfs_curr_ld->id);
p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_LAYOUTCOMMIT);
/* Only whole file layouts */
p = xdr_encode_hyper(p, 0); /* offset */
p = xdr_encode_hyper(p, args->lastbytewritten + 1); /* length */
*p++ = cpu_to_be32(0); /* reclaim */
p = xdr_encode_opaque_fixed(p, args->stateid.data, NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(1); /* newoffset = TRUE */
p = xdr_encode_hyper(p, args->lastbytewritten);
*p++ = cpu_to_be32(0); /* Never send time_modify_changed */
*p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */
if (NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit)
NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit(
NFS_I(inode)->layout, xdr, args);
else {
p = reserve_space(xdr, 4);
*p = cpu_to_be32(0); /* no layout-type payload */
}
hdr->nops++;
hdr->replen += decode_layoutcommit_maxsz;
return 0;
}
static void
encode_layoutreturn(struct xdr_stream *xdr,
const struct nfs4_layoutreturn_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 20);
*p++ = cpu_to_be32(OP_LAYOUTRETURN);
*p++ = cpu_to_be32(0); /* reclaim. always 0 for now */
*p++ = cpu_to_be32(args->layout_type);
*p++ = cpu_to_be32(IOMODE_ANY);
*p = cpu_to_be32(RETURN_FILE);
p = reserve_space(xdr, 16 + NFS4_STATEID_SIZE);
p = xdr_encode_hyper(p, 0);
p = xdr_encode_hyper(p, NFS4_MAX_UINT64);
spin_lock(&args->inode->i_lock);
xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE);
spin_unlock(&args->inode->i_lock);
if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) {
NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn(
NFS_I(args->inode)->layout, xdr, args);
} else {
p = reserve_space(xdr, 4);
*p = cpu_to_be32(0);
}
hdr->nops++;
hdr->replen += decode_layoutreturn_maxsz;
}
static int
encode_secinfo_no_name(struct xdr_stream *xdr,
const struct nfs41_secinfo_no_name_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(OP_SECINFO_NO_NAME);
*p++ = cpu_to_be32(args->style);
hdr->nops++;
hdr->replen += decode_secinfo_no_name_maxsz;
return 0;
}
static void encode_test_stateid(struct xdr_stream *xdr,
struct nfs41_test_stateid_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 8 + NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_TEST_STATEID);
*p++ = cpu_to_be32(1);
xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_test_stateid_maxsz;
}
static void encode_free_stateid(struct xdr_stream *xdr,
struct nfs41_free_stateid_args *args,
struct compound_hdr *hdr)
{
__be32 *p;
p = reserve_space(xdr, 4 + NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(OP_FREE_STATEID);
xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_free_stateid_maxsz;
}
#endif /* CONFIG_NFS_V4_1 */
/*
* END OF "GENERIC" ENCODE ROUTINES.
*/
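/*
* An NFSv4.0 compound carries no session, so sa_session is NULL and the
* minor version is 0; with an NFSv4.1 session we use the minor version
* negotiated for that client.
*/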
static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
{
#if defined(CONFIG_NFS_V4_1)
if (args->sa_session)
return args->sa_session->clp->cl_mvops->minor_version;
#endif /* CONFIG_NFS_V4_1 */
return 0;
}
/*
* Encode an ACCESS request
*/
static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_accessargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_access(xdr, args->access, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode LOOKUP request
*/
static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_lookup_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
encode_lookup(xdr, args->name, &hdr);
encode_getfh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode LOOKUP_ROOT request
*/
static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs4_lookup_root_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putrootfh(xdr, &hdr);
encode_getfh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode REMOVE request
*/
static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs_removeargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_remove(xdr, &args->name, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode RENAME request
*/
static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs_renameargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->old_dir, &hdr);
encode_savefh(xdr, &hdr);
encode_putfh(xdr, args->new_dir, &hdr);
encode_rename(xdr, args->old_name, args->new_name, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_restorefh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode LINK request
*/
static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_link_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_savefh(xdr, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
encode_link(xdr, args->name, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_restorefh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode CREATE request
*/
static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_create_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
encode_savefh(xdr, &hdr);
encode_create(xdr, args, &hdr);
encode_getfh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_restorefh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode SYMLINK request
*/
static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_create_arg *args)
{
nfs4_xdr_enc_create(req, xdr, args);
}
/*
* Encode GETATTR request
*/
static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_getattr_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode a CLOSE request
*/
static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_closeargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_close(xdr, args, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode an OPEN request
*/
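/*
* Compound: SEQUENCE, PUTFH, SAVEFH, OPEN, GETFH, GETATTR, RESTOREFH,
* GETATTR. The SAVEFH/RESTOREFH pair lets a single round trip return
* attributes for both the opened file and its parent directory.
*/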
static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_openargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_savefh(xdr, &hdr);
encode_open(xdr, args, &hdr);
encode_getfh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_restorefh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode an OPEN_CONFIRM request
*/
static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_open_confirmargs *args)
{
struct compound_hdr hdr = {
.nops = 0,
};
encode_compound_hdr(xdr, req, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_open_confirm(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode an OPEN request with no attributes.
*/
static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_openargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_open(xdr, args, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode an OPEN_DOWNGRADE request
*/
static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_closeargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_open_downgrade(xdr, args, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode a LOCK request
*/
static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_lock_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_lock(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode a LOCKT request
*/
static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_lockt_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_lockt(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode a LOCKU request
*/
static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_locku_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_locku(xdr, args, &hdr);
encode_nops(&hdr);
}
static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_release_lockowner_args *args)
{
struct compound_hdr hdr = {
.minorversion = 0,
};
encode_compound_hdr(xdr, req, &hdr);
encode_release_lockowner(xdr, &args->lock_owner, &hdr);
encode_nops(&hdr);
}
/*
* Encode a READLINK request
*/
static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_readlink *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_readlink(xdr, args, req, &hdr);
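/* hdr.replen counts the maximum reply size so far in XDR words, so
* (replen << 2) is the byte offset in the reply buffer at which the
* symlink data will begin; park the page vector there. */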
xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->pglen);
encode_nops(&hdr);
}
/*
* Encode a READDIR request
*/
static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_readdir_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_readdir(xdr, args, req, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->count);
dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
__func__, hdr.replen << 2, args->pages,
args->pgbase, args->count);
encode_nops(&hdr);
}
/*
* Encode a READ request
*/
static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_readargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_read(xdr, args, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
args->pages, args->pgbase, args->count);
req->rq_rcv_buf.flags |= XDRBUF_READ;
encode_nops(&hdr);
}
/*
* Encode an SETATTR request
*/
static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_setattrargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_setattr(xdr, args, args->server, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode a GETACL request
*/
static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_getaclargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
uint32_t replen;
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, args->acl_pgbase, args->acl_len);
encode_nops(&hdr);
}
/*
* Encode a WRITE request
*/
static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_writeargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_write(xdr, args, &hdr);
req->rq_snd_buf.flags |= XDRBUF_WRITE;
if (args->bitmask)
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode a COMMIT request
*/
static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_writeargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_commit(xdr, args, &hdr);
if (args->bitmask)
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode an FSINFO request
*/
static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs4_fsinfo_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_fsinfo(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode a PATHCONF request
*/
static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_pathconf_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
&hdr);
encode_nops(&hdr);
}
/*
* Encode a STATFS request
*/
static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
const struct nfs4_statfs_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
encode_nops(&hdr);
}
/*
* Encode a GETATTR_BITMAP (server capabilities) request
*/
static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_server_caps_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fhandle, &hdr);
encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
FATTR4_WORD0_LINK_SUPPORT|
FATTR4_WORD0_SYMLINK_SUPPORT|
FATTR4_WORD0_ACLSUPPORT, &hdr);
encode_nops(&hdr);
}
/*
* Encode a RENEW request
*/
static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_client *clp)
{
struct compound_hdr hdr = {
.nops = 0,
};
encode_compound_hdr(xdr, req, &hdr);
encode_renew(xdr, clp, &hdr);
encode_nops(&hdr);
}
/*
* Encode a SETCLIENTID request
*/
static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_setclientid *sc)
{
struct compound_hdr hdr = {
.nops = 0,
};
encode_compound_hdr(xdr, req, &hdr);
encode_setclientid(xdr, sc, &hdr);
encode_nops(&hdr);
}
/*
* Encode a SETCLIENTID_CONFIRM request
*/
static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_setclientid_res *arg)
{
struct compound_hdr hdr = {
.nops = 0,
};
const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME };
encode_compound_hdr(xdr, req, &hdr);
encode_setclientid_confirm(xdr, arg, &hdr);
encode_putrootfh(xdr, &hdr);
encode_fsinfo(xdr, lease_bitmap, &hdr);
encode_nops(&hdr);
}
/*
* Encode a DELEGRETURN request
*/
static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
struct xdr_stream *xdr,
const struct nfs4_delegreturnargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fhandle, &hdr);
encode_delegreturn(xdr, args->stateid, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode FS_LOCATIONS request
*/
static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_fs_locations_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
uint32_t replen;
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
encode_lookup(xdr, args->name, &hdr);
replen = hdr.replen; /* get the attribute into args->page */
encode_fs_locations(xdr, args->bitmask, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
0, PAGE_SIZE);
encode_nops(&hdr);
}
/*
* Encode SECINFO request
*/
static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_secinfo_arg *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
encode_secinfo(xdr, args->name, &hdr);
encode_nops(&hdr);
}
#if defined(CONFIG_NFS_V4_1)
/*
* Encode an EXCHANGE_ID request
*/
static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs41_exchange_id_args *args)
{
struct compound_hdr hdr = {
.minorversion = args->client->cl_mvops->minor_version,
};
encode_compound_hdr(xdr, req, &hdr);
encode_exchange_id(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode a CREATE_SESSION request
*/
static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs41_create_session_args *args)
{
struct compound_hdr hdr = {
.minorversion = args->client->cl_mvops->minor_version,
};
encode_compound_hdr(xdr, req, &hdr);
encode_create_session(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode a DESTROY_SESSION request
*/
static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_session *session)
{
struct compound_hdr hdr = {
.minorversion = session->clp->cl_mvops->minor_version,
};
encode_compound_hdr(xdr, req, &hdr);
encode_destroy_session(xdr, session, &hdr);
encode_nops(&hdr);
}
/*
* Encode a SEQUENCE request
*/
static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs4_sequence_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode a GET_LEASE_TIME request
*/
static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_get_lease_time_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
};
const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME };
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->la_seq_args, &hdr);
encode_putrootfh(xdr, &hdr);
encode_fsinfo(xdr, lease_bitmap, &hdr);
encode_nops(&hdr);
}
/*
* Encode a RECLAIM_COMPLETE request
*/
static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs41_reclaim_complete_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args)
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_reclaim_complete(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode GETDEVICELIST request
*/
static void nfs4_xdr_enc_getdevicelist(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_getdevicelist_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_getdevicelist(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode GETDEVICEINFO request
*/
static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_getdeviceinfo_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_getdeviceinfo(xdr, args, &hdr);
/* set up reply kvec. Subtract notification bitmap max size (2)
* so that notification bitmap is put in xdr_buf tail */
xdr_inline_pages(&req->rq_rcv_buf, (hdr.replen - 2) << 2,
args->pdev->pages, args->pdev->pgbase,
args->pdev->pglen);
encode_nops(&hdr);
}
/*
* Encode LAYOUTGET request
*/
static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_layoutget_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
encode_layoutget(xdr, args, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
args->layout.pages, 0, args->layout.pglen);
encode_nops(&hdr);
}
/*
* Encode LAYOUTCOMMIT request
*/
static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_layoutcommit_args *args)
{
struct nfs4_layoutcommit_data *data =
container_of(args, struct nfs4_layoutcommit_data, args);
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
encode_layoutcommit(xdr, data->args.inode, args, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
/*
* Encode LAYOUTRETURN request
*/
static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_layoutreturn_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, NFS_FH(args->inode), &hdr);
encode_layoutreturn(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode SECINFO_NO_NAME request
*/
static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs41_secinfo_no_name_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putrootfh(xdr, &hdr);
encode_secinfo_no_name(xdr, args, &hdr);
encode_nops(&hdr);
return 0;
}
/*
* Encode TEST_STATEID request
*/
static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs41_test_stateid_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_test_stateid(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Encode FREE_STATEID request
*/
static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs41_free_stateid_args *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_free_stateid(xdr, args, &hdr);
encode_nops(&hdr);
}
#endif /* CONFIG_NFS_V4_1 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
dprintk("nfs: %s: prematurely hit end of receive buffer. "
"Remaining buffer length is %tu words.\n",
func, xdr->end - xdr->p);
}
static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, *len);
if (unlikely(!p))
goto out_overflow;
*string = (char *)p;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
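/*
* Compound reply header on the wire: status (4 bytes), tag length (4),
* the tag itself, then a 4-byte count of operation results.
*/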
static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
hdr->status = be32_to_cpup(p++);
hdr->taglen = be32_to_cpup(p);
p = xdr_inline_decode(xdr, hdr->taglen + 4);
if (unlikely(!p))
goto out_overflow;
hdr->tag = (char *)p;
p += XDR_QUADLEN(hdr->taglen);
hdr->nops = be32_to_cpup(p);
if (unlikely(hdr->nops < 1))
return nfs4_stat_to_errno(hdr->status);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
__be32 *p;
uint32_t opnum;
int32_t nfserr;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
opnum = be32_to_cpup(p++);
if (opnum != expected) {
dprintk("nfs: Server returned operation"
" %d but we issued a request for %d\n",
opnum, expected);
return -EIO;
}
nfserr = be32_to_cpup(p);
if (nfserr != NFS_OK)
return nfs4_stat_to_errno(nfserr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/* Dummy routine: consume an ACE (type, flag, access mask, "who") without interpreting it */
static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp)
{
__be32 *p;
unsigned int len;
char *str;
p = xdr_inline_decode(xdr, 12);
if (likely(p))
return decode_opaque_inline(xdr, &len, &str);
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
{
uint32_t bmlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
bmlen = be32_to_cpup(p);
bitmap[0] = bitmap[1] = bitmap[2] = 0;
p = xdr_inline_decode(xdr, (bmlen << 2));
if (unlikely(!p))
goto out_overflow;
if (bmlen > 0) {
bitmap[0] = be32_to_cpup(p++);
if (bmlen > 1) {
bitmap[1] = be32_to_cpup(p++);
if (bmlen > 2)
bitmap[2] = be32_to_cpup(p);
}
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*attrlen = be32_to_cpup(p);
*savep = xdr->p;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
{
if (likely(bitmap[0] & FATTR4_WORD0_SUPPORTED_ATTRS)) {
int ret;
ret = decode_attr_bitmap(xdr, bitmask);
if (unlikely(ret < 0))
return ret;
bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS;
} else
bitmask[0] = bitmask[1] = bitmask[2] = 0;
dprintk("%s: bitmask=%08x:%08x:%08x\n", __func__,
bitmask[0], bitmask[1], bitmask[2]);
return 0;
}
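/*
* The decode_attr_* helpers below rely on attributes arriving in
* ascending bit order. The (FATTR4_WORD0_FOO - 1U) test checks whether
* any lower-numbered bit is still set in the bitmap: if so, an
* attribute we never consumed precedes this one in the stream, we
* cannot know where our data starts, and -EIO is the only safe answer.
*/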
static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *type)
{
__be32 *p;
int ret = 0;
*type = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*type = be32_to_cpup(p);
if (*type < NF4REG || *type > NF4NAMEDATTR) {
dprintk("%s: bad type %d\n", __func__, *type);
return -EIO;
}
bitmap[0] &= ~FATTR4_WORD0_TYPE;
ret = NFS_ATTR_FATTR_TYPE;
}
dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type]);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change)
{
__be32 *p;
int ret = 0;
*change = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, change);
bitmap[0] &= ~FATTR4_WORD0_CHANGE;
ret = NFS_ATTR_FATTR_CHANGE;
}
dprintk("%s: change attribute=%Lu\n", __func__,
(unsigned long long)*change);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size)
{
__be32 *p;
int ret = 0;
*size = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, size);
bitmap[0] &= ~FATTR4_WORD0_SIZE;
ret = NFS_ATTR_FATTR_SIZE;
}
dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
__be32 *p;
*res = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
}
dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
__be32 *p;
*res = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
}
dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid)
{
__be32 *p;
int ret = 0;
fsid->major = 0;
fsid->minor = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FSID)) {
p = xdr_inline_decode(xdr, 16);
if (unlikely(!p))
goto out_overflow;
p = xdr_decode_hyper(p, &fsid->major);
xdr_decode_hyper(p, &fsid->minor);
bitmap[0] &= ~FATTR4_WORD0_FSID;
ret = NFS_ATTR_FATTR_FSID;
}
dprintk("%s: fsid=(0x%Lx/0x%Lx)\n", __func__,
(unsigned long long)fsid->major,
(unsigned long long)fsid->minor);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
__be32 *p;
*res = 60;
if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
}
dprintk("%s: file size=%u\n", __func__, (unsigned int)*res);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
{
__be32 *p;
if (unlikely(bitmap[0] & (FATTR4_WORD0_RDATTR_ERROR - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_RDATTR_ERROR)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
*res = -be32_to_cpup(p);
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh)
{
__be32 *p;
int len;
if (fh != NULL)
memset(fh, 0, sizeof(*fh));
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEHANDLE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILEHANDLE)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
len = be32_to_cpup(p);
if (len > NFS4_FHSIZE)
return -EIO;
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
if (fh != NULL) {
memcpy(fh->data, p, len);
fh->size = len;
}
bitmap[0] &= ~FATTR4_WORD0_FILEHANDLE;
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
__be32 *p;
*res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL;
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
}
dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
{
__be32 *p;
int ret = 0;
*fileid = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, fileid);
bitmap[0] &= ~FATTR4_WORD0_FILEID;
ret = NFS_ATTR_FATTR_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
{
__be32 *p;
int ret = 0;
*fileid = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_MOUNTED_ON_FILEID - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, fileid);
bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
ret = NFS_ATTR_FATTR_MOUNTED_ON_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
{
__be32 *p;
int status = 0;
*res = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL;
}
dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
{
__be32 *p;
int status = 0;
*res = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_FREE;
}
dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
{
__be32 *p;
int status = 0;
*res = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
}
dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
{
u32 n;
__be32 *p;
int status = 0;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
n = be32_to_cpup(p);
if (n == 0)
goto root_path;
dprintk("path ");
path->ncomponents = 0;
while (path->ncomponents < n) {
struct nfs4_string *component = &path->components[path->ncomponents];
status = decode_opaque_inline(xdr, &component->len, &component->data);
if (unlikely(status != 0))
goto out_eio;
if (path->ncomponents != n)
dprintk("/");
dprintk("%s", component->data);
if (path->ncomponents < NFS4_PATHNAME_MAXCOMPONENTS)
path->ncomponents++;
else {
dprintk("cannot parse %d components in path\n", n);
goto out_eio;
}
}
out:
dprintk("\n");
return status;
root_path:
/* a root pathname is sent as a zero component4 */
path->ncomponents = 1;
path->components[0].len = 0;
path->components[0].data = NULL;
dprintk("path /\n");
goto out;
out_eio:
dprintk(" status %d", status);
status = -EIO;
goto out;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res)
{
int n;
__be32 *p;
int status = -EIO;
if (unlikely(bitmap[0] & (FATTR4_WORD0_FS_LOCATIONS -1U)))
goto out;
status = 0;
if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS)))
goto out;
dprintk("%s: fsroot ", __func__);
status = decode_pathname(xdr, &res->fs_path);
if (unlikely(status != 0))
goto out;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
n = be32_to_cpup(p);
if (n <= 0)
goto out_eio;
res->nlocations = 0;
while (res->nlocations < n) {
u32 m;
struct nfs4_fs_location *loc = &res->locations[res->nlocations];
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
m = be32_to_cpup(p);
loc->nservers = 0;
dprintk("%s: servers ", __func__);
while (loc->nservers < m) {
struct nfs4_string *server = &loc->servers[loc->nservers];
status = decode_opaque_inline(xdr, &server->len, &server->data);
if (unlikely(status != 0))
goto out_eio;
dprintk("%s ", server->data);
if (loc->nservers < NFS4_FS_LOCATION_MAXSERVERS)
loc->nservers++;
else {
unsigned int i;
dprintk("%s: using first %u of %u servers "
"returned for location %u\n",
__func__,
NFS4_FS_LOCATION_MAXSERVERS,
m, res->nlocations);
for (i = loc->nservers; i < m; i++) {
unsigned int len;
char *data;
status = decode_opaque_inline(xdr, &len, &data);
if (unlikely(status != 0))
goto out_eio;
}
}
}
status = decode_pathname(xdr, &loc->rootpath);
if (unlikely(status != 0))
goto out_eio;
if (res->nlocations < NFS4_FS_LOCATIONS_MAXENTRIES)
res->nlocations++;
}
if (res->nlocations != 0)
status = NFS_ATTR_FATTR_V4_REFERRAL;
out:
dprintk("%s: fs_locations done, error = %d\n", __func__, status);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
out_eio:
status = -EIO;
goto out;
}
static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
{
__be32 *p;
int status = 0;
*res = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE;
}
dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink)
{
__be32 *p;
int status = 0;
*maxlink = 1;
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*maxlink = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_MAXLINK;
}
dprintk("%s: maxlink=%u\n", __func__, *maxlink);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname)
{
__be32 *p;
int status = 0;
*maxname = 1024;
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*maxname = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_MAXNAME;
}
dprintk("%s: maxname=%u\n", __func__, *maxname);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
__be32 *p;
int status = 0;
*res = 1024;
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXREAD - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) {
uint64_t maxread;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, &maxread);
if (maxread > 0x7FFFFFFF)
maxread = 0x7FFFFFFF;
*res = (uint32_t)maxread;
bitmap[0] &= ~FATTR4_WORD0_MAXREAD;
}
dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
__be32 *p;
int status = 0;
*res = 1024;
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXWRITE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) {
uint64_t maxwrite;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, &maxwrite);
if (maxwrite > 0x7FFFFFFF)
maxwrite = 0x7FFFFFFF;
*res = (uint32_t)maxwrite;
bitmap[0] &= ~FATTR4_WORD0_MAXWRITE;
}
dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode)
{
uint32_t tmp;
__be32 *p;
int ret = 0;
*mode = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_MODE)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
tmp = be32_to_cpup(p);
*mode = tmp & ~S_IFMT;
bitmap[1] &= ~FATTR4_WORD1_MODE;
ret = NFS_ATTR_FATTR_MODE;
}
dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink)
{
__be32 *p;
int ret = 0;
*nlink = 1;
if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
*nlink = be32_to_cpup(p);
bitmap[1] &= ~FATTR4_WORD1_NUMLINKS;
ret = NFS_ATTR_FATTR_NLINK;
}
dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
const struct nfs_server *server, uint32_t *uid, int may_sleep)
{
uint32_t len;
__be32 *p;
int ret = 0;
*uid = -2;
if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
if (!may_sleep) {
/* do nothing */
} else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0)
ret = NFS_ATTR_FATTR_OWNER;
else
dprintk("%s: nfs_map_name_to_uid failed!\n",
__func__);
} else
dprintk("%s: name too long (%u)!\n",
__func__, len);
bitmap[1] &= ~FATTR4_WORD1_OWNER;
}
dprintk("%s: uid=%d\n", __func__, (int)*uid);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
const struct nfs_server *server, uint32_t *gid, int may_sleep)
{
uint32_t len;
__be32 *p;
int ret = 0;
*gid = -2;
if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
if (!may_sleep) {
/* do nothing */
} else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0)
ret = NFS_ATTR_FATTR_GROUP;
else
dprintk("%s: nfs_map_group_to_gid failed!\n",
__func__);
} else
dprintk("%s: name too long (%u)!\n",
__func__, len);
bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP;
}
dprintk("%s: gid=%d\n", __func__, (int)*gid);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev)
{
uint32_t major = 0, minor = 0;
__be32 *p;
int ret = 0;
*rdev = MKDEV(0,0);
if (unlikely(bitmap[1] & (FATTR4_WORD1_RAWDEV - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) {
dev_t tmp;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
major = be32_to_cpup(p++);
minor = be32_to_cpup(p);
tmp = MKDEV(major, minor);
if (MAJOR(tmp) == major && MINOR(tmp) == minor)
*rdev = tmp;
bitmap[1] &= ~ FATTR4_WORD1_RAWDEV;
ret = NFS_ATTR_FATTR_RDEV;
}
dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
{
__be32 *p;
int status = 0;
*res = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL;
}
dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
{
__be32 *p;
int status = 0;
*res = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE;
}
dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
{
__be32 *p;
int status = 0;
*res = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL;
}
dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used)
{
__be32 *p;
int ret = 0;
*used = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) {
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, used);
bitmap[1] &= ~FATTR4_WORD1_SPACE_USED;
ret = NFS_ATTR_FATTR_SPACE_USED;
}
dprintk("%s: space used=%Lu\n", __func__,
(unsigned long long)*used);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
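/* An nfstime4 is 12 bytes on the wire: a 64-bit seconds value
* followed by a 32-bit nanoseconds value. */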
static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
{
__be32 *p;
uint64_t sec;
uint32_t nsec;
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
p = xdr_decode_hyper(p, &sec);
nsec = be32_to_cpup(p);
time->tv_sec = (time_t)sec;
time->tv_nsec = (long)nsec;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
{
int status = 0;
time->tv_sec = 0;
time->tv_nsec = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_ACCESS - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_TIME_ACCESS)) {
status = decode_attr_time(xdr, time);
if (status == 0)
status = NFS_ATTR_FATTR_ATIME;
bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS;
}
dprintk("%s: atime=%ld\n", __func__, (long)time->tv_sec);
return status;
}
static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
{
int status = 0;
time->tv_sec = 0;
time->tv_nsec = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_METADATA - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) {
status = decode_attr_time(xdr, time);
if (status == 0)
status = NFS_ATTR_FATTR_CTIME;
bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA;
}
dprintk("%s: ctime=%ld\n", __func__, (long)time->tv_sec);
return status;
}
static int decode_attr_time_delta(struct xdr_stream *xdr, uint32_t *bitmap,
struct timespec *time)
{
int status = 0;
time->tv_sec = 0;
time->tv_nsec = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_DELTA - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_TIME_DELTA)) {
status = decode_attr_time(xdr, time);
bitmap[1] &= ~FATTR4_WORD1_TIME_DELTA;
}
dprintk("%s: time_delta=%ld %ld\n", __func__, (long)time->tv_sec,
(long)time->tv_nsec);
return status;
}
static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
{
int status = 0;
time->tv_sec = 0;
time->tv_nsec = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_MODIFY - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) {
status = decode_attr_time(xdr, time);
if (status == 0)
status = NFS_ATTR_FATTR_MTIME;
bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY;
}
dprintk("%s: mtime=%ld\n", __func__, (long)time->tv_sec);
return status;
}
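/*
* Cross-check the attribute length the server advertised against the
* number of words actually consumed. XDR_QUADLEN() rounds a byte count
* up to 4-byte XDR words, i.e. (len + 3) >> 2.
*/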
static int verify_attr_len(struct xdr_stream *xdr, __be32 *savep, uint32_t attrlen)
{
unsigned int attrwords = XDR_QUADLEN(attrlen);
unsigned int nwords = xdr->p - savep;
if (unlikely(attrwords != nwords)) {
dprintk("%s: server returned incorrect attribute length: "
"%u %c %u\n",
__func__,
attrwords << 2,
(attrwords < nwords) ? '<' : '>',
nwords << 2);
return -EIO;
}
return 0;
}
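/* change_info4: atomic flag (4 bytes) + before (8) + after (8) = 20 */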
static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
{
__be32 *p;
p = xdr_inline_decode(xdr, 20);
if (unlikely(!p))
goto out_overflow;
cinfo->atomic = be32_to_cpup(p++);
p = xdr_decode_hyper(p, &cinfo->before);
xdr_decode_hyper(p, &cinfo->after);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
{
__be32 *p;
uint32_t supp, acc;
int status;
status = decode_op_hdr(xdr, OP_ACCESS);
if (status)
return status;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
supp = be32_to_cpup(p++);
acc = be32_to_cpup(p);
access->supported = supp;
access->access = acc;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
{
__be32 *p;
p = xdr_inline_decode(xdr, len);
if (likely(p)) {
memcpy(buf, p, len);
return 0;
}
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
}
static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
{
int status;
status = decode_op_hdr(xdr, OP_CLOSE);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (!status)
status = decode_stateid(xdr, &res->stateid);
return status;
}
static int decode_verifier(struct xdr_stream *xdr, void *verifier)
{
return decode_opaque_fixed(xdr, verifier, 8);
}
static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
{
int status;
status = decode_op_hdr(xdr, OP_COMMIT);
if (!status)
status = decode_verifier(xdr, res->verf->verifier);
return status;
}
static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
{
__be32 *p;
uint32_t bmlen;
int status;
status = decode_op_hdr(xdr, OP_CREATE);
if (status)
return status;
if ((status = decode_change_info(xdr, cinfo)))
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
bmlen = be32_to_cpup(p);
p = xdr_inline_decode(xdr, bmlen << 2);
if (likely(p))
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res)
{
__be32 *savep;
uint32_t attrlen, bitmap[3] = {0};
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto xdr_error;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto xdr_error;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto xdr_error;
if ((status = decode_attr_supported(xdr, bitmap, res->attr_bitmask)) != 0)
goto xdr_error;
if ((status = decode_attr_link_support(xdr, bitmap, &res->has_links)) != 0)
goto xdr_error;
if ((status = decode_attr_symlink_support(xdr, bitmap, &res->has_symlinks)) != 0)
goto xdr_error;
if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0)
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
{
__be32 *savep;
uint32_t attrlen, bitmap[3] = {0};
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto xdr_error;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto xdr_error;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto xdr_error;
if ((status = decode_attr_files_avail(xdr, bitmap, &fsstat->afiles)) != 0)
goto xdr_error;
if ((status = decode_attr_files_free(xdr, bitmap, &fsstat->ffiles)) != 0)
goto xdr_error;
if ((status = decode_attr_files_total(xdr, bitmap, &fsstat->tfiles)) != 0)
goto xdr_error;
if ((status = decode_attr_space_avail(xdr, bitmap, &fsstat->abytes)) != 0)
goto xdr_error;
if ((status = decode_attr_space_free(xdr, bitmap, &fsstat->fbytes)) != 0)
goto xdr_error;
if ((status = decode_attr_space_total(xdr, bitmap, &fsstat->tbytes)) != 0)
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf)
{
__be32 *savep;
uint32_t attrlen, bitmap[3] = {0};
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto xdr_error;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto xdr_error;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto xdr_error;
if ((status = decode_attr_maxlink(xdr, bitmap, &pathconf->max_link)) != 0)
goto xdr_error;
if ((status = decode_attr_maxname(xdr, bitmap, &pathconf->max_namelen)) != 0)
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
struct nfs_fattr *fattr, struct nfs_fh *fh,
const struct nfs_server *server, int may_sleep)
{
int status;
umode_t fmode = 0;
uint32_t type;
int32_t err;
status = decode_attr_type(xdr, bitmap, &type);
if (status < 0)
goto xdr_error;
fattr->mode = 0;
if (status != 0) {
fattr->mode |= nfs_type2fmt[type];
fattr->valid |= status;
}
status = decode_attr_change(xdr, bitmap, &fattr->change_attr);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_size(xdr, bitmap, &fattr->size);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_fsid(xdr, bitmap, &fattr->fsid);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
err = 0;
status = decode_attr_error(xdr, bitmap, &err);
if (status < 0)
goto xdr_error;
if (err == -NFS4ERR_WRONGSEC)
nfs_fixup_secinfo_attributes(fattr, fh);
status = decode_attr_filehandle(xdr, bitmap, fh);
if (status < 0)
goto xdr_error;
status = decode_attr_fileid(xdr, bitmap, &fattr->fileid);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
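/* This container_of() is only valid when the caller passed a fattr
* embedded in an nfs4_fs_locations; the bitmap test inside
* decode_attr_fs_locations keeps the result from being used in any
* other case. */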
status = decode_attr_fs_locations(xdr, bitmap, container_of(fattr,
struct nfs4_fs_locations,
fattr));
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_mode(xdr, bitmap, &fmode);
if (status < 0)
goto xdr_error;
if (status != 0) {
fattr->mode |= fmode;
fattr->valid |= status;
}
status = decode_attr_nlink(xdr, bitmap, &fattr->nlink);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, may_sleep);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_group(xdr, bitmap, server, &fattr->gid, may_sleep);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_rdev(xdr, bitmap, &fattr->rdev);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_space_used(xdr, bitmap, &fattr->du.nfs3.used);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_time_access(xdr, bitmap, &fattr->atime);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_time_metadata(xdr, bitmap, &fattr->ctime);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_time_modify(xdr, bitmap, &fattr->mtime);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
status = decode_attr_mounted_on_fileid(xdr, bitmap, &fattr->mounted_on_fileid);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
xdr_error:
dprintk("%s: xdr returned %d\n", __func__, -status);
return status;
}
static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr,
struct nfs_fh *fh, const struct nfs_server *server, int may_sleep)
{
__be32 *savep;
uint32_t attrlen, bitmap[3] = {0};
int status;
status = decode_op_hdr(xdr, OP_GETATTR);
if (status < 0)
goto xdr_error;
status = decode_attr_bitmap(xdr, bitmap);
if (status < 0)
goto xdr_error;
status = decode_attr_length(xdr, &attrlen, &savep);
if (status < 0)
goto xdr_error;
status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server, may_sleep);
if (status < 0)
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d\n", __func__, -status);
return status;
}
static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
const struct nfs_server *server, int may_sleep)
{
return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep);
}
/*
* Decode potentially multiple layout types. Currently we only support
* one layout driver per file system.
*/
static int decode_first_pnfs_layout_type(struct xdr_stream *xdr,
uint32_t *layouttype)
{
uint32_t *p;
int num;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
num = be32_to_cpup(p);
/* pNFS is not supported by the underlying file system */
if (num == 0) {
*layouttype = 0;
return 0;
}
if (num > 1)
printk(KERN_INFO "%s: Warning: Multiple pNFS layout drivers "
"per filesystem not supported\n", __func__);
/* Decode and set first layout type, move xdr->p past unused types */
p = xdr_inline_decode(xdr, num * 4);
if (unlikely(!p))
goto out_overflow;
*layouttype = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
/*
* The type of file system exported.
* Note we must ensure that layouttype is set in any non-error case.
*/
static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
uint32_t *layouttype)
{
int status = 0;
dprintk("%s: bitmap is %x\n", __func__, bitmap[1]);
if (unlikely(bitmap[1] & (FATTR4_WORD1_FS_LAYOUT_TYPES - 1U)))
return -EIO;
if (bitmap[1] & FATTR4_WORD1_FS_LAYOUT_TYPES) {
status = decode_first_pnfs_layout_type(xdr, layouttype);
bitmap[1] &= ~FATTR4_WORD1_FS_LAYOUT_TYPES;
} else
*layouttype = 0;
return status;
}
/*
* The preferred block size for layout directed I/O
*/
static int decode_attr_layout_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
uint32_t *res)
{
__be32 *p;
dprintk("%s: bitmap is %x\n", __func__, bitmap[2]);
*res = 0;
if (bitmap[2] & FATTR4_WORD2_LAYOUT_BLKSIZE) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p)) {
print_overflow_msg(__func__, xdr);
return -EIO;
}
*res = be32_to_cpup(p);
bitmap[2] &= ~FATTR4_WORD2_LAYOUT_BLKSIZE;
}
return 0;
}
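/*
 * All of the decode_attr_* helpers share one pattern: attributes appear on
 * the wire in ascending bitmap order, so a helper may only consume data if
 * its bit is set, and must clear that bit once done. A rough sketch (the
 * attribute name FOO is illustrative, not a real constant):
 *
 *	if (bitmap[word] & FATTR4_WORD<n>_FOO) {
 *		p = xdr_inline_decode(xdr, len);
 *		*res = be32_to_cpup(p);
 *		bitmap[word] &= ~FATTR4_WORD<n>_FOO;
 *	}
 *
 * A lower-order bit still set on entry (the bitmap[n] & (BIT - 1U) tests)
 * means attributes were decoded out of order, which is treated as -EIO.
 */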
static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
{
__be32 *savep;
uint32_t attrlen, bitmap[3];
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto xdr_error;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto xdr_error;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto xdr_error;
fsinfo->rtmult = fsinfo->wtmult = 512; /* ??? */
if ((status = decode_attr_lease_time(xdr, bitmap, &fsinfo->lease_time)) != 0)
goto xdr_error;
if ((status = decode_attr_maxfilesize(xdr, bitmap, &fsinfo->maxfilesize)) != 0)
goto xdr_error;
if ((status = decode_attr_maxread(xdr, bitmap, &fsinfo->rtmax)) != 0)
goto xdr_error;
fsinfo->rtpref = fsinfo->dtpref = fsinfo->rtmax;
if ((status = decode_attr_maxwrite(xdr, bitmap, &fsinfo->wtmax)) != 0)
goto xdr_error;
fsinfo->wtpref = fsinfo->wtmax;
status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta);
if (status != 0)
goto xdr_error;
status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype);
if (status != 0)
goto xdr_error;
status = decode_attr_layout_blksize(xdr, bitmap, &fsinfo->blksize);
if (status)
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
__be32 *p;
uint32_t len;
int status;
/* Zero handle first to allow comparisons */
memset(fh, 0, sizeof(*fh));
status = decode_op_hdr(xdr, OP_GETFH);
if (status)
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
len = be32_to_cpup(p);
if (len > NFS4_FHSIZE)
return -EIO;
fh->size = len;
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
memcpy(fh->data, p, len);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
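/*
 * decode_getfh() validates the server-supplied length against NFS4_FHSIZE
 * before the memcpy(): fh->data is a fixed-size array, so trusting the
 * length from the wire unchecked would let a bad server overrun it.
 */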
static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
{
int status;
status = decode_op_hdr(xdr, OP_LINK);
if (status)
return status;
return decode_change_info(xdr, cinfo);
}
/*
* We create the owner, so we know a proper owner.id length is 4.
*/
static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
{
uint64_t offset, length, clientid;
__be32 *p;
uint32_t namelen, type;
p = xdr_inline_decode(xdr, 32); /* read 32 bytes */
if (unlikely(!p))
goto out_overflow;
p = xdr_decode_hyper(p, &offset); /* read 2 8-byte long words */
p = xdr_decode_hyper(p, &length);
type = be32_to_cpup(p++); /* 4 byte read */
if (fl != NULL) { /* manipulate file lock */
fl->fl_start = (loff_t)offset;
fl->fl_end = fl->fl_start + (loff_t)length - 1;
if (length == ~(uint64_t)0)
fl->fl_end = OFFSET_MAX;
fl->fl_type = F_WRLCK;
if (type & 1)
fl->fl_type = F_RDLCK;
fl->fl_pid = 0;
}
p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */
namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */
p = xdr_inline_decode(xdr, namelen); /* variable size field */
if (likely(p))
return -NFS4ERR_DENIED;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
{
int status;
status = decode_op_hdr(xdr, OP_LOCK);
if (status == -EIO)
goto out;
if (status == 0) {
status = decode_stateid(xdr, &res->stateid);
if (unlikely(status))
goto out;
} else if (status == -NFS4ERR_DENIED)
status = decode_lock_denied(xdr, NULL);
if (res->open_seqid != NULL)
nfs_increment_open_seqid(status, res->open_seqid);
nfs_increment_lock_seqid(status, res->lock_seqid);
out:
return status;
}
static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockt_res *res)
{
int status;
status = decode_op_hdr(xdr, OP_LOCKT);
if (status == -NFS4ERR_DENIED)
return decode_lock_denied(xdr, res->denied);
return status;
}
static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
{
int status;
status = decode_op_hdr(xdr, OP_LOCKU);
if (status != -EIO)
nfs_increment_lock_seqid(status, res->seqid);
if (status == 0)
status = decode_stateid(xdr, &res->stateid);
return status;
}
static int decode_release_lockowner(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_RELEASE_LOCKOWNER);
}
static int decode_lookup(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_LOOKUP);
}
/* This is too sick! */
static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize)
{
__be32 *p;
uint32_t limit_type, nblocks, blocksize;
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
limit_type = be32_to_cpup(p++);
switch (limit_type) {
case 1:
xdr_decode_hyper(p, maxsize);
break;
case 2:
nblocks = be32_to_cpup(p++);
blocksize = be32_to_cpup(p);
*maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
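/*
 * decode_space_limit() mirrors the nfs_space_limit4 union from RFC 3530:
 * limit type 1 carries a 64-bit byte count directly, while type 2 carries
 * a (num_blocks, bytes_per_block) pair. The uint64_t casts before the
 * multiply are not cosmetic -- computed in 32 bits, e.g. nblocks = 0x10000
 * times blocksize = 0x10000 would wrap to 0 instead of the intended 2^32.
 */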
static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
{
__be32 *p;
uint32_t delegation_type;
int status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
delegation_type = be32_to_cpup(p);
if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
res->delegation_type = 0;
return 0;
}
status = decode_stateid(xdr, &res->delegation);
if (unlikely(status))
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
res->do_recall = be32_to_cpup(p);
switch (delegation_type) {
case NFS4_OPEN_DELEGATE_READ:
res->delegation_type = FMODE_READ;
break;
case NFS4_OPEN_DELEGATE_WRITE:
res->delegation_type = FMODE_WRITE|FMODE_READ;
if (decode_space_limit(xdr, &res->maxsize) < 0)
return -EIO;
}
return decode_ace(xdr, NULL, res->server->nfs_client);
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
{
__be32 *p;
uint32_t savewords, bmlen, i;
int status;
status = decode_op_hdr(xdr, OP_OPEN);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (!status)
status = decode_stateid(xdr, &res->stateid);
if (unlikely(status))
return status;
decode_change_info(xdr, &res->cinfo);
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
res->rflags = be32_to_cpup(p++);
bmlen = be32_to_cpup(p);
if (bmlen > 10)
goto xdr_error;
p = xdr_inline_decode(xdr, bmlen << 2);
if (unlikely(!p))
goto out_overflow;
savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE);
for (i = 0; i < savewords; ++i)
res->attrset[i] = be32_to_cpup(p++);
for (; i < NFS4_BITMAP_SIZE; i++)
res->attrset[i] = 0;
return decode_delegation(xdr, res);
xdr_error:
dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
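/*
 * The attrset handling in decode_open() is deliberately lossy: the server
 * may return up to 10 bitmap words, but only the first NFS4_BITMAP_SIZE
 * words are kept, and the remainder of res->attrset is zeroed so callers
 * can test any bit without knowing how many words actually arrived.
 */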
static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res)
{
int status;
status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (!status)
status = decode_stateid(xdr, &res->stateid);
return status;
}
static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res)
{
int status;
status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (!status)
status = decode_stateid(xdr, &res->stateid);
return status;
}
static int decode_putfh(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_PUTFH);
}
static int decode_putrootfh(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_PUTROOTFH);
}
static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_readres *res)
{
struct kvec *iov = req->rq_rcv_buf.head;
__be32 *p;
uint32_t count, eof, recvd, hdrlen;
int status;
status = decode_op_hdr(xdr, OP_READ);
if (status)
return status;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
eof = be32_to_cpup(p++);
count = be32_to_cpup(p);
hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (count > recvd) {
dprintk("NFS: server cheating in read reply: "
"count %u > recvd %u\n", count, recvd);
count = recvd;
eof = 0;
}
xdr_read_pages(xdr, count);
res->eof = eof;
res->count = count;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
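/*
 * The hdrlen/recvd arithmetic in decode_read() is a recurring sanity check
 * in these routines: 'count' is what the server claims to have returned,
 * while 'recvd' is how many bytes actually arrived after the reply header.
 * Clamping the wire value to recvd before xdr_read_pages() keeps a buggy
 * or malicious server from steering the client past its receive buffer.
 */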
static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
size_t hdrlen;
u32 recvd, pglen = rcvbuf->page_len;
int status;
status = decode_op_hdr(xdr, OP_READDIR);
if (!status)
status = decode_verifier(xdr, readdir->verifier.data);
if (unlikely(status))
return status;
dprintk("%s: verifier = %08x:%08x\n",
__func__,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1]);
hdrlen = (char *) xdr->p - (char *) iov->iov_base;
recvd = rcvbuf->len - hdrlen;
if (pglen > recvd)
pglen = recvd;
xdr_read_pages(xdr, pglen);
return pglen;
}
static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
size_t hdrlen;
u32 len, recvd;
__be32 *p;
int status;
status = decode_op_hdr(xdr, OP_READLINK);
if (status)
return status;
/* Convert length of symlink */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
len = be32_to_cpup(p);
if (len >= rcvbuf->page_len || len == 0) {
dprintk("nfs: server returned giant symlink!\n");
return -ENAMETOOLONG;
}
hdrlen = (char *) xdr->p - (char *) iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (recvd < len) {
dprintk("NFS: server cheating in readlink reply: "
"count %u > recvd %u\n", len, recvd);
return -EIO;
}
xdr_read_pages(xdr, len);
/*
* The XDR encode routine has set things up so that
* the link text will be copied directly into the
* buffer. We just have to do overflow-checking,
* and null-terminate the text (the VFS expects
* null-termination).
*/
xdr_terminate_string(rcvbuf, len);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
{
int status;
status = decode_op_hdr(xdr, OP_REMOVE);
if (status)
goto out;
status = decode_change_info(xdr, cinfo);
out:
return status;
}
static int decode_rename(struct xdr_stream *xdr, struct nfs4_change_info *old_cinfo,
struct nfs4_change_info *new_cinfo)
{
int status;
status = decode_op_hdr(xdr, OP_RENAME);
if (status)
goto out;
if ((status = decode_change_info(xdr, old_cinfo)))
goto out;
status = decode_change_info(xdr, new_cinfo);
out:
return status;
}
static int decode_renew(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_RENEW);
}
static int
decode_restorefh(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_RESTOREFH);
}
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
size_t *acl_len)
{
__be32 *savep;
uint32_t attrlen,
bitmap[3] = {0};
struct kvec *iov = req->rq_rcv_buf.head;
int status;
*acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto out;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto out;
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
size_t hdrlen;
u32 recvd;
/* We ignore &savep and don't do consistency checks on
* the attr length. Let userspace figure it out.... */
hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (attrlen > recvd) {
dprintk("NFS: server cheating in getattr"
" acl reply: attrlen %u > recvd %u\n",
attrlen, recvd);
return -EINVAL;
}
xdr_read_pages(xdr, attrlen);
*acl_len = attrlen;
} else
status = -EOPNOTSUPP;
out:
return status;
}
static int
decode_savefh(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_SAVEFH);
}
static int decode_setattr(struct xdr_stream *xdr)
{
__be32 *p;
uint32_t bmlen;
int status;
status = decode_op_hdr(xdr, OP_SETATTR);
if (status)
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
bmlen = be32_to_cpup(p);
p = xdr_inline_decode(xdr, bmlen << 2);
if (likely(p))
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_res *res)
{
__be32 *p;
uint32_t opnum;
int32_t nfserr;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
opnum = be32_to_cpup(p++);
if (opnum != OP_SETCLIENTID) {
dprintk("nfs: decode_setclientid: Server returned operation"
" %d\n", opnum);
return -EIO;
}
nfserr = be32_to_cpup(p);
if (nfserr == NFS_OK) {
p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
if (unlikely(!p))
goto out_overflow;
p = xdr_decode_hyper(p, &res->clientid);
memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE);
} else if (nfserr == NFSERR_CLID_INUSE) {
uint32_t len;
/* skip netid string */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
/* skip uaddr string */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
return -NFSERR_CLID_INUSE;
} else
return nfs4_stat_to_errno(nfserr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_setclientid_confirm(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_SETCLIENTID_CONFIRM);
}
static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res)
{
__be32 *p;
int status;
status = decode_op_hdr(xdr, OP_WRITE);
if (status)
return status;
p = xdr_inline_decode(xdr, 16);
if (unlikely(!p))
goto out_overflow;
res->count = be32_to_cpup(p++);
res->verf->committed = be32_to_cpup(p++);
memcpy(res->verf->verifier, p, 8);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_delegreturn(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_DELEGRETURN);
}
static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor)
{
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
flavor->gss.sec_oid4.len = be32_to_cpup(p);
if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN)
goto out_err;
p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len);
if (unlikely(!p))
goto out_overflow;
memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len);
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
flavor->gss.qop4 = be32_to_cpup(p++);
flavor->gss.service = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
out_err:
return -EINVAL;
}
static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
{
struct nfs4_secinfo_flavor *sec_flavor;
int status;
__be32 *p;
int i, num_flavors;
status = decode_op_hdr(xdr, OP_SECINFO);
if (status)
goto out;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
res->flavors->num_flavors = 0;
num_flavors = be32_to_cpup(p);
for (i = 0; i < num_flavors; i++) {
sec_flavor = &res->flavors->flavors[i];
if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE)
break;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
sec_flavor->flavor = be32_to_cpup(p);
if (sec_flavor->flavor == RPC_AUTH_GSS) {
status = decode_secinfo_gss(xdr, sec_flavor);
if (status)
goto out;
}
res->flavors->num_flavors++;
}
out:
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
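/*
 * res->flavors points at a single page, so the loop in decode_secinfo()
 * stops as soon as storing the next nfs4_secinfo_flavor would cross that
 * page boundary, no matter how many flavors the server advertised.
 */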
#if defined(CONFIG_NFS_V4_1)
static int decode_exchange_id(struct xdr_stream *xdr,
struct nfs41_exchange_id_res *res)
{
__be32 *p;
uint32_t dummy;
char *dummy_str;
int status;
struct nfs_client *clp = res->client;
status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
if (status)
return status;
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
xdr_decode_hyper(p, &clp->cl_clientid);
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
clp->cl_seqid = be32_to_cpup(p++);
clp->cl_exchange_flags = be32_to_cpup(p++);
/* We ask for SP4_NONE */
dummy = be32_to_cpup(p);
if (dummy != SP4_NONE)
return -EIO;
/* Throw away minor_id */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
/* Throw away Major id */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
/* Save server_scope */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
return -EIO;
memcpy(res->server_scope->server_scope, dummy_str, dummy);
res->server_scope->server_scope_sz = dummy;
/* Throw away Implementation id array */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_chan_attrs(struct xdr_stream *xdr,
struct nfs4_channel_attrs *attrs)
{
__be32 *p;
u32 nr_attrs, val;
p = xdr_inline_decode(xdr, 28);
if (unlikely(!p))
goto out_overflow;
val = be32_to_cpup(p++); /* headerpadsz */
if (val)
return -EINVAL; /* no support for header padding yet */
attrs->max_rqst_sz = be32_to_cpup(p++);
attrs->max_resp_sz = be32_to_cpup(p++);
attrs->max_resp_sz_cached = be32_to_cpup(p++);
attrs->max_ops = be32_to_cpup(p++);
attrs->max_reqs = be32_to_cpup(p++);
nr_attrs = be32_to_cpup(p);
if (unlikely(nr_attrs > 1)) {
printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n",
__func__, nr_attrs);
return -EINVAL;
}
if (nr_attrs == 1) {
p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */
if (unlikely(!p))
goto out_overflow;
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
{
return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
}
static int decode_create_session(struct xdr_stream *xdr,
struct nfs41_create_session_res *res)
{
__be32 *p;
int status;
struct nfs_client *clp = res->client;
struct nfs4_session *session = clp->cl_session;
status = decode_op_hdr(xdr, OP_CREATE_SESSION);
if (!status)
status = decode_sessionid(xdr, &session->sess_id);
if (unlikely(status))
return status;
/* seqid, flags */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
clp->cl_seqid = be32_to_cpup(p++);
session->flags = be32_to_cpup(p);
/* Channel attributes */
status = decode_chan_attrs(xdr, &session->fc_attrs);
if (!status)
status = decode_chan_attrs(xdr, &session->bc_attrs);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
{
return decode_op_hdr(xdr, OP_DESTROY_SESSION);
}
static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
{
return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
}
#endif /* CONFIG_NFS_V4_1 */
static int decode_sequence(struct xdr_stream *xdr,
struct nfs4_sequence_res *res,
struct rpc_rqst *rqstp)
{
#if defined(CONFIG_NFS_V4_1)
struct nfs4_sessionid id;
u32 dummy;
int status;
__be32 *p;
if (!res->sr_session)
return 0;
status = decode_op_hdr(xdr, OP_SEQUENCE);
if (!status)
status = decode_sessionid(xdr, &id);
if (unlikely(status))
goto out_err;
/*
* If the server returns different values for sessionID, slotID or
* sequence number, the server is looney tunes.
*/
status = -EREMOTEIO;
if (memcmp(id.data, res->sr_session->sess_id.data,
NFS4_MAX_SESSIONID_LEN)) {
dprintk("%s Invalid session id\n", __func__);
goto out_err;
}
p = xdr_inline_decode(xdr, 20);
if (unlikely(!p))
goto out_overflow;
/* seqid */
dummy = be32_to_cpup(p++);
if (dummy != res->sr_slot->seq_nr) {
dprintk("%s Invalid sequence number\n", __func__);
goto out_err;
}
/* slot id */
dummy = be32_to_cpup(p++);
if (dummy != res->sr_slot - res->sr_session->fc_slot_table.slots) {
dprintk("%s Invalid slot id\n", __func__);
goto out_err;
}
/* highest slot id - currently not processed */
dummy = be32_to_cpup(p++);
/* target highest slot id - currently not processed */
dummy = be32_to_cpup(p++);
/* result flags */
res->sr_status_flags = be32_to_cpup(p);
status = 0;
out_err:
res->sr_status = status;
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
status = -EIO;
goto out_err;
#else /* CONFIG_NFS_V4_1 */
return 0;
#endif /* CONFIG_NFS_V4_1 */
}
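/*
 * Note that decode_sequence() records its result in res->sr_status even on
 * failure; the sequence/session completion path presumably inspects that
 * cached value later, after the raw decode status has been consumed.
 */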
#if defined(CONFIG_NFS_V4_1)
/*
* TODO: Handle the case where the returned EOF flag is not true.
*/
static int decode_getdevicelist(struct xdr_stream *xdr,
struct pnfs_devicelist *res)
{
__be32 *p;
int status, i;
struct nfs_writeverf verftemp;
status = decode_op_hdr(xdr, OP_GETDEVICELIST);
if (status)
return status;
p = xdr_inline_decode(xdr, 8 + 8 + 4);
if (unlikely(!p))
goto out_overflow;
/* TODO: Skip cookie for now */
p += 2;
/* Read verifier */
p = xdr_decode_opaque_fixed(p, verftemp.verifier, 8);
res->num_devs = be32_to_cpup(p);
dprintk("%s: num_dev %d\n", __func__, res->num_devs);
if (res->num_devs > NFS4_PNFS_GETDEVLIST_MAXNUM) {
printk(KERN_ERR "%s too many result dev_num %u\n",
__func__, res->num_devs);
return -EIO;
}
p = xdr_inline_decode(xdr,
res->num_devs * NFS4_DEVICEID4_SIZE + 4);
if (unlikely(!p))
goto out_overflow;
for (i = 0; i < res->num_devs; i++)
p = xdr_decode_opaque_fixed(p, res->dev_id[i].data,
NFS4_DEVICEID4_SIZE);
res->eof = be32_to_cpup(p);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_getdeviceinfo(struct xdr_stream *xdr,
struct pnfs_device *pdev)
{
__be32 *p;
uint32_t len, type;
int status;
status = decode_op_hdr(xdr, OP_GETDEVICEINFO);
if (status) {
if (status == -ETOOSMALL) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
pdev->mincount = be32_to_cpup(p);
dprintk("%s: Min count too small. mincnt = %u\n",
__func__, pdev->mincount);
}
return status;
}
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
type = be32_to_cpup(p++);
if (type != pdev->layout_type) {
dprintk("%s: layout mismatch req: %u pdev: %u\n",
__func__, pdev->layout_type, type);
return -EINVAL;
}
/*
* Get the length of the opaque device_addr4. xdr_read_pages places
* the opaque device_addr4 in the xdr_buf->pages (pnfs_device->pages)
* and places the remaining xdr data in xdr_buf->tail
*/
pdev->mincount = be32_to_cpup(p);
xdr_read_pages(xdr, pdev->mincount); /* include space for the length */
/* Parse notification bitmap, verifying that it is zero. */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
len = be32_to_cpup(p);
if (len) {
uint32_t i;
p = xdr_inline_decode(xdr, 4 * len);
if (unlikely(!p))
goto out_overflow;
for (i = 0; i < len; i++, p++) {
if (be32_to_cpup(p)) {
dprintk("%s: notifications not supported\n",
__func__);
return -EIO;
}
}
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
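/*
 * GETDEVICEINFO uses a probe-and-retry sizing scheme: when the reply does
 * not fit in the buffer the client offered, the server returns
 * NFS4ERR_TOOSMALL along with the minimum count it needs, which is stored
 * in pdev->mincount above so that the caller can reallocate and reissue
 * the request.
 */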
static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
struct nfs4_layoutget_res *res)
{
__be32 *p;
int status;
u32 layout_count;
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
u32 hdrlen, recvd;
status = decode_op_hdr(xdr, OP_LAYOUTGET);
if (status)
return status;
p = xdr_inline_decode(xdr, 8 + NFS4_STATEID_SIZE);
if (unlikely(!p))
goto out_overflow;
res->return_on_close = be32_to_cpup(p++);
p = xdr_decode_opaque_fixed(p, res->stateid.data, NFS4_STATEID_SIZE);
layout_count = be32_to_cpup(p);
if (!layout_count) {
dprintk("%s: server responded with empty layout array\n",
__func__);
return -EINVAL;
}
p = xdr_inline_decode(xdr, 28);
if (unlikely(!p))
goto out_overflow;
p = xdr_decode_hyper(p, &res->range.offset);
p = xdr_decode_hyper(p, &res->range.length);
res->range.iomode = be32_to_cpup(p++);
res->type = be32_to_cpup(p++);
res->layoutp->len = be32_to_cpup(p);
dprintk("%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n",
__func__,
(unsigned long)res->range.offset,
(unsigned long)res->range.length,
res->range.iomode,
res->type,
res->layoutp->len);
hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (res->layoutp->len > recvd) {
dprintk("NFS: server cheating in layoutget reply: "
"layout len %u > recvd %u\n",
res->layoutp->len, recvd);
return -EINVAL;
}
xdr_read_pages(xdr, res->layoutp->len);
if (layout_count > 1) {
/* We only handle a length one array at the moment. Any
* further entries are just ignored. Note that this means
* the client may see a response that is less than the
* minimum it requested.
*/
dprintk("%s: server responded with %d layouts, dropping tail\n",
__func__, layout_count);
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_layoutreturn(struct xdr_stream *xdr,
struct nfs4_layoutreturn_res *res)
{
__be32 *p;
int status;
status = decode_op_hdr(xdr, OP_LAYOUTRETURN);
if (status)
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
res->lrs_present = be32_to_cpup(p);
if (res->lrs_present)
status = decode_stateid(xdr, &res->stateid);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_layoutcommit(struct xdr_stream *xdr,
struct rpc_rqst *req,
struct nfs4_layoutcommit_res *res)
{
__be32 *p;
__u32 sizechanged;
int status;
status = decode_op_hdr(xdr, OP_LAYOUTCOMMIT);
res->status = status;
if (status)
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
sizechanged = be32_to_cpup(p);
if (sizechanged) {
/* throw away new size */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
}
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
static int decode_test_stateid(struct xdr_stream *xdr,
struct nfs41_test_stateid_res *res)
{
__be32 *p;
int status;
int num_res;
status = decode_op_hdr(xdr, OP_TEST_STATEID);
if (status)
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
num_res = be32_to_cpup(p++);
if (num_res != 1)
goto out;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
res->status = be32_to_cpup(p++);
return res->status;
out_overflow:
print_overflow_msg(__func__, xdr);
out:
return -EIO;
}
static int decode_free_stateid(struct xdr_stream *xdr,
struct nfs41_free_stateid_res *res)
{
__be32 *p;
int status;
status = decode_op_hdr(xdr, OP_FREE_STATEID);
if (status)
return status;
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
res->status = be32_to_cpup(p++);
return res->status;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
#endif /* CONFIG_NFS_V4_1 */
/*
* END OF "GENERIC" DECODE ROUTINES.
*/
/*
* Decode OPEN_DOWNGRADE response
*/
static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs_closeres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_open_downgrade(xdr, res);
if (status != 0)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode ACCESS response
*/
static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs4_accessres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status != 0)
goto out;
status = decode_access(xdr, res);
if (status != 0)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode LOOKUP response
*/
static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs4_lookup_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_lookup(xdr);
if (status)
goto out;
status = decode_getfh(xdr, res->fh);
if (status)
goto out;
status = decode_getfattr(xdr, res->fattr, res->server
,!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode LOOKUP_ROOT response
*/
static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_lookup_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putrootfh(xdr);
if (status)
goto out;
status = decode_getfh(xdr, res->fh);
if (status == 0)
status = decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode REMOVE response
*/
static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_removeres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_remove(xdr, &res->cinfo);
if (status)
goto out;
decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode RENAME response
*/
static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_renameres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_savefh(xdr);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
if (status)
goto out;
/* Current FH is target directory */
if (decode_getfattr(xdr, res->new_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
decode_getfattr(xdr, res->old_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode LINK response
*/
static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs4_link_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_savefh(xdr);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_link(xdr, &res->cinfo);
if (status)
goto out;
/*
* Note order: OP_LINK leaves the directory as the current
* filehandle.
*/
if (decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode CREATE response
*/
static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs4_create_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_savefh(xdr);
if (status)
goto out;
status = decode_create(xdr, &res->dir_cinfo);
if (status)
goto out;
status = decode_getfh(xdr, res->fh);
if (status)
goto out;
if (decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
decode_getfattr(xdr, res->dir_fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode SYMLINK response
*/
static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs4_create_res *res)
{
return nfs4_xdr_dec_create(rqstp, xdr, res);
}
/*
* Decode GETATTR response
*/
static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs4_getattr_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Encode an SETACL request
*/
static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs_setaclargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_setacl(xdr, args, &hdr);
encode_nops(&hdr);
}
/*
* Decode SETACL response
*/
static int
nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_setaclres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_setattr(xdr);
out:
return status;
}
/*
* Decode GETACL response
*/
static int
nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_getaclres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_getacl(xdr, rqstp, &res->acl_len);
out:
return status;
}
/*
* Decode CLOSE response
*/
static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_closeres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_close(xdr, res);
if (status != 0)
goto out;
/*
* Note: Server may do delete on close for this file
* in which case the getattr call will fail with
* an ESTALE error. Shouldn't be a problem,
* though, since fattr->valid will remain unset.
*/
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode OPEN response
*/
static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_openres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_savefh(xdr);
if (status)
goto out;
status = decode_open(xdr, res);
if (status)
goto out;
if (decode_getfh(xdr, &res->fh) != 0)
goto out;
if (decode_getfattr(xdr, res->f_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if (decode_restorefh(xdr) != 0)
goto out;
decode_getfattr(xdr, res->dir_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode OPEN_CONFIRM response
*/
static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs_open_confirmres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_open_confirm(xdr, res);
out:
return status;
}
/*
* Decode OPEN response
*/
static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs_openres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_open(xdr, res);
if (status)
goto out;
decode_getfattr(xdr, res->f_attr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode SETATTR response
*/
static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs_setattrres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_setattr(xdr);
if (status)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode LOCK response
*/
static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_lock_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_lock(xdr, res);
out:
return status;
}
/*
* Decode LOCKT response
*/
static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_lockt_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_lockt(xdr, res);
out:
return status;
}
/*
* Decode LOCKU response
*/
static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_locku_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_locku(xdr, res);
out:
return status;
}
static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
struct xdr_stream *xdr, void *dummy)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_release_lockowner(xdr);
return status;
}
/*
* Decode READLINK response
*/
static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_readlink_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_readlink(xdr, rqstp);
out:
return status;
}
/*
* Decode READDIR response
*/
static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs4_readdir_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_readdir(xdr, rqstp, res);
out:
return status;
}
/*
* Decode Read response
*/
static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_readres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_read(xdr, rqstp, res);
if (!status)
status = res->count;
out:
return status;
}
/*
* Decode WRITE response
*/
static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_writeres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_write(xdr, res);
if (status)
goto out;
if (res->fattr)
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
if (!status)
status = res->count;
out:
return status;
}
/*
* Decode COMMIT response
*/
static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_writeres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_commit(xdr, res);
if (status)
goto out;
if (res->fattr)
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode FSINFO response
*/
static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs4_fsinfo_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_sequence(xdr, &res->seq_res, req);
if (!status)
status = decode_putfh(xdr);
if (!status)
status = decode_fsinfo(xdr, res->fsinfo);
return status;
}
/*
* Decode PATHCONF response
*/
static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs4_pathconf_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_sequence(xdr, &res->seq_res, req);
if (!status)
status = decode_putfh(xdr);
if (!status)
status = decode_pathconf(xdr, res->pathconf);
return status;
}
/*
* Decode STATFS response
*/
static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
struct nfs4_statfs_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_sequence(xdr, &res->seq_res, req);
if (!status)
status = decode_putfh(xdr);
if (!status)
status = decode_statfs(xdr, res->fsstat);
return status;
}
/*
* Decode GETATTR_BITMAP response
*/
static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_server_caps_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, req);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_server_caps(xdr, res);
out:
return status;
}
/*
* Decode RENEW response
*/
static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
void *__unused)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_renew(xdr);
return status;
}
/*
* Decode SETCLIENTID response
*/
static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_setclientid_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_setclientid(xdr, res);
return status;
}
/*
* Decode SETCLIENTID_CONFIRM response
*/
static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs_fsinfo *fsinfo)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_setclientid_confirm(xdr);
if (!status)
status = decode_putrootfh(xdr);
if (!status)
status = decode_fsinfo(xdr, fsinfo);
return status;
}
/*
* Decode DELEGRETURN response
*/
static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_delegreturnres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status != 0)
goto out;
status = decode_delegreturn(xdr);
if (status != 0)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode FS_LOCATIONS response
*/
static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs4_fs_locations_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, req);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_lookup(xdr);
if (status)
goto out;
xdr_enter_page(xdr, PAGE_SIZE);
status = decode_getfattr(xdr, &res->fs_locations->fattr,
res->fs_locations->server,
!RPC_IS_ASYNC(req->rq_task));
out:
return status;
}
/*
* Decode SECINFO response
*/
static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_secinfo_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_secinfo(xdr, res);
out:
return status;
}
#if defined(CONFIG_NFS_V4_1)
/*
* Decode EXCHANGE_ID response
*/
static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
void *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_exchange_id(xdr, res);
return status;
}
/*
* Decode CREATE_SESSION response
*/
static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs41_create_session_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_create_session(xdr, res);
return status;
}
/*
* Decode DESTROY_SESSION response
*/
static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
void *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_destroy_session(xdr, res);
return status;
}
/*
* Decode SEQUENCE response
*/
static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_sequence_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_sequence(xdr, res, rqstp);
return status;
}
/*
* Decode GET_LEASE_TIME response
*/
static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_get_lease_time_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_sequence(xdr, &res->lr_seq_res, rqstp);
if (!status)
status = decode_putrootfh(xdr);
if (!status)
status = decode_fsinfo(xdr, res->lr_fsinfo);
return status;
}
/*
* Decode RECLAIM_COMPLETE response
*/
static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs41_reclaim_complete_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (!status)
status = decode_reclaim_complete(xdr, (void *)NULL);
return status;
}
/*
* Decode GETDEVICELIST response
*/
static int nfs4_xdr_dec_getdevicelist(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_getdevicelist_res *res)
{
struct compound_hdr hdr;
int status;
dprintk("encoding getdevicelist!\n");
status = decode_compound_hdr(xdr, &hdr);
if (status != 0)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status != 0)
goto out;
status = decode_putfh(xdr);
if (status != 0)
goto out;
status = decode_getdevicelist(xdr, res->devlist);
out:
return status;
}
/*
* Decode GETDEVINFO response
*/
static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_getdeviceinfo_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status != 0)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status != 0)
goto out;
status = decode_getdeviceinfo(xdr, res->pdev);
out:
return status;
}
/*
* Decode LAYOUTGET response
*/
static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_layoutget_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_layoutget(xdr, rqstp, res);
out:
return status;
}
/*
* Decode LAYOUTRETURN response
*/
static int nfs4_xdr_dec_layoutreturn(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_layoutreturn_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_layoutreturn(xdr, res);
out:
return status;
}
/*
* Decode LAYOUTCOMMIT response
*/
static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_layoutcommit_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_layoutcommit(xdr, rqstp, res);
if (status)
goto out;
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
/*
* Decode SECINFO_NO_NAME response
*/
static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs4_secinfo_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putrootfh(xdr);
if (status)
goto out;
status = decode_secinfo(xdr, res);
out:
return status;
}
/*
* Decode TEST_STATEID response
*/
static int nfs4_xdr_dec_test_stateid(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs41_test_stateid_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_test_stateid(xdr, res);
out:
return status;
}
/*
* Decode FREE_STATEID response
*/
static int nfs4_xdr_dec_free_stateid(struct rpc_rqst *rqstp,
struct xdr_stream *xdr,
struct nfs41_free_stateid_res *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_free_stateid(xdr, res);
out:
return status;
}
#endif /* CONFIG_NFS_V4_1 */
/**
* nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in
* the local page cache.
* @xdr: XDR stream where entry resides
* @entry: buffer to fill in with entry data
* @plus: boolean indicating whether this should be a readdirplus entry
*
* Returns zero if successful, otherwise a negative errno value is
* returned.
*
* This function is not invoked during READDIR reply decoding, but
* rather whenever an application invokes the getdents(2) system call
* on a directory already in our cache.
*/
int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
int plus)
{
uint32_t bitmap[3] = {0};
uint32_t len;
__be32 *p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
if (*p == xdr_zero) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
if (*p == xdr_zero)
return -EAGAIN;
entry->eof = 1;
return -EBADCOOKIE;
}
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
entry->prev_cookie = entry->cookie;
p = xdr_decode_hyper(p, &entry->cookie);
entry->len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, entry->len);
if (unlikely(!p))
goto out_overflow;
entry->name = (const char *) p;
/*
* In case the server doesn't return an inode number,
* we fake one here. (We don't use inode number 0,
* since glibc seems to choke on it...)
*/
entry->ino = 1;
entry->fattr->valid = 0;
if (decode_attr_bitmap(xdr, bitmap) < 0)
goto out_overflow;
if (decode_attr_length(xdr, &len, &p) < 0)
goto out_overflow;
if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
entry->server, 1) < 0)
goto out_overflow;
if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
entry->ino = entry->fattr->mounted_on_fileid;
else if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
entry->ino = entry->fattr->fileid;
entry->d_type = DT_UNKNOWN;
if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EAGAIN;
}
/*
* We need to translate between nfs status return values and
* the local errno values which may not be the same.
*/
static struct {
int stat;
int errno;
} nfs_errtbl[] = {
{ NFS4_OK, 0 },
{ NFS4ERR_PERM, -EPERM },
{ NFS4ERR_NOENT, -ENOENT },
{ NFS4ERR_IO, -errno_NFSERR_IO},
{ NFS4ERR_NXIO, -ENXIO },
{ NFS4ERR_ACCESS, -EACCES },
{ NFS4ERR_EXIST, -EEXIST },
{ NFS4ERR_XDEV, -EXDEV },
{ NFS4ERR_NOTDIR, -ENOTDIR },
{ NFS4ERR_ISDIR, -EISDIR },
{ NFS4ERR_INVAL, -EINVAL },
{ NFS4ERR_FBIG, -EFBIG },
{ NFS4ERR_NOSPC, -ENOSPC },
{ NFS4ERR_ROFS, -EROFS },
{ NFS4ERR_MLINK, -EMLINK },
{ NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
{ NFS4ERR_NOTEMPTY, -ENOTEMPTY },
{ NFS4ERR_DQUOT, -EDQUOT },
{ NFS4ERR_STALE, -ESTALE },
{ NFS4ERR_BADHANDLE, -EBADHANDLE },
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
{ NFS4ERR_SERVERFAULT, -EREMOTEIO },
{ NFS4ERR_BADTYPE, -EBADTYPE },
{ NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_SYMLINK, -ELOOP },
{ NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_DEADLOCK, -EDEADLK },
{ -1, -EIO }
};
/*
* Convert an NFSv4 status code to a local errno value.
*/
static int
nfs4_stat_to_errno(int stat)
{
int i;
for (i = 0; nfs_errtbl[i].stat != -1; i++) {
if (nfs_errtbl[i].stat == stat)
return nfs_errtbl[i].errno;
}
if (stat <= 10000 || stat > 10100) {
/* The server is looney tunes. */
return -EREMOTEIO;
}
/* If we cannot translate the error, the recovery routines should
* handle it.
* Note: remaining NFSv4 error codes have values > 10000, so should
* not conflict with native Linux error codes.
*/
return -stat;
}
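/*
 * A worked example of the translation: a server replying NFS4ERR_NOENT (2)
 * maps through nfs_errtbl to -ENOENT. An unlisted status such as 10099
 * falls inside the (10000, 10100] window and is passed up as -10099 for
 * the recovery code to handle, while anything outside that window marks
 * the server as broken and becomes -EREMOTEIO.
 */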
#define PROC(proc, argtype, restype) \
[NFSPROC4_CLNT_##proc] = { \
.p_proc = NFSPROC4_COMPOUND, \
.p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \
.p_decode = (kxdrdproc_t)nfs4_xdr_##restype, \
.p_arglen = NFS4_##argtype##_sz, \
.p_replen = NFS4_##restype##_sz, \
.p_statidx = NFSPROC4_CLNT_##proc, \
.p_name = #proc, \
}
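/*
 * The PROC() macro above stamps out one rpc_procinfo entry per compound.
 * As an illustration, PROC(READ, enc_read, dec_read) expands roughly to:
 *
 *	[NFSPROC4_CLNT_READ] = {
 *		.p_proc   = NFSPROC4_COMPOUND,
 *		.p_encode = (kxdreproc_t)nfs4_xdr_enc_read,
 *		.p_decode = (kxdrdproc_t)nfs4_xdr_dec_read,
 *		.p_arglen = NFS4_enc_read_sz,
 *		.p_replen = NFS4_dec_read_sz,
 *		...
 *	},
 *
 * i.e. every NFSv4 procedure is really the single COMPOUND RPC, with
 * per-operation encode and decode callbacks.
 */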
struct rpc_procinfo nfs4_procedures[] = {
PROC(READ, enc_read, dec_read),
PROC(WRITE, enc_write, dec_write),
PROC(COMMIT, enc_commit, dec_commit),
PROC(OPEN, enc_open, dec_open),
PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm),
PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr),
PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade),
PROC(CLOSE, enc_close, dec_close),
PROC(SETATTR, enc_setattr, dec_setattr),
PROC(FSINFO, enc_fsinfo, dec_fsinfo),
PROC(RENEW, enc_renew, dec_renew),
PROC(SETCLIENTID, enc_setclientid, dec_setclientid),
PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
PROC(LOCK, enc_lock, dec_lock),
PROC(LOCKT, enc_lockt, dec_lockt),
PROC(LOCKU, enc_locku, dec_locku),
PROC(ACCESS, enc_access, dec_access),
PROC(GETATTR, enc_getattr, dec_getattr),
PROC(LOOKUP, enc_lookup, dec_lookup),
PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
PROC(REMOVE, enc_remove, dec_remove),
PROC(RENAME, enc_rename, dec_rename),
PROC(LINK, enc_link, dec_link),
PROC(SYMLINK, enc_symlink, dec_symlink),
PROC(CREATE, enc_create, dec_create),
PROC(PATHCONF, enc_pathconf, dec_pathconf),
PROC(STATFS, enc_statfs, dec_statfs),
PROC(READLINK, enc_readlink, dec_readlink),
PROC(READDIR, enc_readdir, dec_readdir),
PROC(SERVER_CAPS, enc_server_caps, dec_server_caps),
PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn),
PROC(GETACL, enc_getacl, dec_getacl),
PROC(SETACL, enc_setacl, dec_setacl),
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
PROC(SECINFO, enc_secinfo, dec_secinfo),
#if defined(CONFIG_NFS_V4_1)
PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
PROC(CREATE_SESSION, enc_create_session, dec_create_session),
PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
PROC(SEQUENCE, enc_sequence, dec_sequence),
PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist),
#endif /* CONFIG_NFS_V4_1 */
};
struct rpc_version nfs_version4 = {
.number = 4,
.nrprocs = ARRAY_SIZE(nfs4_procedures),
.procs = nfs4_procedures
};
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3527_1 |
crossvul-cpp_data_bad_1613_0 | /* -----------------------------------------------------------------------------
* Copyright (c) 2011 Ozmo Inc
* Released under the GNU General Public License Version 2 (GPLv2).
*
* This file provides the implementation of a USB host controller device that
* does not have any associated hardware. Instead the virtual device is
* connected to the WiFi network and emulates the operation of a USB hcd by
* receiving and sending network frames.
* Note:
* We take great pains to reduce the amount of code where interrupts need to be
 * disabled and in this respect we are different from standard HCDs. In
 * particular we don't want in_irq() code bleeding over to the protocol side of
 * the driver.
 * The troublesome functions are the urb enqueue and dequeue functions, both of
 * which can be called in_irq(). So for these functions we put the urbs into a
 * queue and request a tasklet to process them; a minimal sketch of this
 * pattern follows the #include block below. This means that a spinlock with
 * interrupts disabled must be held for insertion and removal, but most code
 * is in tasklet or soft irq context. The lock that protects this list is called
* the tasklet lock and serves the purpose of the 'HCD lock' which must be held
* when calling the following functions.
* usb_hcd_link_urb_to_ep()
* usb_hcd_unlink_urb_from_ep()
* usb_hcd_flush_endpoint()
* usb_hcd_check_unlink_urb()
* -----------------------------------------------------------------------------
*/
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "linux/usb/hcd.h"
#include <asm/unaligned.h>
#include "ozdbg.h"
#include "ozusbif.h"
#include "ozurbparanoia.h"
#include "ozhcd.h"
/*
* Number of units of buffering to capture for an isochronous IN endpoint before
* allowing data to be indicated up.
*/
#define OZ_IN_BUFFERING_UNITS 100
/* Name of our platform device.
*/
#define OZ_PLAT_DEV_NAME "ozwpan"
/* EP0 timeout before ep0 request is again added to TX queue (13 * 8 = 104 ms).
*/
#define EP0_TIMEOUT_COUNTER 13
/* Debounce time HCD driver should wait before unregistering.
*/
#define OZ_HUB_DEBOUNCE_TIMEOUT 1500
/*
* Used to link urbs together and also store some status information for each
* urb.
* A cache of these are kept in a pool to reduce number of calls to kmalloc.
*/
struct oz_urb_link {
struct list_head link;
struct urb *urb;
struct oz_port *port;
u8 req_id;
u8 ep_num;
unsigned submit_counter;
};
static struct kmem_cache *oz_urb_link_cache;
/* Holds state information about a USB endpoint.
*/
#define OZ_EP_BUFFER_SIZE_ISOC (1024 * 24)
#define OZ_EP_BUFFER_SIZE_INT 512
struct oz_endpoint {
struct list_head urb_list; /* List of oz_urb_link items. */
struct list_head link; /* For isoc ep, links in to isoc
lists of oz_port. */
struct timespec timestamp;
int credit;
int credit_ceiling;
u8 ep_num;
u8 attrib;
u8 *buffer;
int buffer_size;
int in_ix;
int out_ix;
int buffered_units;
unsigned flags;
int start_frame;
};
/* Bits in the flags field. */
#define OZ_F_EP_BUFFERING 0x1
#define OZ_F_EP_HAVE_STREAM 0x2
/* Holds state information about a USB interface.
*/
struct oz_interface {
unsigned ep_mask;
u8 alt;
};
/* Holds state information about an hcd port.
*/
#define OZ_NB_ENDPOINTS 16
struct oz_port {
unsigned flags;
unsigned status;
void *hpd;
struct oz_hcd *ozhcd;
spinlock_t port_lock;
u8 bus_addr;
u8 next_req_id;
u8 config_num;
int num_iface;
struct oz_interface *iface;
struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS];
struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS];
struct list_head isoc_out_ep;
struct list_head isoc_in_ep;
};
#define OZ_PORT_F_PRESENT 0x1
#define OZ_PORT_F_CHANGED 0x2
#define OZ_PORT_F_DYING 0x4
/* Data structure in the private context area of struct usb_hcd.
*/
#define OZ_NB_PORTS 8
struct oz_hcd {
spinlock_t hcd_lock;
struct list_head urb_pending_list;
struct list_head urb_cancel_list;
struct list_head orphanage;
int conn_port; /* Port that is currently connecting, -1 if none.*/
struct oz_port ports[OZ_NB_PORTS];
uint flags;
struct usb_hcd *hcd;
};
/* Bits in flags field.
*/
#define OZ_HDC_F_SUSPENDED 0x1
/*
* Static function prototypes.
*/
static int oz_hcd_start(struct usb_hcd *hcd);
static void oz_hcd_stop(struct usb_hcd *hcd);
static void oz_hcd_shutdown(struct usb_hcd *hcd);
static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags);
static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);
static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);
static int oz_hcd_get_frame_number(struct usb_hcd *hcd);
static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf);
static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
u16 windex, char *buf, u16 wlength);
static int oz_hcd_bus_suspend(struct usb_hcd *hcd);
static int oz_hcd_bus_resume(struct usb_hcd *hcd);
static int oz_plat_probe(struct platform_device *dev);
static int oz_plat_remove(struct platform_device *dev);
static void oz_plat_shutdown(struct platform_device *dev);
static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg);
static int oz_plat_resume(struct platform_device *dev);
static void oz_urb_process_tasklet(unsigned long unused);
static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
struct oz_port *port, struct usb_host_config *config,
gfp_t mem_flags);
static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
struct oz_port *port);
static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_port *port,
struct usb_host_interface *intf, gfp_t mem_flags);
static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_port *port, int if_ix);
static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
gfp_t mem_flags);
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb);
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
/*
* Static external variables.
*/
static struct platform_device *g_plat_dev;
static struct oz_hcd *g_ozhcd;
static DEFINE_SPINLOCK(g_hcdlock); /* Guards g_ozhcd. */
static const char g_hcd_name[] = "Ozmo WPAN";
static DEFINE_SPINLOCK(g_tasklet_lock);
static struct tasklet_struct g_urb_process_tasklet;
static struct tasklet_struct g_urb_cancel_tasklet;
static atomic_t g_pending_urbs = ATOMIC_INIT(0);
static atomic_t g_usb_frame_number = ATOMIC_INIT(0);
static const struct hc_driver g_oz_hc_drv = {
.description = g_hcd_name,
.product_desc = "Ozmo Devices WPAN",
.hcd_priv_size = sizeof(struct oz_hcd),
.flags = HCD_USB11,
.start = oz_hcd_start,
.stop = oz_hcd_stop,
.shutdown = oz_hcd_shutdown,
.urb_enqueue = oz_hcd_urb_enqueue,
.urb_dequeue = oz_hcd_urb_dequeue,
.endpoint_disable = oz_hcd_endpoint_disable,
.endpoint_reset = oz_hcd_endpoint_reset,
.get_frame_number = oz_hcd_get_frame_number,
.hub_status_data = oz_hcd_hub_status_data,
.hub_control = oz_hcd_hub_control,
.bus_suspend = oz_hcd_bus_suspend,
.bus_resume = oz_hcd_bus_resume,
};
static struct platform_driver g_oz_plat_drv = {
.probe = oz_plat_probe,
.remove = oz_plat_remove,
.shutdown = oz_plat_shutdown,
.suspend = oz_plat_suspend,
.resume = oz_plat_resume,
.driver = {
.name = OZ_PLAT_DEV_NAME,
},
};
/*
* Gets our private context area (which is of type struct oz_hcd) from the
* usb_hcd structure.
* Context: any
*/
static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
{
return (struct oz_hcd *)hcd->hcd_priv;
}
/*
* Searches list of ports to find the index of the one with a specified USB
* bus address. If none of the ports has the bus address then the connection
 * port is returned if there is one, or -1 otherwise.
* Context: any
*/
static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
{
int i;
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].bus_addr == bus_addr)
return i;
}
return ozhcd->conn_port;
}
/*
* Context: any
*/
static struct oz_urb_link *oz_alloc_urb_link(void)
{
return kmem_cache_alloc(oz_urb_link_cache, GFP_ATOMIC);
}
/*
* Context: any
*/
static void oz_free_urb_link(struct oz_urb_link *urbl)
{
if (!urbl)
return;
kmem_cache_free(oz_urb_link_cache, urbl);
}
/*
* Allocates endpoint structure and optionally a buffer. If a buffer is
* allocated it immediately follows the endpoint structure.
* Context: softirq
*/
static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
{
struct oz_endpoint *ep;
ep = kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
if (!ep)
return NULL;
INIT_LIST_HEAD(&ep->urb_list);
INIT_LIST_HEAD(&ep->link);
ep->credit = -1;
if (buffer_size) {
ep->buffer_size = buffer_size;
ep->buffer = (u8 *)(ep+1);
}
return ep;
}
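/*
 * Illustration of the single-allocation layout produced above when
 * buffer_size is non-zero: one kzalloc() covers both objects and
 * ep->buffer points just past the struct.
 *
 *	+--------------------+---------------------+
 *	| struct oz_endpoint |  buffer_size bytes  |
 *	+--------------------+---------------------+
 *	^ep                  ^ep + 1 == ep->buffer
 */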
/*
* Pre-condition: Must be called with g_tasklet_lock held and interrupts
* disabled.
* Context: softirq or process
*/
static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd,
struct urb *urb)
{
struct oz_urb_link *urbl;
list_for_each_entry(urbl, &ozhcd->urb_cancel_list, link) {
if (urb == urbl->urb) {
list_del_init(&urbl->link);
return urbl;
}
}
return NULL;
}
/*
* This is called when we have finished processing an urb. It unlinks it from
* the ep and returns it to the core.
* Context: softirq or process
*/
static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned long irq_state;
struct oz_urb_link *cancel_urbl;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
usb_hcd_unlink_urb_from_ep(hcd, urb);
/* Clear hcpriv which will prevent it being put in the cancel list
* in the event that an attempt is made to cancel it.
*/
urb->hcpriv = NULL;
/* Walk the cancel list in case the urb is already sitting there.
* Since we process the cancel list in a tasklet rather than in
* the dequeue function this could happen.
*/
cancel_urbl = oz_uncancel_urb(ozhcd, urb);
/* Note: we release lock but do not enable local irqs.
* It appears that usb_hcd_giveback_urb() expects irqs to be disabled,
* or at least other host controllers disable interrupts at this point
* so we do the same. We must, however, release the lock otherwise a
* deadlock will occur if an urb is submitted to our driver in the urb
* completion function. Because we disable interrupts it is possible
* that the urb_enqueue function can be called with them disabled.
*/
spin_unlock(&g_tasklet_lock);
if (oz_forget_urb(urb)) {
oz_dbg(ON, "ERROR Unknown URB %p\n", urb);
} else {
atomic_dec(&g_pending_urbs);
usb_hcd_giveback_urb(hcd, urb, status);
}
spin_lock(&g_tasklet_lock);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_free_urb_link(cancel_urbl);
}
/*
* Deallocates an endpoint including deallocating any associated stream and
* returning any queued urbs to the core.
* Context: softirq
*/
static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
{
if (port) {
LIST_HEAD(list);
struct oz_hcd *ozhcd = port->ozhcd;
if (ep->flags & OZ_F_EP_HAVE_STREAM)
oz_usb_stream_delete(port->hpd, ep->ep_num);
/* Transfer URBs to the orphanage while we hold the lock. */
spin_lock_bh(&ozhcd->hcd_lock);
/* Note: this works even if ep->urb_list is empty.*/
list_replace_init(&ep->urb_list, &list);
/* Put the URBs in the orphanage. */
list_splice_tail(&list, &ozhcd->orphanage);
spin_unlock_bh(&ozhcd->hcd_lock);
}
oz_dbg(ON, "Freeing endpoint memory\n");
kfree(ep);
}
/*
* Context: softirq
*/
static void oz_complete_buffered_urb(struct oz_port *port,
struct oz_endpoint *ep,
struct urb *urb)
{
int data_len, available_space, copy_len;
data_len = ep->buffer[ep->out_ix];
if (data_len <= urb->transfer_buffer_length)
available_space = data_len;
else
available_space = urb->transfer_buffer_length;
if (++ep->out_ix == ep->buffer_size)
ep->out_ix = 0;
copy_len = ep->buffer_size - ep->out_ix;
if (copy_len >= available_space)
copy_len = available_space;
memcpy(urb->transfer_buffer, &ep->buffer[ep->out_ix], copy_len);
if (copy_len < available_space) {
memcpy((urb->transfer_buffer + copy_len), ep->buffer,
(available_space - copy_len));
ep->out_ix = available_space - copy_len;
} else {
ep->out_ix += copy_len;
}
urb->actual_length = available_space;
if (ep->out_ix == ep->buffer_size)
ep->out_ix = 0;
ep->buffered_units--;
oz_dbg(ON, "Trying to give back buffered frame of size=%d\n",
available_space);
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
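/*
 * Note on the buffer format consumed above: each buffered unit is stored
 * as a single length byte followed by that many payload bytes, wrapping
 * around the circular buffer, so ep->buffered_units counts whole frames
 * rather than bytes. oz_hcd_buffer_data() below writes the same format.
 */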
/*
* Context: softirq
*/
static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
struct urb *urb, u8 req_id)
{
struct oz_urb_link *urbl;
struct oz_endpoint *ep = NULL;
int err = 0;
if (ep_addr >= OZ_NB_ENDPOINTS) {
oz_dbg(ON, "%s: Invalid endpoint number\n", __func__);
return -EINVAL;
}
urbl = oz_alloc_urb_link();
if (!urbl)
return -ENOMEM;
urbl->submit_counter = 0;
urbl->urb = urb;
urbl->req_id = req_id;
urbl->ep_num = ep_addr;
/* Hold lock while we insert the URB into the list within the
* endpoint structure.
*/
spin_lock_bh(&port->ozhcd->hcd_lock);
/* If the urb has been unlinked while out of any list then
* complete it now.
*/
if (urb->unlinked) {
spin_unlock_bh(&port->ozhcd->hcd_lock);
oz_dbg(ON, "urb %p unlinked so complete immediately\n", urb);
oz_complete_urb(port->ozhcd->hcd, urb, 0);
oz_free_urb_link(urbl);
return 0;
}
if (in_dir)
ep = port->in_ep[ep_addr];
else
ep = port->out_ep[ep_addr];
if (!ep) {
err = -ENOMEM;
goto out;
}
	/* For an interrupt endpoint, check for buffered data and
	 * complete the urb from the buffer if any is available.
*/
if (((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)
&& ep->buffered_units > 0) {
oz_free_urb_link(urbl);
spin_unlock_bh(&port->ozhcd->hcd_lock);
oz_complete_buffered_urb(port, ep, urb);
return 0;
}
if (port->hpd) {
list_add_tail(&urbl->link, &ep->urb_list);
if (!in_dir && ep_addr && (ep->credit < 0)) {
getrawmonotonic(&ep->timestamp);
ep->credit = 0;
}
} else {
err = -EPIPE;
}
out:
spin_unlock_bh(&port->ozhcd->hcd_lock);
if (err)
oz_free_urb_link(urbl);
return err;
}
/*
* Removes an urb from the queue in the endpoint.
* Returns 0 if it is found and -EIDRM otherwise.
* Context: softirq
*/
static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
struct urb *urb)
{
struct oz_urb_link *urbl = NULL;
struct oz_endpoint *ep;
spin_lock_bh(&port->ozhcd->hcd_lock);
if (in_dir)
ep = port->in_ep[ep_addr];
else
ep = port->out_ep[ep_addr];
if (ep) {
struct list_head *e;
list_for_each(e, &ep->urb_list) {
urbl = list_entry(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
list_del_init(e);
break;
}
urbl = NULL;
}
}
spin_unlock_bh(&port->ozhcd->hcd_lock);
oz_free_urb_link(urbl);
return urbl ? 0 : -EIDRM;
}
/*
* Finds an urb given its request id.
* Context: softirq
*/
static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
u8 req_id)
{
struct oz_hcd *ozhcd = port->ozhcd;
struct urb *urb = NULL;
struct oz_urb_link *urbl;
struct oz_endpoint *ep;
spin_lock_bh(&ozhcd->hcd_lock);
ep = port->out_ep[ep_ix];
if (ep) {
struct list_head *e;
list_for_each(e, &ep->urb_list) {
urbl = list_entry(e, struct oz_urb_link, link);
if (urbl->req_id == req_id) {
urb = urbl->urb;
list_del_init(e);
break;
}
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
	/* If urb is non-NULL then we must have an urb link to delete.
*/
if (urb)
oz_free_urb_link(urbl);
return urb;
}
/*
* Pre-condition: Port lock must be held.
* Context: softirq
*/
static void oz_acquire_port(struct oz_port *port, void *hpd)
{
INIT_LIST_HEAD(&port->isoc_out_ep);
INIT_LIST_HEAD(&port->isoc_in_ep);
port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED;
port->status |= USB_PORT_STAT_CONNECTION |
(USB_PORT_STAT_C_CONNECTION << 16);
oz_usb_get(hpd);
port->hpd = hpd;
}
/*
* Context: softirq
*/
static struct oz_hcd *oz_hcd_claim(void)
{
struct oz_hcd *ozhcd;
spin_lock_bh(&g_hcdlock);
ozhcd = g_ozhcd;
if (ozhcd)
usb_get_hcd(ozhcd->hcd);
spin_unlock_bh(&g_hcdlock);
return ozhcd;
}
/*
* Context: softirq
*/
static inline void oz_hcd_put(struct oz_hcd *ozhcd)
{
if (ozhcd)
usb_put_hcd(ozhcd->hcd);
}
/*
* This is called by the protocol handler to notify that a PD has arrived.
* We allocate a port to associate with the PD and create a structure for
* endpoint 0. This port is made the connection port.
 * In the event that one of the other ports is already a connection port then
* we fail.
 * TODO We should be able to do better than fail and should be able to remember
* that this port needs configuring and make it the connection port once the
* current connection port has been assigned an address. Collisions here are
* probably very rare indeed.
* Context: softirq
*/
struct oz_port *oz_hcd_pd_arrived(void *hpd)
{
int i;
struct oz_port *hport;
struct oz_hcd *ozhcd;
struct oz_endpoint *ep;
ozhcd = oz_hcd_claim();
if (!ozhcd)
return NULL;
/* Allocate an endpoint object in advance (before holding hcd lock) to
* use for out endpoint 0.
*/
ep = oz_ep_alloc(0, GFP_ATOMIC);
if (!ep)
goto err_put;
spin_lock_bh(&ozhcd->hcd_lock);
if (ozhcd->conn_port >= 0)
goto err_unlock;
for (i = 0; i < OZ_NB_PORTS; i++) {
struct oz_port *port = &ozhcd->ports[i];
spin_lock(&port->port_lock);
if (!(port->flags & (OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED))) {
oz_acquire_port(port, hpd);
spin_unlock(&port->port_lock);
break;
}
spin_unlock(&port->port_lock);
}
if (i == OZ_NB_PORTS)
goto err_unlock;
ozhcd->conn_port = i;
hport = &ozhcd->ports[i];
hport->out_ep[0] = ep;
spin_unlock_bh(&ozhcd->hcd_lock);
if (ozhcd->flags & OZ_HDC_F_SUSPENDED)
usb_hcd_resume_root_hub(ozhcd->hcd);
usb_hcd_poll_rh_status(ozhcd->hcd);
oz_hcd_put(ozhcd);
return hport;
err_unlock:
spin_unlock_bh(&ozhcd->hcd_lock);
oz_ep_free(NULL, ep);
err_put:
oz_hcd_put(ozhcd);
return NULL;
}
/*
* This is called by the protocol handler to notify that the PD has gone away.
* We need to deallocate all resources and then request that the root hub is
* polled. We release the reference we hold on the PD.
* Context: softirq
*/
void oz_hcd_pd_departed(struct oz_port *port)
{
struct oz_hcd *ozhcd;
void *hpd;
struct oz_endpoint *ep = NULL;
if (port == NULL) {
oz_dbg(ON, "%s: port = 0\n", __func__);
return;
}
ozhcd = port->ozhcd;
if (ozhcd == NULL)
return;
/* Check if this is the connection port - if so clear it.
*/
spin_lock_bh(&ozhcd->hcd_lock);
if ((ozhcd->conn_port >= 0) &&
(port == &ozhcd->ports[ozhcd->conn_port])) {
oz_dbg(ON, "Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_lock(&port->port_lock);
port->flags |= OZ_PORT_F_DYING;
spin_unlock(&port->port_lock);
spin_unlock_bh(&ozhcd->hcd_lock);
oz_clean_endpoints_for_config(ozhcd->hcd, port);
spin_lock_bh(&port->port_lock);
hpd = port->hpd;
port->hpd = NULL;
port->bus_addr = 0xff;
port->config_num = 0;
port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
port->flags |= OZ_PORT_F_CHANGED;
port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE);
port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
	/* If there is an endpoint 0 then clear the pointer while we hold
	 * the spinlock, but deallocate it after releasing the lock.
*/
if (port->out_ep[0]) {
ep = port->out_ep[0];
port->out_ep[0] = NULL;
}
spin_unlock_bh(&port->port_lock);
if (ep)
oz_ep_free(port, ep);
usb_hcd_poll_rh_status(ozhcd->hcd);
oz_usb_put(hpd);
}
/*
* Context: softirq
*/
void oz_hcd_pd_reset(void *hpd, void *hport)
{
/* Cleanup the current configuration and report reset to the core.
*/
struct oz_port *port = hport;
struct oz_hcd *ozhcd = port->ozhcd;
oz_dbg(ON, "PD Reset\n");
spin_lock_bh(&port->port_lock);
port->flags |= OZ_PORT_F_CHANGED;
port->status |= USB_PORT_STAT_RESET;
port->status |= (USB_PORT_STAT_C_RESET << 16);
spin_unlock_bh(&port->port_lock);
oz_clean_endpoints_for_config(ozhcd->hcd, port);
usb_hcd_poll_rh_status(ozhcd->hcd);
}
/*
* Context: softirq
*/
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
int length, int offset, int total_size)
{
struct oz_port *port = hport;
struct urb *urb;
int err = 0;
oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
length, offset, total_size);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb)
return;
if (status == 0) {
		/* Use unsigned arithmetic so that a reply carrying a bogus
		 * offset cannot produce a negative copy_len, which memcpy()
		 * would treat as a huge size_t.
		 */
		unsigned int copy_len;
		unsigned int required_size = urb->transfer_buffer_length;
if (required_size > total_size)
required_size = total_size;
copy_len = required_size-offset;
if (length <= copy_len)
copy_len = length;
memcpy(urb->transfer_buffer+offset, desc, copy_len);
offset += copy_len;
if (offset < required_size) {
struct usb_ctrlrequest *setup =
(struct usb_ctrlrequest *)urb->setup_packet;
unsigned wvalue = le16_to_cpu(setup->wValue);
if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
err = -ENOMEM;
else if (oz_usb_get_desc_req(port->hpd, req_id,
setup->bRequestType, (u8)(wvalue>>8),
(u8)wvalue, setup->wIndex, offset,
required_size-offset)) {
oz_dequeue_ep_urb(port, 0, 0, urb);
err = -ENOMEM;
}
if (err == 0)
return;
}
}
urb->actual_length = total_size;
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/*
* Context: softirq
*/
static void oz_display_conf_type(u8 t)
{
switch (t) {
case USB_REQ_GET_STATUS:
oz_dbg(ON, "USB_REQ_GET_STATUS - cnf\n");
break;
case USB_REQ_CLEAR_FEATURE:
oz_dbg(ON, "USB_REQ_CLEAR_FEATURE - cnf\n");
break;
case USB_REQ_SET_FEATURE:
oz_dbg(ON, "USB_REQ_SET_FEATURE - cnf\n");
break;
case USB_REQ_SET_ADDRESS:
oz_dbg(ON, "USB_REQ_SET_ADDRESS - cnf\n");
break;
case USB_REQ_GET_DESCRIPTOR:
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
break;
case USB_REQ_SET_DESCRIPTOR:
oz_dbg(ON, "USB_REQ_SET_DESCRIPTOR - cnf\n");
break;
case USB_REQ_GET_CONFIGURATION:
oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - cnf\n");
break;
case USB_REQ_SET_CONFIGURATION:
oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - cnf\n");
break;
case USB_REQ_GET_INTERFACE:
oz_dbg(ON, "USB_REQ_GET_INTERFACE - cnf\n");
break;
case USB_REQ_SET_INTERFACE:
oz_dbg(ON, "USB_REQ_SET_INTERFACE - cnf\n");
break;
case USB_REQ_SYNCH_FRAME:
oz_dbg(ON, "USB_REQ_SYNCH_FRAME - cnf\n");
break;
}
}
/*
* Context: softirq
*/
static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
u8 rcode, u8 config_num)
{
int rc = 0;
struct usb_hcd *hcd = port->ozhcd->hcd;
if (rcode == 0) {
port->config_num = config_num;
oz_clean_endpoints_for_config(hcd, port);
if (oz_build_endpoints_for_config(hcd, port,
&urb->dev->config[port->config_num-1], GFP_ATOMIC)) {
rc = -ENOMEM;
}
} else {
rc = -ENOMEM;
}
oz_complete_urb(hcd, urb, rc);
}
/*
* Context: softirq
*/
static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
u8 rcode, u8 if_num, u8 alt)
{
struct usb_hcd *hcd = port->ozhcd->hcd;
int rc = 0;
if ((rcode == 0) && (port->config_num > 0)) {
struct usb_host_config *config;
struct usb_host_interface *intf;
oz_dbg(ON, "Set interface %d alt %d\n", if_num, alt);
oz_clean_endpoints_for_interface(hcd, port, if_num);
config = &urb->dev->config[port->config_num-1];
intf = &config->intf_cache[if_num]->altsetting[alt];
if (oz_build_endpoints_for_interface(hcd, port, intf,
GFP_ATOMIC))
rc = -ENOMEM;
else
port->iface[if_num].alt = alt;
} else {
rc = -ENOMEM;
}
oz_complete_urb(hcd, urb, rc);
}
/*
* Context: softirq
*/
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
int data_len)
{
struct oz_port *port = hport;
struct urb *urb;
struct usb_ctrlrequest *setup;
struct usb_hcd *hcd = port->ozhcd->hcd;
unsigned windex;
unsigned wvalue;
oz_dbg(ON, "oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb) {
oz_dbg(ON, "URB not found\n");
return;
}
setup = (struct usb_ctrlrequest *)urb->setup_packet;
windex = le16_to_cpu(setup->wIndex);
wvalue = le16_to_cpu(setup->wValue);
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
/* Standard requests */
oz_display_conf_type(setup->bRequest);
switch (setup->bRequest) {
case USB_REQ_SET_CONFIGURATION:
oz_hcd_complete_set_config(port, urb, rcode,
(u8)wvalue);
break;
case USB_REQ_SET_INTERFACE:
oz_hcd_complete_set_interface(port, urb, rcode,
(u8)windex, (u8)wvalue);
break;
default:
oz_complete_urb(hcd, urb, 0);
}
} else {
int copy_len;
oz_dbg(ON, "VENDOR-CLASS - cnf\n");
if (data_len) {
if (data_len <= urb->transfer_buffer_length)
copy_len = data_len;
else
copy_len = urb->transfer_buffer_length;
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
}
oz_complete_urb(hcd, urb, 0);
}
}
/*
* Context: softirq-serialized
*/
static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
int data_len)
{
int space;
int copy_len;
if (!ep->buffer)
return -1;
space = ep->out_ix-ep->in_ix-1;
if (space < 0)
space += ep->buffer_size;
if (space < (data_len+1)) {
oz_dbg(ON, "Buffer full\n");
return -1;
}
ep->buffer[ep->in_ix] = (u8)data_len;
if (++ep->in_ix == ep->buffer_size)
ep->in_ix = 0;
copy_len = ep->buffer_size - ep->in_ix;
if (copy_len > data_len)
copy_len = data_len;
memcpy(&ep->buffer[ep->in_ix], data, copy_len);
if (copy_len < data_len) {
memcpy(ep->buffer, data+copy_len, data_len-copy_len);
ep->in_ix = data_len-copy_len;
} else {
ep->in_ix += copy_len;
}
if (ep->in_ix == ep->buffer_size)
ep->in_ix = 0;
ep->buffered_units++;
return 0;
}
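/*
 * Worked example of the free-space test above: the buffer is a ring with
 * one byte always kept unused so that in_ix == out_ix can only mean
 * "empty". With buffer_size = 8, in_ix = 6 and out_ix = 2:
 *
 *	space = out_ix - in_ix - 1 = -5;  space += 8;  =>  space = 3
 *
 * so slots 6, 7 and 0 are writable while slot 1 remains the sentinel.
 */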
/*
* Context: softirq-serialized
*/
void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
{
struct oz_port *port = (struct oz_port *)hport;
struct oz_endpoint *ep;
struct oz_hcd *ozhcd = port->ozhcd;
spin_lock_bh(&ozhcd->hcd_lock);
ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
if (ep == NULL)
goto done;
switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
case USB_ENDPOINT_XFER_BULK:
if (!list_empty(&ep->urb_list)) {
struct oz_urb_link *urbl =
list_first_entry(&ep->urb_list,
struct oz_urb_link, link);
struct urb *urb;
int copy_len;
list_del_init(&urbl->link);
spin_unlock_bh(&ozhcd->hcd_lock);
urb = urbl->urb;
oz_free_urb_link(urbl);
if (data_len <= urb->transfer_buffer_length)
copy_len = data_len;
else
copy_len = urb->transfer_buffer_length;
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
oz_complete_urb(port->ozhcd->hcd, urb, 0);
return;
}
oz_dbg(ON, "buffering frame as URB is not available\n");
oz_hcd_buffer_data(ep, data, data_len);
break;
case USB_ENDPOINT_XFER_ISOC:
oz_hcd_buffer_data(ep, data, data_len);
break;
}
done:
spin_unlock_bh(&ozhcd->hcd_lock);
}
/*
* Context: unknown
*/
static inline int oz_usb_get_frame_number(void)
{
return atomic_inc_return(&g_usb_frame_number);
}
/*
* Context: softirq
*/
int oz_hcd_heartbeat(void *hport)
{
int rc = 0;
struct oz_port *port = hport;
struct oz_hcd *ozhcd = port->ozhcd;
struct oz_urb_link *urbl, *n;
LIST_HEAD(xfr_list);
struct urb *urb;
struct oz_endpoint *ep;
struct timespec ts, delta;
getrawmonotonic(&ts);
/* Check the OUT isoc endpoints to see if any URB data can be sent.
*/
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_entry(ep, &port->isoc_out_ep, link) {
if (ep->credit < 0)
continue;
delta = timespec_sub(ts, ep->timestamp);
ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
if (ep->credit > ep->credit_ceiling)
ep->credit = ep->credit_ceiling;
ep->timestamp = ts;
while (ep->credit && !list_empty(&ep->urb_list)) {
urbl = list_first_entry(&ep->urb_list,
struct oz_urb_link, link);
urb = urbl->urb;
if ((ep->credit + 1) < urb->number_of_packets)
break;
ep->credit -= urb->number_of_packets;
if (ep->credit < 0)
ep->credit = 0;
list_move_tail(&urbl->link, &xfr_list);
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
/* Send to PD and complete URBs.
*/
list_for_each_entry_safe(urbl, n, &xfr_list, link) {
urb = urbl->urb;
list_del_init(&urbl->link);
urb->error_count = 0;
urb->start_frame = oz_usb_get_frame_number();
oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
oz_free_urb_link(urbl);
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check the IN isoc endpoints to see if any URBs can be completed.
*/
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_entry(ep, &port->isoc_in_ep, link) {
if (ep->flags & OZ_F_EP_BUFFERING) {
if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
ep->flags &= ~OZ_F_EP_BUFFERING;
ep->credit = 0;
ep->timestamp = ts;
ep->start_frame = 0;
}
continue;
}
delta = timespec_sub(ts, ep->timestamp);
ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
ep->timestamp = ts;
list_for_each_entry_safe(urbl, n, &ep->urb_list, link) {
struct urb *urb = urbl->urb;
int len = 0;
int copy_len;
int i;
if (ep->credit < urb->number_of_packets)
break;
if (ep->buffered_units < urb->number_of_packets)
break;
urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++) {
len = ep->buffer[ep->out_ix];
if (++ep->out_ix == ep->buffer_size)
ep->out_ix = 0;
copy_len = ep->buffer_size - ep->out_ix;
if (copy_len > len)
copy_len = len;
memcpy(urb->transfer_buffer,
&ep->buffer[ep->out_ix], copy_len);
if (copy_len < len) {
memcpy(urb->transfer_buffer+copy_len,
ep->buffer, len-copy_len);
ep->out_ix = len-copy_len;
} else
ep->out_ix += copy_len;
if (ep->out_ix == ep->buffer_size)
ep->out_ix = 0;
urb->iso_frame_desc[i].offset =
urb->actual_length;
urb->actual_length += len;
urb->iso_frame_desc[i].actual_length = len;
urb->iso_frame_desc[i].status = 0;
}
ep->buffered_units -= urb->number_of_packets;
urb->error_count = 0;
urb->start_frame = ep->start_frame;
ep->start_frame += urb->number_of_packets;
list_move_tail(&urbl->link, &xfr_list);
ep->credit -= urb->number_of_packets;
}
}
if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
rc = 1;
spin_unlock_bh(&ozhcd->hcd_lock);
/* Complete the filled URBs.
*/
list_for_each_entry_safe(urbl, n, &xfr_list, link) {
urb = urbl->urb;
list_del_init(&urbl->link);
oz_free_urb_link(urbl);
oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check if there are any ep0 requests that have timed out.
	 * If so, resend them to the PD.
*/
ep = port->out_ep[0];
if (ep) {
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_entry_safe(urbl, n, &ep->urb_list, link) {
if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
oz_dbg(ON, "Request 0x%p timeout\n", urbl->urb);
list_move_tail(&urbl->link, &xfr_list);
urbl->submit_counter = 0;
} else {
urbl->submit_counter++;
}
}
if (!list_empty(&ep->urb_list))
rc = 1;
spin_unlock_bh(&ozhcd->hcd_lock);
list_for_each_entry_safe(urbl, n, &xfr_list, link) {
oz_dbg(ON, "Resending request to PD\n");
oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
oz_free_urb_link(urbl);
}
}
return rc;
}
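/*
 * Worked example of the credit accounting above: credits accrue at one per
 * elapsed millisecond. If 12 ms have passed since ep->timestamp, the
 * endpoint gains 12 credits; an OUT isoc urb with number_of_packets = 10
 * then passes the (credit + 1) >= number_of_packets test and is sent,
 * leaving 2 credits. The credit ceiling stops credit from accumulating
 * without bound while an endpoint is idle.
 */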
/*
* Context: softirq
*/
static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_port *port,
struct usb_host_interface *intf, gfp_t mem_flags)
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
int if_ix = intf->desc.bInterfaceNumber;
int request_heartbeat = 0;
oz_dbg(ON, "interface[%d] = %p\n", if_ix, intf);
if (if_ix >= port->num_iface || port->iface == NULL)
return -ENOMEM;
for (i = 0; i < intf->desc.bNumEndpoints; i++) {
struct usb_host_endpoint *hep = &intf->endpoint[i];
u8 ep_addr = hep->desc.bEndpointAddress;
u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK;
struct oz_endpoint *ep;
int buffer_size = 0;
oz_dbg(ON, "%d bEndpointAddress = %x\n", i, ep_addr);
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
switch (hep->desc.bmAttributes &
USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_ISOC:
buffer_size = OZ_EP_BUFFER_SIZE_ISOC;
break;
case USB_ENDPOINT_XFER_INT:
buffer_size = OZ_EP_BUFFER_SIZE_INT;
break;
}
}
ep = oz_ep_alloc(buffer_size, mem_flags);
if (!ep) {
oz_clean_endpoints_for_interface(hcd, port, if_ix);
return -ENOMEM;
}
ep->attrib = hep->desc.bmAttributes;
ep->ep_num = ep_num;
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
oz_dbg(ON, "wMaxPacketSize = %d\n",
usb_endpoint_maxp(&hep->desc));
ep->credit_ceiling = 200;
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
ep->flags |= OZ_F_EP_BUFFERING;
} else {
ep->flags |= OZ_F_EP_HAVE_STREAM;
if (oz_usb_stream_create(port->hpd, ep_num))
ep->flags &= ~OZ_F_EP_HAVE_STREAM;
}
}
spin_lock_bh(&ozhcd->hcd_lock);
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
port->in_ep[ep_num] = ep;
port->iface[if_ix].ep_mask |=
(1<<(ep_num+OZ_NB_ENDPOINTS));
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
list_add_tail(&ep->link, &port->isoc_in_ep);
request_heartbeat = 1;
}
} else {
port->out_ep[ep_num] = ep;
port->iface[if_ix].ep_mask |= (1<<ep_num);
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
list_add_tail(&ep->link, &port->isoc_out_ep);
request_heartbeat = 1;
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
if (request_heartbeat && port->hpd)
oz_usb_request_heartbeat(port->hpd);
}
return 0;
}
/*
* Context: softirq
*/
static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_port *port, int if_ix)
{
struct oz_hcd *ozhcd = port->ozhcd;
unsigned mask;
int i;
LIST_HEAD(ep_list);
struct oz_endpoint *ep, *n;
oz_dbg(ON, "Deleting endpoints for interface %d\n", if_ix);
if (if_ix >= port->num_iface)
return;
spin_lock_bh(&ozhcd->hcd_lock);
mask = port->iface[if_ix].ep_mask;
port->iface[if_ix].ep_mask = 0;
for (i = 0; i < OZ_NB_ENDPOINTS; i++) {
struct list_head *e;
/* Gather OUT endpoints.
*/
if ((mask & (1<<i)) && port->out_ep[i]) {
e = &port->out_ep[i]->link;
port->out_ep[i] = NULL;
/* Remove from isoc list if present.
*/
list_move_tail(e, &ep_list);
}
/* Gather IN endpoints.
*/
if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
e = &port->in_ep[i]->link;
port->in_ep[i] = NULL;
list_move_tail(e, &ep_list);
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
list_for_each_entry_safe(ep, n, &ep_list, link) {
list_del_init(&ep->link);
oz_ep_free(port, ep);
}
}
/*
* Context: softirq
*/
static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
struct oz_port *port, struct usb_host_config *config,
gfp_t mem_flags)
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
int num_iface = config->desc.bNumInterfaces;
if (num_iface) {
struct oz_interface *iface;
iface = kmalloc_array(num_iface, sizeof(struct oz_interface),
mem_flags | __GFP_ZERO);
if (!iface)
return -ENOMEM;
spin_lock_bh(&ozhcd->hcd_lock);
port->iface = iface;
port->num_iface = num_iface;
spin_unlock_bh(&ozhcd->hcd_lock);
}
for (i = 0; i < num_iface; i++) {
struct usb_host_interface *intf =
&config->intf_cache[i]->altsetting[0];
if (oz_build_endpoints_for_interface(hcd, port, intf,
mem_flags))
goto fail;
}
return 0;
fail:
oz_clean_endpoints_for_config(hcd, port);
return -1;
}
/*
* Context: softirq
*/
static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
struct oz_port *port)
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
oz_dbg(ON, "Deleting endpoints for configuration\n");
for (i = 0; i < port->num_iface; i++)
oz_clean_endpoints_for_interface(hcd, port, i);
spin_lock_bh(&ozhcd->hcd_lock);
if (port->iface) {
oz_dbg(ON, "Freeing interfaces object\n");
kfree(port->iface);
port->iface = NULL;
}
port->num_iface = 0;
spin_unlock_bh(&ozhcd->hcd_lock);
}
/*
* Context: tasklet
*/
static void *oz_claim_hpd(struct oz_port *port)
{
void *hpd;
struct oz_hcd *ozhcd = port->ozhcd;
spin_lock_bh(&ozhcd->hcd_lock);
hpd = port->hpd;
if (hpd)
oz_usb_get(hpd);
spin_unlock_bh(&ozhcd->hcd_lock);
return hpd;
}
/*
* Context: tasklet
*/
static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
gfp_t mem_flags)
{
struct usb_ctrlrequest *setup;
unsigned windex;
unsigned wvalue;
unsigned wlength;
void *hpd;
u8 req_id;
int rc = 0;
unsigned complete = 0;
int port_ix = -1;
struct oz_port *port = NULL;
oz_dbg(URB, "[%s]:(%p)\n", __func__, urb);
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
if (port_ix < 0) {
rc = -EPIPE;
goto out;
}
port = &ozhcd->ports[port_ix];
if (((port->flags & OZ_PORT_F_PRESENT) == 0)
|| (port->flags & OZ_PORT_F_DYING)) {
oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
port_ix, urb->dev->devnum);
rc = -EPIPE;
goto out;
}
/* Store port in private context data.
*/
urb->hcpriv = port;
setup = (struct usb_ctrlrequest *)urb->setup_packet;
windex = le16_to_cpu(setup->wIndex);
wvalue = le16_to_cpu(setup->wValue);
wlength = le16_to_cpu(setup->wLength);
oz_dbg(CTRL_DETAIL, "bRequestType = %x\n", setup->bRequestType);
oz_dbg(CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
oz_dbg(CTRL_DETAIL, "wValue = %x\n", wvalue);
oz_dbg(CTRL_DETAIL, "wIndex = %x\n", windex);
oz_dbg(CTRL_DETAIL, "wLength = %x\n", wlength);
req_id = port->next_req_id++;
hpd = oz_claim_hpd(port);
if (hpd == NULL) {
oz_dbg(ON, "Cannot claim port\n");
rc = -EPIPE;
goto out;
}
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
/* Standard requests
*/
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - req\n");
break;
case USB_REQ_SET_ADDRESS:
oz_dbg(ON, "USB_REQ_SET_ADDRESS - req\n");
oz_dbg(ON, "Port %d address is 0x%x\n",
ozhcd->conn_port,
(u8)le16_to_cpu(setup->wValue));
spin_lock_bh(&ozhcd->hcd_lock);
if (ozhcd->conn_port >= 0) {
ozhcd->ports[ozhcd->conn_port].bus_addr =
(u8)le16_to_cpu(setup->wValue);
oz_dbg(ON, "Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_unlock_bh(&ozhcd->hcd_lock);
complete = 1;
break;
case USB_REQ_SET_CONFIGURATION:
oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - req\n");
break;
case USB_REQ_GET_CONFIGURATION:
/* We short circuit this case and reply directly since
* we have the selected configuration number cached.
*/
oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
port->config_num;
complete = 1;
} else {
rc = -EPIPE;
}
break;
case USB_REQ_GET_INTERFACE:
/* We short circuit this case and reply directly since
* we have the selected interface alternative cached.
*/
oz_dbg(ON, "USB_REQ_GET_INTERFACE - reply now\n");
			if ((u8)windex >= port->num_iface ||
			    port->iface == NULL) {
				/* Reject an out-of-range interface index
				 * rather than reading past the iface array.
				 */
				rc = -EPIPE;
			} else if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
port->iface[(u8)windex].alt;
oz_dbg(ON, "interface = %d alt = %d\n",
windex, port->iface[(u8)windex].alt);
complete = 1;
} else {
rc = -EPIPE;
}
break;
case USB_REQ_SET_INTERFACE:
oz_dbg(ON, "USB_REQ_SET_INTERFACE - req\n");
break;
}
}
if (!rc && !complete) {
int data_len = 0;
if ((setup->bRequestType & USB_DIR_IN) == 0)
data_len = wlength;
urb->actual_length = data_len;
if (oz_usb_control_req(port->hpd, req_id, setup,
urb->transfer_buffer, data_len)) {
rc = -ENOMEM;
} else {
/* Note: we are queuing the request after we have
* submitted it to be transmitted. If the request were
* to complete before we queued it then it would not
* be found in the queue. It seems impossible for
* this to happen but if it did the request would
* be resubmitted so the problem would hopefully
* resolve itself. Putting the request into the
* queue before it has been sent is worse since the
* urb could be cancelled while we are using it
* to build the request.
*/
if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
rc = -ENOMEM;
}
}
oz_usb_put(hpd);
out:
if (rc || complete) {
oz_dbg(ON, "Completing request locally\n");
oz_complete_urb(ozhcd->hcd, urb, rc);
} else {
oz_usb_request_heartbeat(port->hpd);
}
}
/*
* Context: tasklet
*/
static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
{
int rc = 0;
struct oz_port *port = urb->hcpriv;
u8 ep_addr;
/* When we are paranoid we keep a list of urbs which we check against
* before handing one back. This is just for debugging during
* development and should be turned off in the released driver.
*/
oz_remember_urb(urb);
/* Check buffer is valid.
*/
if (!urb->transfer_buffer && urb->transfer_buffer_length)
return -EINVAL;
/* Check if there is a device at the port - refuse if not.
*/
if ((port->flags & OZ_PORT_F_PRESENT) == 0)
return -EPIPE;
ep_addr = usb_pipeendpoint(urb->pipe);
if (ep_addr) {
/* If the request is not for EP0 then queue it.
*/
if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
urb, 0))
rc = -EPIPE;
} else {
oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
}
return rc;
}
/*
* Context: tasklet
*/
static void oz_urb_process_tasklet(unsigned long unused)
{
unsigned long irq_state;
struct urb *urb;
struct oz_hcd *ozhcd = oz_hcd_claim();
struct oz_urb_link *urbl, *n;
int rc = 0;
if (ozhcd == NULL)
return;
/* This is called from a tasklet so is in softirq context but the urb
* list is filled from any context so we need to lock
* appropriately while removing urbs.
*/
spin_lock_irqsave(&g_tasklet_lock, irq_state);
list_for_each_entry_safe(urbl, n, &ozhcd->urb_pending_list, link) {
list_del_init(&urbl->link);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
urb = urbl->urb;
oz_free_urb_link(urbl);
rc = oz_urb_process(ozhcd, urb);
if (rc)
oz_complete_urb(ozhcd->hcd, urb, rc);
spin_lock_irqsave(&g_tasklet_lock, irq_state);
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_hcd_put(ozhcd);
}
/*
* This function searches for the urb in any of the lists it could be in.
* If it is found it is removed from the list and completed. If the urb is
* being processed then it won't be in a list so won't be found. However, the
* call to usb_hcd_check_unlink_urb() will set the value of the unlinked field
* to a non-zero value. When an attempt is made to put the urb back in a list
* the unlinked field will be checked and the urb will then be completed.
* Context: tasklet
*/
static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
{
struct oz_urb_link *urbl = NULL;
struct list_head *e;
struct oz_hcd *ozhcd;
unsigned long irq_state;
u8 ix;
if (port == NULL) {
oz_dbg(ON, "%s: ERROR: (%p) port is null\n", __func__, urb);
return;
}
ozhcd = port->ozhcd;
if (ozhcd == NULL) {
oz_dbg(ON, "%s; ERROR: (%p) ozhcd is null\n", __func__, urb);
return;
}
/* Look in the tasklet queue.
*/
spin_lock_irqsave(&g_tasklet_lock, irq_state);
list_for_each(e, &ozhcd->urb_cancel_list) {
urbl = list_entry(e, struct oz_urb_link, link);
if (urb == urbl->urb) {
list_del_init(e);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
goto out2;
}
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
urbl = NULL;
/* Look in the orphanage.
*/
spin_lock_irqsave(&ozhcd->hcd_lock, irq_state);
list_for_each(e, &ozhcd->orphanage) {
urbl = list_entry(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
list_del(e);
oz_dbg(ON, "Found urb in orphanage\n");
goto out;
}
}
ix = (ep_num & 0xf);
urbl = NULL;
if ((ep_num & USB_DIR_IN) && ix)
urbl = oz_remove_urb(port->in_ep[ix], urb);
else
urbl = oz_remove_urb(port->out_ep[ix], urb);
out:
spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state);
out2:
if (urbl) {
urb->actual_length = 0;
oz_free_urb_link(urbl);
oz_complete_urb(ozhcd->hcd, urb, -EPIPE);
}
}
/*
* Context: tasklet
*/
static void oz_urb_cancel_tasklet(unsigned long unused)
{
unsigned long irq_state;
struct urb *urb;
struct oz_urb_link *urbl, *n;
struct oz_hcd *ozhcd = oz_hcd_claim();
if (ozhcd == NULL)
return;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
list_for_each_entry_safe(urbl, n, &ozhcd->urb_cancel_list, link) {
list_del_init(&urbl->link);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
urb = urbl->urb;
if (urb->unlinked)
oz_urb_cancel(urbl->port, urbl->ep_num, urb);
oz_free_urb_link(urbl);
spin_lock_irqsave(&g_tasklet_lock, irq_state);
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_hcd_put(ozhcd);
}
/*
* Context: unknown
*/
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
{
if (ozhcd) {
struct oz_urb_link *urbl, *n;
list_for_each_entry_safe(urbl, n, &ozhcd->orphanage, link) {
list_del(&urbl->link);
oz_complete_urb(ozhcd->hcd, urbl->urb, status);
oz_free_urb_link(urbl);
}
}
}
/*
* Context: unknown
*/
static int oz_hcd_start(struct usb_hcd *hcd)
{
hcd->power_budget = 200;
hcd->state = HC_STATE_RUNNING;
hcd->uses_new_polling = 1;
return 0;
}
/*
* Context: unknown
*/
static void oz_hcd_stop(struct usb_hcd *hcd)
{
}
/*
* Context: unknown
*/
static void oz_hcd_shutdown(struct usb_hcd *hcd)
{
}
/*
* Called to queue an urb for the device.
* This function should return a non-zero error code if it fails the urb but
* should not call usb_hcd_giveback_urb().
* Context: any
*/
static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
int rc;
int port_ix;
struct oz_port *port;
unsigned long irq_state;
struct oz_urb_link *urbl;
oz_dbg(URB, "%s: (%p)\n", __func__, urb);
if (unlikely(ozhcd == NULL)) {
oz_dbg(URB, "Refused urb(%p) not ozhcd\n", urb);
return -EPIPE;
}
if (unlikely(hcd->state != HC_STATE_RUNNING)) {
oz_dbg(URB, "Refused urb(%p) not running\n", urb);
return -EPIPE;
}
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
if (port_ix < 0)
return -EPIPE;
port = &ozhcd->ports[port_ix];
if (port == NULL)
return -EPIPE;
if (!(port->flags & OZ_PORT_F_PRESENT) ||
(port->flags & OZ_PORT_F_CHANGED)) {
oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
port_ix, urb->dev->devnum);
return -EPIPE;
}
urb->hcpriv = port;
/* Put request in queue for processing by tasklet.
*/
urbl = oz_alloc_urb_link();
if (unlikely(urbl == NULL))
return -ENOMEM;
urbl->urb = urb;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (unlikely(rc)) {
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_free_urb_link(urbl);
return rc;
}
list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
tasklet_schedule(&g_urb_process_tasklet);
atomic_inc(&g_pending_urbs);
return 0;
}
/*
* Context: tasklet
*/
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb)
{
struct oz_urb_link *urbl;
if (unlikely(ep == NULL))
return NULL;
list_for_each_entry(urbl, &ep->urb_list, link) {
if (urbl->urb == urb) {
list_del_init(&urbl->link);
if (usb_pipeisoc(urb->pipe)) {
ep->credit -= urb->number_of_packets;
if (ep->credit < 0)
ep->credit = 0;
}
return urbl;
}
}
return NULL;
}
/*
* Called to dequeue a previously submitted urb for the device.
* Context: any
*/
static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
struct oz_urb_link *urbl;
int rc;
unsigned long irq_state;
oz_dbg(URB, "%s: (%p)\n", __func__, urb);
urbl = oz_alloc_urb_link();
if (unlikely(urbl == NULL))
return -ENOMEM;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
/* The following function checks the urb is still in the queue
* maintained by the core and that the unlinked field is zero.
* If both are true the function sets the unlinked field and returns
* zero. Otherwise it returns an error.
*/
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
/* We have to check we haven't completed the urb or are about
* to complete it. When we do we set hcpriv to 0 so if this has
* already happened we don't put the urb in the cancel queue.
*/
if ((rc == 0) && urb->hcpriv) {
urbl->urb = urb;
urbl->port = (struct oz_port *)urb->hcpriv;
urbl->ep_num = usb_pipeendpoint(urb->pipe);
if (usb_pipein(urb->pipe))
urbl->ep_num |= USB_DIR_IN;
list_add_tail(&urbl->link, &ozhcd->urb_cancel_list);
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
tasklet_schedule(&g_urb_cancel_tasklet);
} else {
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_free_urb_link(urbl);
}
return rc;
}
/*
* Context: unknown
*/
static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
}
/*
* Context: unknown
*/
static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
}
/*
* Context: unknown
*/
static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
{
oz_dbg(ON, "oz_hcd_get_frame_number\n");
return oz_usb_get_frame_number();
}
/*
* Context: softirq
 * This is called as a consequence of us calling usb_hcd_poll_rh_status() and we
* always do that in softirq context.
*/
static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
int i;
buf[0] = 0;
buf[1] = 0;
spin_lock_bh(&ozhcd->hcd_lock);
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
oz_dbg(HUB, "Port %d changed\n", i);
ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
if (i < 7)
buf[0] |= 1 << (i + 1);
else
buf[1] |= 1 << (i - 7);
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
if (buf[0] != 0 || buf[1] != 0)
return 2;
return 0;
}
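/*
 * The change bitmap above follows the USB hub convention that bit 0 of the
 * first byte refers to the hub itself, so (0-based) port i sets bit i + 1:
 * a change on port 0 yields buf[0] = 0x02, and a change on port 7 yields
 * buf[1] = 0x01.
 */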
/*
* Context: process
*/
static void oz_get_hub_descriptor(struct usb_hcd *hcd,
struct usb_hub_descriptor *desc)
{
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(0x0001);
desc->bNbrPorts = OZ_NB_PORTS;
}
/*
* Context: process
*/
static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
{
struct oz_port *port;
u8 port_id = (u8)windex;
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned set_bits = 0;
unsigned clear_bits = 0;
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
return -EPIPE;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
break;
case USB_PORT_FEAT_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
clear_bits = USB_PORT_STAT_RESET;
ozhcd->ports[port_id-1].bus_addr = 0;
break;
case USB_PORT_FEAT_POWER:
oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
set_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
break;
case USB_PORT_FEAT_C_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
break;
case USB_PORT_FEAT_C_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
break;
case USB_PORT_FEAT_TEST:
oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
break;
default:
oz_dbg(HUB, "Other %d\n", wvalue);
break;
}
if (set_bits || clear_bits) {
spin_lock_bh(&port->port_lock);
port->status &= ~clear_bits;
port->status |= set_bits;
spin_unlock_bh(&port->port_lock);
}
oz_dbg(HUB, "Port[%d] status = 0x%x\n", port_id, port->status);
return 0;
}
/*
* Context: process
*/
static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
{
struct oz_port *port;
u8 port_id = (u8)windex;
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned clear_bits = 0;
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
return -EPIPE;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
clear_bits = USB_PORT_STAT_ENABLE;
break;
case USB_PORT_FEAT_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
break;
case USB_PORT_FEAT_POWER:
oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
clear_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
clear_bits = USB_PORT_STAT_C_CONNECTION << 16;
break;
case USB_PORT_FEAT_C_ENABLE:
oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
clear_bits = USB_PORT_STAT_C_ENABLE << 16;
break;
case USB_PORT_FEAT_C_SUSPEND:
oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
		clear_bits = USB_PORT_STAT_C_RESET << 16;
break;
case USB_PORT_FEAT_TEST:
oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
break;
default:
oz_dbg(HUB, "Other %d\n", wvalue);
break;
}
if (clear_bits) {
spin_lock_bh(&port->port_lock);
port->status &= ~clear_bits;
spin_unlock_bh(&port->port_lock);
}
oz_dbg(HUB, "Port[%d] status = 0x%x\n",
port_id, ozhcd->ports[port_id-1].status);
return 0;
}
/*
* Context: process
*/
static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
{
struct oz_hcd *ozhcd;
u32 status;
if ((windex < 1) || (windex > OZ_NB_PORTS))
return -EPIPE;
ozhcd = oz_hcd_private(hcd);
oz_dbg(HUB, "GetPortStatus windex = %d\n", windex);
status = ozhcd->ports[windex-1].status;
put_unaligned(cpu_to_le32(status), (__le32 *)buf);
oz_dbg(HUB, "Port[%d] status = %x\n", windex, status);
return 0;
}
/*
* Context: process
*/
static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
u16 windex, char *buf, u16 wlength)
{
int err = 0;
switch (req_type) {
case ClearHubFeature:
oz_dbg(HUB, "ClearHubFeature: %d\n", req_type);
break;
case ClearPortFeature:
err = oz_clear_port_feature(hcd, wvalue, windex);
break;
case GetHubDescriptor:
oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
put_unaligned(cpu_to_le32(0), (__le32 *)buf);
break;
case GetPortStatus:
err = oz_get_port_status(hcd, windex, buf);
break;
case SetHubFeature:
oz_dbg(HUB, "SetHubFeature: %d\n", req_type);
break;
case SetPortFeature:
err = oz_set_port_feature(hcd, wvalue, windex);
break;
default:
oz_dbg(HUB, "Other: %d\n", req_type);
break;
}
return err;
}
/*
* Context: process
*/
static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
hcd->state = HC_STATE_SUSPENDED;
ozhcd->flags |= OZ_HDC_F_SUSPENDED;
spin_unlock_bh(&ozhcd->hcd_lock);
return 0;
}
/*
* Context: process
*/
static int oz_hcd_bus_resume(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
hcd->state = HC_STATE_RUNNING;
spin_unlock_bh(&ozhcd->hcd_lock);
return 0;
}
static void oz_plat_shutdown(struct platform_device *dev)
{
}
/*
* Context: process
*/
static int oz_plat_probe(struct platform_device *dev)
{
int i;
int err;
struct usb_hcd *hcd;
struct oz_hcd *ozhcd;
hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
if (hcd == NULL) {
oz_dbg(ON, "Failed to created hcd object OK\n");
return -ENOMEM;
}
ozhcd = oz_hcd_private(hcd);
memset(ozhcd, 0, sizeof(*ozhcd));
INIT_LIST_HEAD(&ozhcd->urb_pending_list);
INIT_LIST_HEAD(&ozhcd->urb_cancel_list);
INIT_LIST_HEAD(&ozhcd->orphanage);
ozhcd->hcd = hcd;
ozhcd->conn_port = -1;
spin_lock_init(&ozhcd->hcd_lock);
for (i = 0; i < OZ_NB_PORTS; i++) {
struct oz_port *port = &ozhcd->ports[i];
port->ozhcd = ozhcd;
port->flags = 0;
port->status = 0;
port->bus_addr = 0xff;
spin_lock_init(&port->port_lock);
}
err = usb_add_hcd(hcd, 0, 0);
if (err) {
oz_dbg(ON, "Failed to add hcd object OK\n");
usb_put_hcd(hcd);
return -1;
}
device_wakeup_enable(hcd->self.controller);
spin_lock_bh(&g_hcdlock);
g_ozhcd = ozhcd;
spin_unlock_bh(&g_hcdlock);
return 0;
}
/*
* Context: unknown
*/
static int oz_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct oz_hcd *ozhcd;
if (hcd == NULL)
return -1;
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&g_hcdlock);
if (ozhcd == g_ozhcd)
g_ozhcd = NULL;
spin_unlock_bh(&g_hcdlock);
oz_dbg(ON, "Clearing orphanage\n");
oz_hcd_clear_orphanage(ozhcd, -EPIPE);
oz_dbg(ON, "Removing hcd\n");
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
return 0;
}
/*
* Context: unknown
*/
static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
{
return 0;
}
/*
* Context: unknown
*/
static int oz_plat_resume(struct platform_device *dev)
{
return 0;
}
/*
* Context: process
*/
int oz_hcd_init(void)
{
int err;
if (usb_disabled())
return -ENODEV;
oz_urb_link_cache = KMEM_CACHE(oz_urb_link, 0);
if (!oz_urb_link_cache)
return -ENOMEM;
tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
err = platform_driver_register(&g_oz_plat_drv);
oz_dbg(ON, "platform_driver_register() returned %d\n", err);
if (err)
goto error;
g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
if (g_plat_dev == NULL) {
err = -ENOMEM;
goto error1;
}
oz_dbg(ON, "platform_device_alloc() succeeded\n");
err = platform_device_add(g_plat_dev);
if (err)
goto error2;
oz_dbg(ON, "platform_device_add() succeeded\n");
return 0;
error2:
platform_device_put(g_plat_dev);
error1:
platform_driver_unregister(&g_oz_plat_drv);
error:
tasklet_disable(&g_urb_process_tasklet);
tasklet_disable(&g_urb_cancel_tasklet);
oz_dbg(ON, "oz_hcd_init() failed %d\n", err);
return err;
}
/*
* Context: process
*/
void oz_hcd_term(void)
{
msleep(OZ_HUB_DEBOUNCE_TIMEOUT);
tasklet_kill(&g_urb_process_tasklet);
tasklet_kill(&g_urb_cancel_tasklet);
platform_device_unregister(g_plat_dev);
platform_driver_unregister(&g_oz_plat_drv);
oz_dbg(ON, "Pending urbs:%d\n", atomic_read(&g_pending_urbs));
kmem_cache_destroy(oz_urb_link_cache);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_1613_0 |
crossvul-cpp_data_bad_1635_1 | /* This file is part of libmspack.
* (C) 2003-2013 Stuart Caie.
*
* The LZX method was created by Jonathan Forbes and Tomi Poutanen, adapted
* by Microsoft Corporation.
*
* libmspack is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License (LGPL) version 2.1
*
* For further details, see the file COPYING.LIB distributed with libmspack
*/
/* LZX decompression implementation */
#include <system.h>
#include <lzx.h>
/* Microsoft's LZX document (in cab-sdk.exe) and their implementation
* of the com.ms.util.cab Java package do not concur.
*
* In the LZX document, there is a table showing the correlation between
* window size and the number of position slots. It states that the 1MB
* window = 40 slots and the 2MB window = 42 slots. In the implementation,
* 1MB = 42 slots, 2MB = 50 slots. The actual calculation is 'find the
* first slot whose position base is equal to or more than the required
* window size'. This would explain why other tables in the document refer
* to 50 slots rather than 42.
*
* The constant NUM_PRIMARY_LENGTHS used in the decompression pseudocode
* is not defined in the specification.
*
 * The LZX document does not state that the uncompressed block has an
 * uncompressed length field. Where does this length field come from, so
 * we can know how large the block is? The implementation has it as the 24
 * bits following the 3 blocktype bits, before the alignment padding.
*
* The LZX document states that aligned offset blocks have their aligned
* offset huffman tree AFTER the main and length trees. The implementation
* suggests that the aligned offset tree is BEFORE the main and length
* trees.
*
* The LZX document decoding algorithm states that, in an aligned offset
* block, if an extra_bits value is 1, 2 or 3, then that number of bits
* should be read and the result added to the match offset. This is
* correct for 1 and 2, but not 3, where just a huffman symbol (using the
* aligned tree) should be read.
*
* Regarding the E8 preprocessing, the LZX document states 'No translation
* may be performed on the last 6 bytes of the input block'. This is
* correct. However, the pseudocode provided checks for the *E8 leader*
* up to the last 6 bytes. If the leader appears between -10 and -7 bytes
* from the end, this would cause the next four bytes to be modified, at
* least one of which would be in the last 6 bytes, which is not allowed
* according to the spec.
*
* The specification states that the huffman trees must always contain at
* least one element. However, many CAB files contain blocks where the
* length tree is completely empty (because there are no matches), and
* this is expected to succeed.
*
 * The errors in LZX documentation appear to have been corrected in the
* new documentation for the LZX DELTA format.
*
* http://msdn.microsoft.com/en-us/library/cc483133.aspx
*
* However, this is a different format, an extension of regular LZX.
* I have noticed the following differences, there may be more:
*
* The maximum window size has increased from 2MB to 32MB. This also
* increases the maximum number of position slots, etc.
*
* If the match length is 257 (the maximum possible), this signals
* a further length decoding step, that allows for matches up to
* 33024 bytes long.
*
* The format now allows for "reference data", supplied by the caller.
* If match offsets go further back than the number of bytes
* decompressed so far, that is them accessing the reference data.
*/
/* import bit-reading macros and code */
#define BITS_TYPE struct lzxd_stream
#define BITS_VAR lzx
#define BITS_ORDER_MSB
#define READ_BYTES do { \
unsigned char b0, b1; \
READ_IF_NEEDED; b0 = *i_ptr++; \
READ_IF_NEEDED; b1 = *i_ptr++; \
INJECT_BITS((b1 << 8) | b0, 16); \
} while (0)
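/* LZX bitstreams are stored as a sequence of little-endian 16-bit words,
 * but bits are consumed MSB-first within each word, which is why
 * READ_BYTES injects (b1 << 8) | b0 rather than the bytes in file order. */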
#include <readbits.h>
/* import huffman-reading macros and code */
#define TABLEBITS(tbl) LZX_##tbl##_TABLEBITS
#define MAXSYMBOLS(tbl) LZX_##tbl##_MAXSYMBOLS
#define HUFF_TABLE(tbl,idx) lzx->tbl##_table[idx]
#define HUFF_LEN(tbl,idx) lzx->tbl##_len[idx]
#define HUFF_ERROR return lzx->error = MSPACK_ERR_DECRUNCH
#include <readhuff.h>
/* BUILD_TABLE(tbl) builds a huffman lookup table from code lengths */
#define BUILD_TABLE(tbl) \
if (make_decode_table(MAXSYMBOLS(tbl), TABLEBITS(tbl), \
&HUFF_LEN(tbl,0), &HUFF_TABLE(tbl,0))) \
{ \
D(("failed to build %s table", #tbl)) \
return lzx->error = MSPACK_ERR_DECRUNCH; \
}
#define BUILD_TABLE_MAYBE_EMPTY(tbl) do { \
lzx->tbl##_empty = 0; \
if (make_decode_table(MAXSYMBOLS(tbl), TABLEBITS(tbl), \
&HUFF_LEN(tbl,0), &HUFF_TABLE(tbl,0))) \
{ \
for (i = 0; i < MAXSYMBOLS(tbl); i++) { \
if (HUFF_LEN(tbl, i) > 0) { \
D(("failed to build %s table", #tbl)) \
return lzx->error = MSPACK_ERR_DECRUNCH; \
} \
} \
/* empty tree - allow it, but don't decode symbols with it */ \
lzx->tbl##_empty = 1; \
} \
} while (0)
/* READ_LENGTHS(tablename, first, last) reads in code lengths for symbols
* first to last in the given table. The code lengths are stored in their
* own special LZX way.
*/
#define READ_LENGTHS(tbl, first, last) do { \
STORE_BITS; \
if (lzxd_read_lens(lzx, &HUFF_LEN(tbl, 0), (first), \
(unsigned int)(last))) return lzx->error; \
RESTORE_BITS; \
} while (0)
static int lzxd_read_lens(struct lzxd_stream *lzx, unsigned char *lens,
unsigned int first, unsigned int last)
{
/* bit buffer and huffman symbol decode variables */
register unsigned int bit_buffer;
register int bits_left, i;
register unsigned short sym;
unsigned char *i_ptr, *i_end;
unsigned int x, y;
int z;
RESTORE_BITS;
/* read lengths for pretree (20 symbols, lengths stored in fixed 4 bits) */
for (x = 0; x < 20; x++) {
READ_BITS(y, 4);
lzx->PRETREE_len[x] = y;
}
BUILD_TABLE(PRETREE);
for (x = first; x < last; ) {
READ_HUFFSYM(PRETREE, z);
if (z == 17) {
/* code = 17, run of ([read 4 bits]+4) zeros */
READ_BITS(y, 4); y += 4;
while (y--) lens[x++] = 0;
}
else if (z == 18) {
/* code = 18, run of ([read 5 bits]+20) zeros */
READ_BITS(y, 5); y += 20;
while (y--) lens[x++] = 0;
}
else if (z == 19) {
/* code = 19, run of ([read 1 bit]+4) [read huffman symbol] */
READ_BITS(y, 1); y += 4;
READ_HUFFSYM(PRETREE, z);
z = lens[x] - z; if (z < 0) z += 17;
while (y--) lens[x++] = z;
}
else {
/* code = 0 to 16, delta current length entry */
z = lens[x] - z; if (z < 0) z += 17;
lens[x++] = z;
}
}
STORE_BITS;
return MSPACK_ERR_OK;
}
/* LZX static data tables:
*
* LZX uses 'position slots' to represent match offsets. For every match,
* a small 'position slot' number and a small offset from that slot are
* encoded instead of one large offset.
*
* The number of slots is decided by how many are needed to encode the
* largest offset for a given window size. This is easy when the gap between
 * slots is less than 128Kb: it's a linear relationship. But when extra_bits
* reaches its limit of 17 (because LZX can only ensure reading 17 bits of
* data at a time), we can only jump 128Kb at a time and have to start
* using more and more position slots as each window size doubles.
*
* position_base[] is an index to the position slot bases
*
 * extra_bits[] states how many bits of offset-from-base data are needed.
*
* They are calculated as follows:
* extra_bits[i] = 0 where i < 4
* extra_bits[i] = floor(i/2)-1 where i >= 4 && i < 36
* extra_bits[i] = 17 where i >= 36
* position_base[0] = 0
* position_base[i] = position_base[i-1] + (1 << extra_bits[i-1])
*/
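/* Illustrative sketch, not part of libmspack: the extra_bits[] and
 * position_base[] tables below can be regenerated from the formulas
 * above. The helper name is hypothetical and the function is unused;
 * the library ships the tables precomputed (extra_bits[] only stores
 * the first 36 entries, and the decoder clamps later slots to 17). */
static void lzx_build_position_tables(unsigned char eb[290],
                                      unsigned int pb[290])
{
    int i;
    for (i = 0; i < 290; i++)
        eb[i] = (i < 4) ? 0 : (i < 36) ? (i / 2) - 1 : 17;
    pb[0] = 0;
    for (i = 1; i < 290; i++)
        pb[i] = pb[i - 1] + (1U << eb[i - 1]);
}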
static const unsigned int position_slots[11] = {
30, 32, 34, 36, 38, 42, 50, 66, 98, 162, 290
};
static const unsigned char extra_bits[36] = {
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16
};
static const unsigned int position_base[290] = {
0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512,
768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576, 32768,
49152, 65536, 98304, 131072, 196608, 262144, 393216, 524288, 655360,
786432, 917504, 1048576, 1179648, 1310720, 1441792, 1572864, 1703936,
1835008, 1966080, 2097152, 2228224, 2359296, 2490368, 2621440, 2752512,
2883584, 3014656, 3145728, 3276800, 3407872, 3538944, 3670016, 3801088,
3932160, 4063232, 4194304, 4325376, 4456448, 4587520, 4718592, 4849664,
4980736, 5111808, 5242880, 5373952, 5505024, 5636096, 5767168, 5898240,
6029312, 6160384, 6291456, 6422528, 6553600, 6684672, 6815744, 6946816,
7077888, 7208960, 7340032, 7471104, 7602176, 7733248, 7864320, 7995392,
8126464, 8257536, 8388608, 8519680, 8650752, 8781824, 8912896, 9043968,
9175040, 9306112, 9437184, 9568256, 9699328, 9830400, 9961472, 10092544,
10223616, 10354688, 10485760, 10616832, 10747904, 10878976, 11010048,
11141120, 11272192, 11403264, 11534336, 11665408, 11796480, 11927552,
12058624, 12189696, 12320768, 12451840, 12582912, 12713984, 12845056,
12976128, 13107200, 13238272, 13369344, 13500416, 13631488, 13762560,
13893632, 14024704, 14155776, 14286848, 14417920, 14548992, 14680064,
14811136, 14942208, 15073280, 15204352, 15335424, 15466496, 15597568,
15728640, 15859712, 15990784, 16121856, 16252928, 16384000, 16515072,
16646144, 16777216, 16908288, 17039360, 17170432, 17301504, 17432576,
17563648, 17694720, 17825792, 17956864, 18087936, 18219008, 18350080,
18481152, 18612224, 18743296, 18874368, 19005440, 19136512, 19267584,
19398656, 19529728, 19660800, 19791872, 19922944, 20054016, 20185088,
20316160, 20447232, 20578304, 20709376, 20840448, 20971520, 21102592,
21233664, 21364736, 21495808, 21626880, 21757952, 21889024, 22020096,
22151168, 22282240, 22413312, 22544384, 22675456, 22806528, 22937600,
23068672, 23199744, 23330816, 23461888, 23592960, 23724032, 23855104,
23986176, 24117248, 24248320, 24379392, 24510464, 24641536, 24772608,
24903680, 25034752, 25165824, 25296896, 25427968, 25559040, 25690112,
25821184, 25952256, 26083328, 26214400, 26345472, 26476544, 26607616,
26738688, 26869760, 27000832, 27131904, 27262976, 27394048, 27525120,
27656192, 27787264, 27918336, 28049408, 28180480, 28311552, 28442624,
28573696, 28704768, 28835840, 28966912, 29097984, 29229056, 29360128,
29491200, 29622272, 29753344, 29884416, 30015488, 30146560, 30277632,
30408704, 30539776, 30670848, 30801920, 30932992, 31064064, 31195136,
31326208, 31457280, 31588352, 31719424, 31850496, 31981568, 32112640,
32243712, 32374784, 32505856, 32636928, 32768000, 32899072, 33030144,
33161216, 33292288, 33423360
};
static void lzxd_reset_state(struct lzxd_stream *lzx) {
int i;
lzx->R0 = 1;
lzx->R1 = 1;
lzx->R2 = 1;
lzx->header_read = 0;
lzx->block_remaining = 0;
lzx->block_type = LZX_BLOCKTYPE_INVALID;
/* initialise tables to 0 (because deltas will be applied to them) */
for (i = 0; i < LZX_MAINTREE_MAXSYMBOLS; i++) lzx->MAINTREE_len[i] = 0;
for (i = 0; i < LZX_LENGTH_MAXSYMBOLS; i++) lzx->LENGTH_len[i] = 0;
}
/*-------- main LZX code --------*/
struct lzxd_stream *lzxd_init(struct mspack_system *system,
struct mspack_file *input,
struct mspack_file *output,
int window_bits,
int reset_interval,
int input_buffer_size,
off_t output_length,
char is_delta)
{
unsigned int window_size = 1 << window_bits;
struct lzxd_stream *lzx;
if (!system) return NULL;
/* LZX DELTA window sizes are between 2^17 (128KiB) and 2^25 (32MiB),
* regular LZX windows are between 2^15 (32KiB) and 2^21 (2MiB)
*/
if (is_delta) {
if (window_bits < 17 || window_bits > 25) return NULL;
}
else {
if (window_bits < 15 || window_bits > 21) return NULL;
}
input_buffer_size = (input_buffer_size + 1) & -2;
if (!input_buffer_size) return NULL;
/* allocate decompression state */
if (!(lzx = (struct lzxd_stream *) system->alloc(system, sizeof(struct lzxd_stream)))) {
return NULL;
}
/* allocate decompression window and input buffer */
lzx->window = (unsigned char *) system->alloc(system, (size_t) window_size);
lzx->inbuf = (unsigned char *) system->alloc(system, (size_t) input_buffer_size);
if (!lzx->window || !lzx->inbuf) {
system->free(lzx->window);
system->free(lzx->inbuf);
system->free(lzx);
return NULL;
}
/* initialise decompression state */
lzx->sys = system;
lzx->input = input;
lzx->output = output;
lzx->offset = 0;
lzx->length = output_length;
lzx->inbuf_size = input_buffer_size;
lzx->window_size = 1 << window_bits;
lzx->ref_data_size = 0;
lzx->window_posn = 0;
lzx->frame_posn = 0;
lzx->frame = 0;
lzx->reset_interval = reset_interval;
lzx->intel_filesize = 0;
lzx->intel_curpos = 0;
lzx->intel_started = 0;
lzx->error = MSPACK_ERR_OK;
lzx->num_offsets = position_slots[window_bits - 15] << 3;
lzx->is_delta = is_delta;
lzx->o_ptr = lzx->o_end = &lzx->e8_buf[0];
lzxd_reset_state(lzx);
INIT_BITS;
return lzx;
}
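/* Illustrative usage sketch, not part of libmspack: decompress a complete
 * regular (non-DELTA) LZX stream in one call. The helper name is
 * hypothetical; "sys", "in" and "out" are assumed to be a caller-provided
 * mspack_system implementation and already-open files. */
static int lzxd_decompress_all_example(struct mspack_system *sys,
                                       struct mspack_file *in,
                                       struct mspack_file *out,
                                       off_t out_len)
{
    /* 2MiB window (window_bits = 21), no reset interval, 4KiB input buffer */
    struct lzxd_stream *lzx = lzxd_init(sys, in, out, 21, 0, 4096,
                                        out_len, 0);
    int err;
    if (!lzx) return MSPACK_ERR_NOMEMORY;
    err = lzxd_decompress(lzx, out_len);
    lzxd_free(lzx);
    return err;
}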
int lzxd_set_reference_data(struct lzxd_stream *lzx,
struct mspack_system *system,
struct mspack_file *input,
unsigned int length)
{
if (!lzx) return MSPACK_ERR_ARGS;
if (!lzx->is_delta) {
D(("only LZX DELTA streams support reference data"))
return MSPACK_ERR_ARGS;
}
if (lzx->offset) {
D(("too late to set reference data after decoding starts"))
return MSPACK_ERR_ARGS;
}
if (length > lzx->window_size) {
D(("reference length (%u) is longer than the window", length))
return MSPACK_ERR_ARGS;
}
if (length > 0 && (!system || !input)) {
D(("length > 0 but no system or input"))
return MSPACK_ERR_ARGS;
}
lzx->ref_data_size = length;
if (length > 0) {
/* copy reference data */
unsigned char *pos = &lzx->window[lzx->window_size - length];
int bytes = system->read(input, pos, length);
/* length can't be more than 2^25, so no signedness problem */
if (bytes < (int)length) return MSPACK_ERR_READ;
}
lzx->ref_data_size = length;
return MSPACK_ERR_OK;
}
void lzxd_set_output_length(struct lzxd_stream *lzx, off_t out_bytes) {
if (lzx) lzx->length = out_bytes;
}
int lzxd_decompress(struct lzxd_stream *lzx, off_t out_bytes) {
/* bitstream and huffman reading variables */
register unsigned int bit_buffer;
register int bits_left, i=0;
unsigned char *i_ptr, *i_end;
register unsigned short sym;
int match_length, length_footer, extra, verbatim_bits, bytes_todo;
int this_run, main_element, aligned_bits, j;
unsigned char *window, *runsrc, *rundest, buf[12];
unsigned int frame_size=0, end_frame, match_offset, window_posn;
unsigned int R0, R1, R2;
/* easy answers */
if (!lzx || (out_bytes < 0)) return MSPACK_ERR_ARGS;
if (lzx->error) return lzx->error;
/* flush out any stored-up bytes before we begin */
i = lzx->o_end - lzx->o_ptr;
if ((off_t) i > out_bytes) i = (int) out_bytes;
if (i) {
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
}
if (out_bytes == 0) return MSPACK_ERR_OK;
/* restore local state */
RESTORE_BITS;
window = lzx->window;
window_posn = lzx->window_posn;
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
end_frame = (unsigned int)((lzx->offset + out_bytes) / LZX_FRAME_SIZE) + 1;
while (lzx->frame < end_frame) {
/* have we reached the reset interval? (if there is one?) */
if (lzx->reset_interval && ((lzx->frame % lzx->reset_interval) == 0)) {
if (lzx->block_remaining) {
D(("%d bytes remaining at reset interval", lzx->block_remaining))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-read the intel header and reset the huffman lengths */
lzxd_reset_state(lzx);
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
}
/* LZX DELTA format has chunk_size, not present in LZX format */
if (lzx->is_delta) {
ENSURE_BITS(16);
REMOVE_BITS(16);
}
/* read header if necessary */
if (!lzx->header_read) {
/* read 1 bit. if bit=0, intel filesize = 0.
* if bit=1, read intel filesize (32 bits) */
j = 0; READ_BITS(i, 1); if (i) { READ_BITS(i, 16); READ_BITS(j, 16); }
lzx->intel_filesize = (i << 16) | j;
lzx->header_read = 1;
}
/* calculate size of frame: all frames are 32k except the final frame
* which is 32kb or less. this can only be calculated when lzx->length
* has been filled in. */
frame_size = LZX_FRAME_SIZE;
if (lzx->length && (lzx->length - lzx->offset) < (off_t)frame_size) {
frame_size = lzx->length - lzx->offset;
}
/* decode until one more frame is available */
bytes_todo = lzx->frame_posn + frame_size - window_posn;
while (bytes_todo > 0) {
/* initialise new block, if one is needed */
if (lzx->block_remaining == 0) {
/* realign if previous block was an odd-sized UNCOMPRESSED block */
if ((lzx->block_type == LZX_BLOCKTYPE_UNCOMPRESSED) &&
(lzx->block_length & 1))
{
READ_IF_NEEDED;
i_ptr++;
}
/* read block type (3 bits) and block length (24 bits) */
READ_BITS(lzx->block_type, 3);
READ_BITS(i, 16); READ_BITS(j, 8);
lzx->block_remaining = lzx->block_length = (i << 8) | j;
/*D(("new block t%d len %u", lzx->block_type, lzx->block_length))*/
/* read individual block headers */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_ALIGNED:
/* read lengths of and build aligned huffman decoding tree */
for (i = 0; i < 8; i++) { READ_BITS(j, 3); lzx->ALIGNED_len[i] = j; }
BUILD_TABLE(ALIGNED);
/* no break -- rest of aligned header is same as verbatim */
case LZX_BLOCKTYPE_VERBATIM:
/* read lengths of and build main huffman decoding tree */
READ_LENGTHS(MAINTREE, 0, 256);
READ_LENGTHS(MAINTREE, 256, LZX_NUM_CHARS + lzx->num_offsets);
BUILD_TABLE(MAINTREE);
/* if the literal 0xE8 is anywhere in the block... */
if (lzx->MAINTREE_len[0xE8] != 0) lzx->intel_started = 1;
/* read lengths of and build lengths huffman decoding tree */
READ_LENGTHS(LENGTH, 0, LZX_NUM_SECONDARY_LENGTHS);
BUILD_TABLE_MAYBE_EMPTY(LENGTH);
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* because we can't assume otherwise */
lzx->intel_started = 1;
/* read 1-16 (not 0-15) bits to align to bytes */
ENSURE_BITS(16);
if (bits_left > 16) i_ptr -= 2;
bits_left = 0; bit_buffer = 0;
/* read 12 bytes of stored R0 / R1 / R2 values */
for (rundest = &buf[0], i = 0; i < 12; i++) {
READ_IF_NEEDED;
*rundest++ = *i_ptr++;
}
R0 = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
R1 = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24);
R2 = buf[8] | (buf[9] << 8) | (buf[10] << 16) | (buf[11] << 24);
break;
default:
D(("bad block type"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
}
/* decode more of the block:
* run = min(what's available, what's needed) */
this_run = lzx->block_remaining;
if (this_run > bytes_todo) this_run = bytes_todo;
/* assume we decode exactly this_run bytes, for now */
bytes_todo -= this_run;
lzx->block_remaining -= this_run;
/* decode at least this_run bytes */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_VERBATIM:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1=R0; R0 = match_offset; break;
case 2: match_offset = R2; R2=R0; R0 = match_offset; break;
case 3: match_offset = 1; R2=R1; R1=R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
READ_BITS(verbatim_bits, extra);
match_offset = position_base[match_offset] - 2 + verbatim_bits;
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_ALIGNED:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1 = R0; R0 = match_offset; break;
case 2: match_offset = R2; R2 = R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
match_offset = position_base[match_offset] - 2;
if (extra > 3) {
/* verbatim and aligned bits */
extra -= 3;
READ_BITS(verbatim_bits, extra);
match_offset += (verbatim_bits << 3);
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra == 3) {
/* aligned bits only */
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra > 0) { /* extra==1, extra==2 */
/* verbatim bits only */
READ_BITS(verbatim_bits, extra);
match_offset += verbatim_bits;
}
else /* extra == 0 */ {
/* ??? not defined in LZX specification! */
match_offset = 1;
}
/* update repeated offset LRU queue */
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* as this_run is limited not to wrap a frame, this also means it
* won't wrap the window (as the window is a multiple of 32k) */
rundest = &window[window_posn];
window_posn += this_run;
while (this_run > 0) {
if ((i = i_end - i_ptr) == 0) {
READ_IF_NEEDED;
}
else {
if (i > this_run) i = this_run;
lzx->sys->copy(i_ptr, rundest, (size_t) i);
rundest += i;
i_ptr += i;
this_run -= i;
}
}
break;
default:
return lzx->error = MSPACK_ERR_DECRUNCH; /* might as well */
}
/* did the final match overrun our desired this_run length? */
if (this_run < 0) {
if ((unsigned int)(-this_run) > lzx->block_remaining) {
D(("overrun went past end of block by %d (%d remaining)",
-this_run, lzx->block_remaining ))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
lzx->block_remaining -= -this_run;
}
} /* while (bytes_todo > 0) */
/* streams don't extend over frame boundaries */
if ((window_posn - lzx->frame_posn) != frame_size) {
D(("decode beyond output frame limits! %d != %d",
window_posn - lzx->frame_posn, frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-align input bitstream */
if (bits_left > 0) ENSURE_BITS(16);
if (bits_left & 15) REMOVE_BITS(bits_left & 15);
/* check that we've used all of the previous frame first */
if (lzx->o_ptr != lzx->o_end) {
D(("%ld avail bytes, new %d frame",
(long)(lzx->o_end - lzx->o_ptr), frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* does this intel block _really_ need decoding? */
if (lzx->intel_started && lzx->intel_filesize &&
(lzx->frame <= 32768) && (frame_size > 10))
{
unsigned char *data = &lzx->e8_buf[0];
unsigned char *dataend = &lzx->e8_buf[frame_size - 10];
signed int curpos = lzx->intel_curpos;
signed int filesize = lzx->intel_filesize;
signed int abs_off, rel_off;
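    /* E8 is the x86 CALL opcode: the LZX encoder rewrote each CALL's 4-byte
     * relative displacement as an absolute address to expose more matches,
     * so the operands must be translated back to relative form here. */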
/* copy e8 block to the e8 buffer and tweak if needed */
lzx->o_ptr = data;
lzx->sys->copy(&lzx->window[lzx->frame_posn], data, frame_size);
while (data < dataend) {
if (*data++ != 0xE8) { curpos++; continue; }
abs_off = data[0] | (data[1]<<8) | (data[2]<<16) | (data[3]<<24);
if ((abs_off >= -curpos) && (abs_off < filesize)) {
rel_off = (abs_off >= 0) ? abs_off - curpos : abs_off + filesize;
data[0] = (unsigned char) rel_off;
data[1] = (unsigned char) (rel_off >> 8);
data[2] = (unsigned char) (rel_off >> 16);
data[3] = (unsigned char) (rel_off >> 24);
}
data += 4;
curpos += 5;
}
lzx->intel_curpos += frame_size;
}
else {
lzx->o_ptr = &lzx->window[lzx->frame_posn];
if (lzx->intel_filesize) lzx->intel_curpos += frame_size;
}
lzx->o_end = &lzx->o_ptr[frame_size];
/* write a frame */
i = (out_bytes < (off_t)frame_size) ? (unsigned int)out_bytes : frame_size;
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
/* advance frame start position */
lzx->frame_posn += frame_size;
lzx->frame++;
/* wrap window / frame position pointers */
if (window_posn == lzx->window_size) window_posn = 0;
if (lzx->frame_posn == lzx->window_size) lzx->frame_posn = 0;
} /* while (lzx->frame < end_frame) */
if (out_bytes) {
D(("bytes left to output"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* store local state */
STORE_BITS;
lzx->window_posn = window_posn;
lzx->R0 = R0;
lzx->R1 = R1;
lzx->R2 = R2;
return MSPACK_ERR_OK;
}
void lzxd_free(struct lzxd_stream *lzx) {
struct mspack_system *sys;
if (lzx) {
sys = lzx->sys;
sys->free(lzx->inbuf);
sys->free(lzx->window);
sys->free(lzx);
}
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_1635_1 |
crossvul-cpp_data_bad_3665_1 | /*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996 by Silicon Graphics. All rights reserved.
* Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "private/gc_priv.h"
/*
* These are extra allocation routines which are likely to be less
* frequently used than those in malloc.c. They are separate in the
* hope that the .o file will be excluded from statically linked
* executables. We should probably break this up further.
*/
#include <stdio.h>
#include <string.h>
#ifdef MSWINCE
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN 1
# endif
# define NOSERVICE
# include <windows.h>
#else
# include <errno.h>
#endif
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h */
/* or introducing dependencies on internal data structure layouts. */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
{
switch(knd) {
# ifdef STUBBORN_ALLOC
case STUBBORN:
return(GC_malloc_stubborn((size_t)lb));
# endif
case PTRFREE:
return(GC_malloc_atomic((size_t)lb));
case NORMAL:
return(GC_malloc((size_t)lb));
case UNCOLLECTABLE:
return(GC_malloc_uncollectable((size_t)lb));
# ifdef ATOMIC_UNCOLLECTABLE
case AUNCOLLECTABLE:
return(GC_malloc_atomic_uncollectable((size_t)lb));
# endif /* ATOMIC_UNCOLLECTABLE */
default:
return(GC_generic_malloc(lb,knd));
}
}
/* Change the size of the block pointed to by p to contain at least */
/* lb bytes. The object may be (and quite likely will be) moved. */
/* The kind (e.g. atomic) is the same as that of the old. */
/* Shrinking of large blocks is not implemented well. */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
struct hblk * h;
hdr * hhdr;
size_t sz; /* Current size in bytes */
size_t orig_sz; /* Original sz in bytes */
int obj_kind;
if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
h = HBLKPTR(p);
hhdr = HDR(h);
sz = hhdr -> hb_sz;
obj_kind = hhdr -> hb_obj_kind;
orig_sz = sz;
if (sz > MAXOBJBYTES) {
/* Round it up to the next whole heap block */
word descr;
sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
hhdr -> hb_sz = sz;
descr = GC_obj_kinds[obj_kind].ok_descriptor;
if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
hhdr -> hb_descr = descr;
# ifdef MARK_BIT_PER_OBJ
GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
# else
GC_ASSERT(hhdr -> hb_large_block &&
hhdr -> hb_map[ANY_INDEX] == 1);
# endif
if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
/* Extra area is already cleared by GC_alloc_large_and_clear. */
}
if (ADD_SLOP(lb) <= sz) {
if (lb >= (sz >> 1)) {
# ifdef STUBBORN_ALLOC
if (obj_kind == STUBBORN) GC_change_stubborn(p);
# endif
if (orig_sz > lb) {
/* Clear unneeded part of object to avoid bogus pointer */
/* tracing. */
/* Safe for stubborn objects. */
BZERO(((ptr_t)p) + lb, orig_sz - lb);
}
return(p);
} else {
/* shrink */
void * result =
GC_generic_or_special_malloc((word)lb, obj_kind);
if (result == 0) return(0);
/* Could also return original object. But this */
/* gives the client warning of imminent disaster. */
BCOPY(p, result, lb);
# ifndef IGNORE_FREE
GC_free(p);
# endif
return(result);
}
} else {
/* grow */
void * result =
GC_generic_or_special_malloc((word)lb, obj_kind);
if (result == 0) return(0);
BCOPY(p, result, sz);
# ifndef IGNORE_FREE
GC_free(p);
# endif
return(result);
}
}
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
# define REDIRECT_REALLOC GC_realloc
# endif
# ifdef REDIRECT_REALLOC
/* As with malloc, avoid two levels of extra calls here. */
# define GC_debug_realloc_replacement(p, lb) \
GC_debug_realloc(p, lb, GC_DBG_RA "unknown", 0)
void * realloc(void * p, size_t lb)
{
return(REDIRECT_REALLOC(p, lb));
}
# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
/* Allocate memory such that only pointers to near the */
/* beginning of the object are considered. */
/* We avoid holding the allocation lock while we clear memory. */
GC_INNER void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
void *result;
size_t lg;
size_t lb_rounded;
word n_blocks;
GC_bool init;
DCL_LOCK_STATE;
if (SMALL_OBJ(lb))
return(GC_generic_malloc((word)lb, k));
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
LOCK();
result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
} else {
# ifdef THREADS
/* Clear any memory that might be used for GC descriptors */
/* before we release the lock. */
((word *)result)[0] = 0;
((word *)result)[1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
# endif
}
}
GC_bytes_allocd += lb_rounded;
if (0 == result) {
GC_oom_func oom_fn = GC_oom_fn;
UNLOCK();
return((*oom_fn)(lb));
} else {
UNLOCK();
if (init && !GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
}
return(result);
}
}
GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}
GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
{
return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
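/* Hypothetical usage sketch: the _ignore_off_page variants tell the
 * collector that only pointers near the beginning of the object will be
 * relied upon, so the caller must keep the base pointer itself reachable
 * for the object's whole lifetime. The helper name is illustrative. */
static char *GC_big_buffer_example(size_t n)
{
    /* pointer-free data: suitable for a plain byte buffer */
    return (char *)GC_malloc_atomic_ignore_off_page(n);
}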
/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays. */
GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
{
GC_bytes_allocd += n;
}
/* The same for GC_bytes_freed. */
GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
{
GC_bytes_freed += n;
}
# ifdef PARALLEL_MARK
STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
/* Number of bytes of memory allocated since */
/* we released the GC lock. Instead of */
/* reacquiring the GC lock just to add this in, */
/* we add it in the next time we reacquire */
/* the lock. (Atomically adding it doesn't */
/* work, since we would have to atomically */
/* update it in GC_malloc, which is too */
/* expensive.) */
# endif /* PARALLEL_MARK */
/* Return a list of 1 or more objects of the indicated size, linked */
/* through the first word in the object. This has the advantage that */
/* it acquires the allocation lock only once, and may greatly reduce */
/* time wasted contending for the allocation lock. Typical usage would */
/* be in a thread that requires many items of the same size. It would */
/* keep its own free list in thread-local storage, and call */
/* GC_malloc_many or friends to replenish it. (We do not round up */
/* object sizes, since a call indicates the intention to consume many */
/* objects of exactly this size.) */
/* We assume that the size is a multiple of GRANULE_BYTES. */
/* We return the free-list by assigning it to *result, since it is */
/* not safe to return, e.g. a linked list of pointer-free objects, */
/* since the collector would not retain the entire list if it were */
/* invoked just as we were returning. */
/* Note that the client should usually clear the link field. */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
void *op;
void *p;
void **opp;
size_t lw; /* Length in words. */
size_t lg; /* Length in granules. */
signed_word my_bytes_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;
GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
if (!SMALL_OBJ(lb)) {
op = GC_generic_malloc(lb, k);
if (EXPECT(0 != op, TRUE))
obj_link(op) = 0;
*result = op;
return;
}
lw = BYTES_TO_WORDS(lb);
lg = BYTES_TO_GRANULES(lb);
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
LOCK();
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
/* Do our share of marking work */
if (GC_incremental && !GC_dont_gc) {
ENTER_GC();
GC_collect_a_little_inner(1);
EXIT_GC();
}
/* First see if we can reclaim a page of objects waiting to be */
/* reclaimed. */
{
struct hblk ** rlh = ok -> ok_reclaim_list;
struct hblk * hbp;
hdr * hhdr;
rlh += lg;
while ((hbp = *rlh) != 0) {
hhdr = HDR(hbp);
*rlh = hhdr -> hb_next;
GC_ASSERT(hhdr -> hb_sz == lb);
hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
# ifdef PARALLEL_MARK
if (GC_parallel) {
signed_word my_bytes_allocd_tmp =
(signed_word)AO_load(&GC_bytes_allocd_tmp);
GC_ASSERT(my_bytes_allocd_tmp >= 0);
/* We only decrement it while holding the GC lock. */
/* Thus we can't accidentally adjust it down in more */
/* than one thread simultaneously. */
if (my_bytes_allocd_tmp != 0) {
(void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
(AO_t)(-my_bytes_allocd_tmp));
GC_bytes_allocd += my_bytes_allocd_tmp;
}
GC_acquire_mark_lock();
++ GC_fl_builder_count;
UNLOCK();
GC_release_mark_lock();
}
# endif
op = GC_reclaim_generic(hbp, hhdr, lb,
ok -> ok_init, 0, &my_bytes_allocd);
if (op != 0) {
/* We also reclaimed memory, so we need to adjust */
/* that count. */
/* This should be atomic, so the results may be */
/* inaccurate. */
GC_bytes_found += my_bytes_allocd;
# ifdef PARALLEL_MARK
if (GC_parallel) {
*result = op;
(void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
(AO_t)my_bytes_allocd);
GC_acquire_mark_lock();
-- GC_fl_builder_count;
if (GC_fl_builder_count == 0) GC_notify_all_builder();
GC_release_mark_lock();
(void) GC_clear_stack(0);
return;
}
# endif
GC_bytes_allocd += my_bytes_allocd;
goto out;
}
# ifdef PARALLEL_MARK
if (GC_parallel) {
GC_acquire_mark_lock();
-- GC_fl_builder_count;
if (GC_fl_builder_count == 0) GC_notify_all_builder();
GC_release_mark_lock();
LOCK();
/* GC lock is needed for reclaim list access. We */
/* must decrement fl_builder_count before reacquiring GC */
/* lock. Hopefully this path is rare. */
}
# endif
}
}
/* Next try to use prefix of global free list if there is one. */
/* We don't refill it, but we need to use it up before allocating */
/* a new block ourselves. */
opp = &(GC_obj_kinds[k].ok_freelist[lg]);
if ( (op = *opp) != 0 ) {
*opp = 0;
my_bytes_allocd = 0;
for (p = op; p != 0; p = obj_link(p)) {
my_bytes_allocd += lb;
if ((word)my_bytes_allocd >= HBLKSIZE) {
*opp = obj_link(p);
obj_link(p) = 0;
break;
}
}
GC_bytes_allocd += my_bytes_allocd;
goto out;
}
/* Next try to allocate a new block worth of objects of this size. */
{
struct hblk *h = GC_allochblk(lb, k, 0);
if (h != 0) {
if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
# ifdef PARALLEL_MARK
if (GC_parallel) {
GC_acquire_mark_lock();
++ GC_fl_builder_count;
UNLOCK();
GC_release_mark_lock();
op = GC_build_fl(h, lw,
(ok -> ok_init || GC_debugging_started), 0);
*result = op;
GC_acquire_mark_lock();
-- GC_fl_builder_count;
if (GC_fl_builder_count == 0) GC_notify_all_builder();
GC_release_mark_lock();
(void) GC_clear_stack(0);
return;
}
# endif
op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
goto out;
}
}
/* As a last attempt, try allocating a single object. Note that */
/* this may trigger a collection or expand the heap. */
op = GC_generic_malloc_inner(lb, k);
if (0 != op) obj_link(op) = 0;
out:
*result = op;
UNLOCK();
(void) GC_clear_stack(0);
}
/* Note that the "atomic" version of this would be unsafe, since the */
/* links would not be seen by the collector. */
GC_API void * GC_CALL GC_malloc_many(size_t lb)
{
void *result;
GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1)
& ~(GRANULE_BYTES-1),
NORMAL, &result);
return result;
}
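/* Hypothetical usage sketch: drain a GC_malloc_many() list one object at
 * a time, assuming a fixed lb across calls. A real client would keep the
 * list head in thread-local storage; the single static used here is
 * illustrative and not thread-safe. */
static void *free_list_example = NULL;
static void *alloc_one_example(size_t lb)
{
    void *p = free_list_example;
    if (p == NULL) {
        p = GC_malloc_many(lb);         /* may return NULL on OOM */
        if (p == NULL) return NULL;
    }
    free_list_example = *(void **)p;    /* pop: follow first-word link */
    *(void **)p = NULL;                 /* clear the link field, as advised */
    return p;
}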
/* Not well tested nor integrated. */
/* Debug version is tricky and currently missing. */
#include <limits.h>
GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
{
size_t new_lb;
size_t offset;
ptr_t result;
if (align <= GRANULE_BYTES) return GC_malloc(lb);
if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
if (align > HBLKSIZE) {
return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
}
return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
/* Will be HBLKSIZE aligned. */
}
/* We could also try to make sure that the real rounded-up object size */
/* is a multiple of align. That would be correct up to HBLKSIZE. */
new_lb = lb + align - 1;
result = GC_malloc(new_lb);
offset = (word)result % align;
if (offset != 0) {
offset = align - offset;
if (!GC_all_interior_pointers) {
if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
GC_register_displacement(offset);
}
}
result = (void *) ((ptr_t)result + offset);
GC_ASSERT((word)result % align == 0);
return result;
}
/* This one exists largely to redirect posix_memalign for leak finding. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
/* Check alignment properly. */
if (((align - 1) & align) != 0 || align < sizeof(void *)) {
# ifdef MSWINCE
return ERROR_INVALID_PARAMETER;
# else
return EINVAL;
# endif
}
if ((*memptr = GC_memalign(align, lb)) == NULL) {
# ifdef MSWINCE
return ERROR_NOT_ENOUGH_MEMORY;
# else
return ENOMEM;
# endif
}
return 0;
}
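/* Hypothetical usage sketch: a 64-byte-aligned, collectable block via the
 * POSIX-style interface. 64 is a power of two and >= sizeof(void *), so
 * it passes the alignment check above. */
static void *aligned_block_example(void)
{
    void *p = NULL;
    return (GC_posix_memalign(&p, 64, 1024) == 0) ? p : NULL;
}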
#ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointer-free, untraced, uncollectable data */
/* This is normally roughly equivalent to the system malloc. */
/* But it may be useful if malloc is redefined. */
GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
lg = GC_size_map[lb];
opp = &(GC_auobjfreelist[lg]);
LOCK();
op = *opp;
if (EXPECT(0 != op, TRUE)) {
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
/* Mark bit was already set while object was on free list. */
GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
UNLOCK();
} else {
UNLOCK();
op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
}
GC_ASSERT(0 == op || GC_is_marked(op));
return((void *) op);
} else {
hdr * hhdr;
op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
if (0 == op) return(0);
GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
hhdr = HDR(op);
LOCK();
set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
GC_ASSERT(hhdr -> hb_n_marks == 0);
hhdr -> hb_n_marks = 1;
UNLOCK();
return((void *) op);
}
}
#endif /* ATOMIC_UNCOLLECTABLE */
/* provide a version of strdup() that uses the collector to allocate the
copy of the string */
GC_API char * GC_CALL GC_strdup(const char *s)
{
char *copy;
size_t lb;
if (s == NULL) return NULL;
lb = strlen(s) + 1;
if ((copy = GC_malloc_atomic(lb)) == NULL) {
# ifndef MSWINCE
errno = ENOMEM;
# endif
return NULL;
}
# ifndef MSWINCE
strcpy(copy, s);
# else
/* strcpy() is deprecated in WinCE */
memcpy(copy, s, lb);
# endif
return copy;
}
GC_API char * GC_CALL GC_strndup(const char *str, size_t size)
{
char *copy;
size_t len = strlen(str); /* str is expected to be non-NULL */
if (len > size)
len = size;
copy = GC_malloc_atomic(len + 1);
if (copy == NULL) {
# ifndef MSWINCE
errno = ENOMEM;
# endif
return NULL;
}
BCOPY(str, copy, len);
copy[len] = '\0';
return copy;
}
#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */
GC_API wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
{
size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
wchar_t *copy = GC_malloc_atomic(lb);
if (copy == NULL) {
# ifndef MSWINCE
errno = ENOMEM;
# endif
return NULL;
}
BCOPY(str, copy, lb);
return copy;
}
#endif /* GC_REQUIRE_WCSDUP */
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3665_1 |
crossvul-cpp_data_bad_5818_0 | /*
* PNG image format
* Copyright (c) 2008 Loren Merrit <lorenm@u.washington.edu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "png.h"
#include "pngdsp.h"
// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL/255 * 0x7f)
#define pb_80 (~0UL/255 * 0x80)
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
long i;
for (i = 0; i <= w - sizeof(long); i += sizeof(long)) {
long a = *(long *)(src1 + i);
long b = *(long *)(src2 + i);
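        /* SWAR byte-wise add: clearing each byte's top bit keeps the
         * per-byte sums from carrying into the neighbouring byte lane;
         * the top bits are then restored via (a ^ b) & pb_80, since
         * adding two MSBs with the carry-out discarded is just XOR. */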
*(long *)(dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
}
for (; i < w; i++)
dst[i] = src1[i] + src2[i];
}
av_cold void ff_pngdsp_init(PNGDSPContext *dsp)
{
dsp->add_bytes_l2 = add_bytes_l2_c;
dsp->add_paeth_prediction = ff_add_png_paeth_prediction;
if (ARCH_X86) ff_pngdsp_init_x86(dsp);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5818_0 |
crossvul-cpp_data_good_1635_1 | /* This file is part of libmspack.
* (C) 2003-2013 Stuart Caie.
*
* The LZX method was created by Jonathan Forbes and Tomi Poutanen, adapted
* by Microsoft Corporation.
*
* libmspack is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License (LGPL) version 2.1
*
* For further details, see the file COPYING.LIB distributed with libmspack
*/
/* LZX decompression implementation */
#include <system.h>
#include <lzx.h>
/* Microsoft's LZX document (in cab-sdk.exe) and their implementation
* of the com.ms.util.cab Java package do not concur.
*
* In the LZX document, there is a table showing the correlation between
* window size and the number of position slots. It states that the 1MB
* window = 40 slots and the 2MB window = 42 slots. In the implementation,
* 1MB = 42 slots, 2MB = 50 slots. The actual calculation is 'find the
* first slot whose position base is equal to or more than the required
* window size'. This would explain why other tables in the document refer
* to 50 slots rather than 42.
*
* The constant NUM_PRIMARY_LENGTHS used in the decompression pseudocode
* is not defined in the specification.
*
 * The LZX document does not state that the uncompressed block has an
 * uncompressed length field. Where does this length field come from, so
 * we can know how large the block is? The implementation has it as the 24
 * bits following the 3 blocktype bits, before the alignment padding.
*
* The LZX document states that aligned offset blocks have their aligned
* offset huffman tree AFTER the main and length trees. The implementation
* suggests that the aligned offset tree is BEFORE the main and length
* trees.
*
* The LZX document decoding algorithm states that, in an aligned offset
* block, if an extra_bits value is 1, 2 or 3, then that number of bits
* should be read and the result added to the match offset. This is
* correct for 1 and 2, but not 3, where just a huffman symbol (using the
* aligned tree) should be read.
*
* Regarding the E8 preprocessing, the LZX document states 'No translation
* may be performed on the last 6 bytes of the input block'. This is
* correct. However, the pseudocode provided checks for the *E8 leader*
* up to the last 6 bytes. If the leader appears between -10 and -7 bytes
* from the end, this would cause the next four bytes to be modified, at
* least one of which would be in the last 6 bytes, which is not allowed
* according to the spec.
*
* The specification states that the huffman trees must always contain at
* least one element. However, many CAB files contain blocks where the
* length tree is completely empty (because there are no matches), and
* this is expected to succeed.
*
 * The errors in LZX documentation appear to have been corrected in the
* new documentation for the LZX DELTA format.
*
* http://msdn.microsoft.com/en-us/library/cc483133.aspx
*
* However, this is a different format, an extension of regular LZX.
* I have noticed the following differences, there may be more:
*
* The maximum window size has increased from 2MB to 32MB. This also
* increases the maximum number of position slots, etc.
*
* If the match length is 257 (the maximum possible), this signals
* a further length decoding step, that allows for matches up to
* 33024 bytes long.
*
* The format now allows for "reference data", supplied by the caller.
* If match offsets go further back than the number of bytes
* decompressed so far, that is them accessing the reference data.
*/
/* import bit-reading macros and code */
#define BITS_TYPE struct lzxd_stream
#define BITS_VAR lzx
#define BITS_ORDER_MSB
#define READ_BYTES do { \
unsigned char b0, b1; \
READ_IF_NEEDED; b0 = *i_ptr++; \
READ_IF_NEEDED; b1 = *i_ptr++; \
INJECT_BITS((b1 << 8) | b0, 16); \
} while (0)
#include <readbits.h>
/* import huffman-reading macros and code */
#define TABLEBITS(tbl) LZX_##tbl##_TABLEBITS
#define MAXSYMBOLS(tbl) LZX_##tbl##_MAXSYMBOLS
#define HUFF_TABLE(tbl,idx) lzx->tbl##_table[idx]
#define HUFF_LEN(tbl,idx) lzx->tbl##_len[idx]
#define HUFF_ERROR return lzx->error = MSPACK_ERR_DECRUNCH
#include <readhuff.h>
/* BUILD_TABLE(tbl) builds a huffman lookup table from code lengths */
#define BUILD_TABLE(tbl) \
if (make_decode_table(MAXSYMBOLS(tbl), TABLEBITS(tbl), \
&HUFF_LEN(tbl,0), &HUFF_TABLE(tbl,0))) \
{ \
D(("failed to build %s table", #tbl)) \
return lzx->error = MSPACK_ERR_DECRUNCH; \
}
#define BUILD_TABLE_MAYBE_EMPTY(tbl) do { \
lzx->tbl##_empty = 0; \
if (make_decode_table(MAXSYMBOLS(tbl), TABLEBITS(tbl), \
&HUFF_LEN(tbl,0), &HUFF_TABLE(tbl,0))) \
{ \
for (i = 0; i < MAXSYMBOLS(tbl); i++) { \
if (HUFF_LEN(tbl, i) > 0) { \
D(("failed to build %s table", #tbl)) \
return lzx->error = MSPACK_ERR_DECRUNCH; \
} \
} \
/* empty tree - allow it, but don't decode symbols with it */ \
lzx->tbl##_empty = 1; \
} \
} while (0)
/* READ_LENGTHS(tablename, first, last) reads in code lengths for symbols
* first to last in the given table. The code lengths are stored in their
* own special LZX way.
*/
#define READ_LENGTHS(tbl, first, last) do { \
STORE_BITS; \
if (lzxd_read_lens(lzx, &HUFF_LEN(tbl, 0), (first), \
(unsigned int)(last))) return lzx->error; \
RESTORE_BITS; \
} while (0)
static int lzxd_read_lens(struct lzxd_stream *lzx, unsigned char *lens,
unsigned int first, unsigned int last)
{
/* bit buffer and huffman symbol decode variables */
register unsigned int bit_buffer;
register int bits_left, i;
register unsigned short sym;
unsigned char *i_ptr, *i_end;
unsigned int x, y;
int z;
RESTORE_BITS;
/* read lengths for pretree (20 symbols, lengths stored in fixed 4 bits) */
for (x = 0; x < 20; x++) {
READ_BITS(y, 4);
lzx->PRETREE_len[x] = y;
}
BUILD_TABLE(PRETREE);
for (x = first; x < last; ) {
READ_HUFFSYM(PRETREE, z);
if (z == 17) {
/* code = 17, run of ([read 4 bits]+4) zeros */
READ_BITS(y, 4); y += 4;
while (y--) lens[x++] = 0;
}
else if (z == 18) {
/* code = 18, run of ([read 5 bits]+20) zeros */
READ_BITS(y, 5); y += 20;
while (y--) lens[x++] = 0;
}
else if (z == 19) {
/* code = 19, run of ([read 1 bit]+4) [read huffman symbol] */
READ_BITS(y, 1); y += 4;
READ_HUFFSYM(PRETREE, z);
z = lens[x] - z; if (z < 0) z += 17;
while (y--) lens[x++] = z;
}
else {
/* code = 0 to 16, delta current length entry */
z = lens[x] - z; if (z < 0) z += 17;
lens[x++] = z;
}
}
STORE_BITS;
return MSPACK_ERR_OK;
}
/* LZX static data tables:
*
* LZX uses 'position slots' to represent match offsets. For every match,
* a small 'position slot' number and a small offset from that slot are
* encoded instead of one large offset.
*
* The number of slots is decided by how many are needed to encode the
* largest offset for a given window size. This is easy when the gap between
 * slots is less than 128Kb: it's a linear relationship. But when extra_bits
* reaches its limit of 17 (because LZX can only ensure reading 17 bits of
* data at a time), we can only jump 128Kb at a time and have to start
* using more and more position slots as each window size doubles.
*
* position_base[] is an index to the position slot bases
*
 * extra_bits[] states how many bits of offset-from-base data are needed.
*
* They are calculated as follows:
* extra_bits[i] = 0 where i < 4
* extra_bits[i] = floor(i/2)-1 where i >= 4 && i < 36
* extra_bits[i] = 17 where i >= 36
* position_base[0] = 0
* position_base[i] = position_base[i-1] + (1 << extra_bits[i-1])
*/
static const unsigned int position_slots[11] = {
30, 32, 34, 36, 38, 42, 50, 66, 98, 162, 290
};
static const unsigned char extra_bits[36] = {
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16
};
static const unsigned int position_base[290] = {
0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512,
768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576, 32768,
49152, 65536, 98304, 131072, 196608, 262144, 393216, 524288, 655360,
786432, 917504, 1048576, 1179648, 1310720, 1441792, 1572864, 1703936,
1835008, 1966080, 2097152, 2228224, 2359296, 2490368, 2621440, 2752512,
2883584, 3014656, 3145728, 3276800, 3407872, 3538944, 3670016, 3801088,
3932160, 4063232, 4194304, 4325376, 4456448, 4587520, 4718592, 4849664,
4980736, 5111808, 5242880, 5373952, 5505024, 5636096, 5767168, 5898240,
6029312, 6160384, 6291456, 6422528, 6553600, 6684672, 6815744, 6946816,
7077888, 7208960, 7340032, 7471104, 7602176, 7733248, 7864320, 7995392,
8126464, 8257536, 8388608, 8519680, 8650752, 8781824, 8912896, 9043968,
9175040, 9306112, 9437184, 9568256, 9699328, 9830400, 9961472, 10092544,
10223616, 10354688, 10485760, 10616832, 10747904, 10878976, 11010048,
11141120, 11272192, 11403264, 11534336, 11665408, 11796480, 11927552,
12058624, 12189696, 12320768, 12451840, 12582912, 12713984, 12845056,
12976128, 13107200, 13238272, 13369344, 13500416, 13631488, 13762560,
13893632, 14024704, 14155776, 14286848, 14417920, 14548992, 14680064,
14811136, 14942208, 15073280, 15204352, 15335424, 15466496, 15597568,
15728640, 15859712, 15990784, 16121856, 16252928, 16384000, 16515072,
16646144, 16777216, 16908288, 17039360, 17170432, 17301504, 17432576,
17563648, 17694720, 17825792, 17956864, 18087936, 18219008, 18350080,
18481152, 18612224, 18743296, 18874368, 19005440, 19136512, 19267584,
19398656, 19529728, 19660800, 19791872, 19922944, 20054016, 20185088,
20316160, 20447232, 20578304, 20709376, 20840448, 20971520, 21102592,
21233664, 21364736, 21495808, 21626880, 21757952, 21889024, 22020096,
22151168, 22282240, 22413312, 22544384, 22675456, 22806528, 22937600,
23068672, 23199744, 23330816, 23461888, 23592960, 23724032, 23855104,
23986176, 24117248, 24248320, 24379392, 24510464, 24641536, 24772608,
24903680, 25034752, 25165824, 25296896, 25427968, 25559040, 25690112,
25821184, 25952256, 26083328, 26214400, 26345472, 26476544, 26607616,
26738688, 26869760, 27000832, 27131904, 27262976, 27394048, 27525120,
27656192, 27787264, 27918336, 28049408, 28180480, 28311552, 28442624,
28573696, 28704768, 28835840, 28966912, 29097984, 29229056, 29360128,
29491200, 29622272, 29753344, 29884416, 30015488, 30146560, 30277632,
30408704, 30539776, 30670848, 30801920, 30932992, 31064064, 31195136,
31326208, 31457280, 31588352, 31719424, 31850496, 31981568, 32112640,
32243712, 32374784, 32505856, 32636928, 32768000, 32899072, 33030144,
33161216, 33292288, 33423360
};
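/* Illustrative only (not part of the decoder): a sketch of how the
* extra_bits[] and position_base[] values above follow from the formulas
* in the preceding comment; the helper name lzx_make_position_tables is
* hypothetical and is used nowhere else in this file. */
static void lzx_make_position_tables(unsigned char eb[290], unsigned int base[290])
{
unsigned int i;
for (i = 0; i < 290; i++) {
/* 0 below slot 4, then floor(i/2)-1, capped at 17 from slot 36 on */
eb[i] = (i < 4) ? 0 : (i < 36) ? (unsigned char)(i / 2 - 1) : 17;
/* position_base[i] = position_base[i-1] + (1 << extra_bits[i-1]) */
base[i] = (i == 0) ? 0 : base[i - 1] + (1u << eb[i - 1]);
}
}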
static void lzxd_reset_state(struct lzxd_stream *lzx) {
int i;
lzx->R0 = 1;
lzx->R1 = 1;
lzx->R2 = 1;
lzx->header_read = 0;
lzx->block_remaining = 0;
lzx->block_type = LZX_BLOCKTYPE_INVALID;
/* initialise tables to 0 (because deltas will be applied to them) */
for (i = 0; i < LZX_MAINTREE_MAXSYMBOLS; i++) lzx->MAINTREE_len[i] = 0;
for (i = 0; i < LZX_LENGTH_MAXSYMBOLS; i++) lzx->LENGTH_len[i] = 0;
}
/*-------- main LZX code --------*/
struct lzxd_stream *lzxd_init(struct mspack_system *system,
struct mspack_file *input,
struct mspack_file *output,
int window_bits,
int reset_interval,
int input_buffer_size,
off_t output_length,
char is_delta)
{
unsigned int window_size;
struct lzxd_stream *lzx;
if (!system) return NULL;
/* LZX DELTA window sizes are between 2^17 (128KiB) and 2^25 (32MiB),
* regular LZX windows are between 2^15 (32KiB) and 2^21 (2MiB)
*/
if (is_delta) {
if (window_bits < 17 || window_bits > 25) return NULL;
}
else {
if (window_bits < 15 || window_bits > 21) return NULL;
}
/* compute the window size only after validating window_bits; shifting
* by an out-of-range count would be undefined behaviour */
window_size = 1 << window_bits;
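/* round the requested input buffer size up to an even number of bytes,
* since the bitstream is consumed 16 bits at a time */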
input_buffer_size = (input_buffer_size + 1) & -2;
if (!input_buffer_size) return NULL;
/* allocate decompression state */
if (!(lzx = (struct lzxd_stream *) system->alloc(system, sizeof(struct lzxd_stream)))) {
return NULL;
}
/* allocate decompression window and input buffer */
lzx->window = (unsigned char *) system->alloc(system, (size_t) window_size);
lzx->inbuf = (unsigned char *) system->alloc(system, (size_t) input_buffer_size);
if (!lzx->window || !lzx->inbuf) {
system->free(lzx->window);
system->free(lzx->inbuf);
system->free(lzx);
return NULL;
}
/* initialise decompression state */
lzx->sys = system;
lzx->input = input;
lzx->output = output;
lzx->offset = 0;
lzx->length = output_length;
lzx->inbuf_size = input_buffer_size;
lzx->window_size = 1 << window_bits;
lzx->ref_data_size = 0;
lzx->window_posn = 0;
lzx->frame_posn = 0;
lzx->frame = 0;
lzx->reset_interval = reset_interval;
lzx->intel_filesize = 0;
lzx->intel_curpos = 0;
lzx->intel_started = 0;
lzx->error = MSPACK_ERR_OK;
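/* the main tree holds LZX_NUM_CHARS literals plus 8 match elements
* (slot << 3 | 3-bit length header) per position slot; position_slots[]
* is indexed from the minimum window size of 2^15 */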
lzx->num_offsets = position_slots[window_bits - 15] << 3;
lzx->is_delta = is_delta;
lzx->o_ptr = lzx->o_end = &lzx->e8_buf[0];
lzxd_reset_state(lzx);
INIT_BITS;
return lzx;
}
int lzxd_set_reference_data(struct lzxd_stream *lzx,
struct mspack_system *system,
struct mspack_file *input,
unsigned int length)
{
if (!lzx) return MSPACK_ERR_ARGS;
if (!lzx->is_delta) {
D(("only LZX DELTA streams support reference data"))
return MSPACK_ERR_ARGS;
}
if (lzx->offset) {
D(("too late to set reference data after decoding starts"))
return MSPACK_ERR_ARGS;
}
if (length > lzx->window_size) {
D(("reference length (%u) is longer than the window", length))
return MSPACK_ERR_ARGS;
}
if (length > 0 && (!system || !input)) {
D(("length > 0 but no system or input"))
return MSPACK_ERR_ARGS;
}
lzx->ref_data_size = length;
if (length > 0) {
/* copy reference data */
unsigned char *pos = &lzx->window[lzx->window_size - length];
int bytes = system->read(input, pos, length);
/* length can't be more than 2^25, so no signedness problem */
if (bytes < (int)length) return MSPACK_ERR_READ;
}
return MSPACK_ERR_OK;
}
void lzxd_set_output_length(struct lzxd_stream *lzx, off_t out_bytes) {
if (lzx) lzx->length = out_bytes;
}
int lzxd_decompress(struct lzxd_stream *lzx, off_t out_bytes) {
/* bitstream and huffman reading variables */
register unsigned int bit_buffer;
register int bits_left, i=0;
unsigned char *i_ptr, *i_end;
register unsigned short sym;
int match_length, length_footer, extra, verbatim_bits, bytes_todo;
int this_run, main_element, aligned_bits, j;
unsigned char *window, *runsrc, *rundest, buf[12];
unsigned int frame_size=0, end_frame, match_offset, window_posn;
unsigned int R0, R1, R2;
/* easy answers */
if (!lzx || (out_bytes < 0)) return MSPACK_ERR_ARGS;
if (lzx->error) return lzx->error;
/* flush out any stored-up bytes before we begin */
i = lzx->o_end - lzx->o_ptr;
if ((off_t) i > out_bytes) i = (int) out_bytes;
if (i) {
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
}
if (out_bytes == 0) return MSPACK_ERR_OK;
/* restore local state */
RESTORE_BITS;
window = lzx->window;
window_posn = lzx->window_posn;
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
end_frame = (unsigned int)((lzx->offset + out_bytes) / LZX_FRAME_SIZE) + 1;
while (lzx->frame < end_frame) {
/* have we reached the reset interval? (if there is one?) */
if (lzx->reset_interval && ((lzx->frame % lzx->reset_interval) == 0)) {
if (lzx->block_remaining) {
D(("%d bytes remaining at reset interval", lzx->block_remaining))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-read the intel header and reset the huffman lengths */
lzxd_reset_state(lzx);
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
}
/* LZX DELTA format has chunk_size, not present in LZX format */
if (lzx->is_delta) {
ENSURE_BITS(16);
REMOVE_BITS(16);
}
/* read header if necessary */
if (!lzx->header_read) {
/* read 1 bit. if bit=0, intel filesize = 0.
* if bit=1, read intel filesize (32 bits) */
j = 0; READ_BITS(i, 1); if (i) { READ_BITS(i, 16); READ_BITS(j, 16); }
lzx->intel_filesize = (i << 16) | j;
lzx->header_read = 1;
}
/* calculate size of frame: all frames are 32KiB except the final frame,
* which is 32KiB or less. this can only be calculated when lzx->length
* has been filled in. */
frame_size = LZX_FRAME_SIZE;
if (lzx->length && (lzx->length - lzx->offset) < (off_t)frame_size) {
frame_size = lzx->length - lzx->offset;
}
/* decode until one more frame is available */
bytes_todo = lzx->frame_posn + frame_size - window_posn;
while (bytes_todo > 0) {
/* initialise new block, if one is needed */
if (lzx->block_remaining == 0) {
/* realign if previous block was an odd-sized UNCOMPRESSED block */
if ((lzx->block_type == LZX_BLOCKTYPE_UNCOMPRESSED) &&
(lzx->block_length & 1))
{
READ_IF_NEEDED;
i_ptr++;
}
/* read block type (3 bits) and block length (24 bits) */
READ_BITS(lzx->block_type, 3);
READ_BITS(i, 16); READ_BITS(j, 8);
lzx->block_remaining = lzx->block_length = (i << 8) | j;
/*D(("new block t%d len %u", lzx->block_type, lzx->block_length))*/
/* read individual block headers */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_ALIGNED:
/* read lengths of and build aligned huffman decoding tree */
for (i = 0; i < 8; i++) { READ_BITS(j, 3); lzx->ALIGNED_len[i] = j; }
BUILD_TABLE(ALIGNED);
/* no break -- rest of aligned header is same as verbatim */
case LZX_BLOCKTYPE_VERBATIM:
/* read lengths of and build main huffman decoding tree */
READ_LENGTHS(MAINTREE, 0, 256);
READ_LENGTHS(MAINTREE, 256, LZX_NUM_CHARS + lzx->num_offsets);
BUILD_TABLE(MAINTREE);
/* if the literal 0xE8 is anywhere in the block... */
if (lzx->MAINTREE_len[0xE8] != 0) lzx->intel_started = 1;
/* read lengths of and build lengths huffman decoding tree */
READ_LENGTHS(LENGTH, 0, LZX_NUM_SECONDARY_LENGTHS);
BUILD_TABLE_MAYBE_EMPTY(LENGTH);
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* because we can't assume otherwise */
lzx->intel_started = 1;
/* read 1-16 (not 0-15) bits to align to bytes */
if (bits_left == 0) ENSURE_BITS(16);
bits_left = 0; bit_buffer = 0;
/* read 12 bytes of stored R0 / R1 / R2 values */
for (rundest = &buf[0], i = 0; i < 12; i++) {
READ_IF_NEEDED;
*rundest++ = *i_ptr++;
}
R0 = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
R1 = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24);
R2 = buf[8] | (buf[9] << 8) | (buf[10] << 16) | (buf[11] << 24);
break;
default:
D(("bad block type"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
}
/* decode more of the block:
* run = min(what's available, what's needed) */
this_run = lzx->block_remaining;
if (this_run > bytes_todo) this_run = bytes_todo;
/* assume we decode exactly this_run bytes, for now */
bytes_todo -= this_run;
lzx->block_remaining -= this_run;
/* decode at least this_run bytes */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_VERBATIM:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1=R0; R0 = match_offset; break;
case 2: match_offset = R2; R2=R0; R0 = match_offset; break;
case 3: match_offset = 1; R2=R1; R1=R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
READ_BITS(verbatim_bits, extra);
match_offset = position_base[match_offset] - 2 + verbatim_bits;
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_ALIGNED:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1 = R0; R0 = match_offset; break;
case 2: match_offset = R2; R2 = R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
match_offset = position_base[match_offset] - 2;
if (extra > 3) {
/* verbatim and aligned bits */
extra -= 3;
READ_BITS(verbatim_bits, extra);
match_offset += (verbatim_bits << 3);
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra == 3) {
/* aligned bits only */
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra > 0) { /* extra==1, extra==2 */
/* verbatim bits only */
READ_BITS(verbatim_bits, extra);
match_offset += verbatim_bits;
}
else /* extra == 0 */ {
/* ??? not defined in LZX specification! */
match_offset = 1;
}
/* update repeated offset LRU queue */
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* as this_run is limited not to wrap a frame, this also means it
* won't wrap the window (as the window is a multiple of 32KiB) */
rundest = &window[window_posn];
window_posn += this_run;
while (this_run > 0) {
if ((i = i_end - i_ptr) == 0) {
READ_IF_NEEDED;
}
else {
if (i > this_run) i = this_run;
lzx->sys->copy(i_ptr, rundest, (size_t) i);
rundest += i;
i_ptr += i;
this_run -= i;
}
}
break;
default:
return lzx->error = MSPACK_ERR_DECRUNCH; /* might as well */
}
/* did the final match overrun our desired this_run length? */
if (this_run < 0) {
if ((unsigned int)(-this_run) > lzx->block_remaining) {
D(("overrun went past end of block by %d (%d remaining)",
-this_run, lzx->block_remaining ))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
lzx->block_remaining -= -this_run;
}
} /* while (bytes_todo > 0) */
/* streams don't extend over frame boundaries */
if ((window_posn - lzx->frame_posn) != frame_size) {
D(("decode beyond output frame limits! %d != %d",
window_posn - lzx->frame_posn, frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-align input bitstream */
if (bits_left > 0) ENSURE_BITS(16);
if (bits_left & 15) REMOVE_BITS(bits_left & 15);
/* check that we've used all of the previous frame first */
if (lzx->o_ptr != lzx->o_end) {
D(("%ld avail bytes, new %d frame",
(long)(lzx->o_end - lzx->o_ptr), frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* does this intel block _really_ need decoding? */
if (lzx->intel_started && lzx->intel_filesize &&
(lzx->frame <= 32768) && (frame_size > 10))
{
unsigned char *data = &lzx->e8_buf[0];
unsigned char *dataend = &lzx->e8_buf[frame_size - 10];
signed int curpos = lzx->intel_curpos;
signed int filesize = lzx->intel_filesize;
signed int abs_off, rel_off;
/* copy e8 block to the e8 buffer and tweak if needed */
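/* Background note: the LZX compressor rewrites the 32-bit operand of
* x86 CALL (0xE8) instructions from call-relative to absolute form so
* that repeated calls to the same target compress better; the loop
* below performs the inverse translation on the decoded frame. */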
lzx->o_ptr = data;
lzx->sys->copy(&lzx->window[lzx->frame_posn], data, frame_size);
while (data < dataend) {
if (*data++ != 0xE8) { curpos++; continue; }
abs_off = data[0] | (data[1]<<8) | (data[2]<<16) | (data[3]<<24);
if ((abs_off >= -curpos) && (abs_off < filesize)) {
rel_off = (abs_off >= 0) ? abs_off - curpos : abs_off + filesize;
data[0] = (unsigned char) rel_off;
data[1] = (unsigned char) (rel_off >> 8);
data[2] = (unsigned char) (rel_off >> 16);
data[3] = (unsigned char) (rel_off >> 24);
}
data += 4;
curpos += 5;
}
lzx->intel_curpos += frame_size;
}
else {
lzx->o_ptr = &lzx->window[lzx->frame_posn];
if (lzx->intel_filesize) lzx->intel_curpos += frame_size;
}
lzx->o_end = &lzx->o_ptr[frame_size];
/* write a frame */
i = (out_bytes < (off_t)frame_size) ? (unsigned int)out_bytes : frame_size;
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
/* advance frame start position */
lzx->frame_posn += frame_size;
lzx->frame++;
/* wrap window / frame position pointers */
if (window_posn == lzx->window_size) window_posn = 0;
if (lzx->frame_posn == lzx->window_size) lzx->frame_posn = 0;
} /* while (lzx->frame < end_frame) */
if (out_bytes) {
D(("bytes left to output"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* store local state */
STORE_BITS;
lzx->window_posn = window_posn;
lzx->R0 = R0;
lzx->R1 = R1;
lzx->R2 = R2;
return MSPACK_ERR_OK;
}
void lzxd_free(struct lzxd_stream *lzx) {
struct mspack_system *sys;
if (lzx) {
sys = lzx->sys;
sys->free(lzx->inbuf);
sys->free(lzx->window);
sys->free(lzx);
}
}
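/* Illustrative only: a minimal sketch of driving the decoder above. It
* assumes the caller already has a struct mspack_system whose
* alloc/free/read/write/copy callbacks are implemented, plus open input
* and output mspack_file handles; none of those are defined here, and
* the function name decompress_example is hypothetical. */
static int decompress_example(struct mspack_system *sys,
struct mspack_file *in,
struct mspack_file *out,
off_t uncompressed_size)
{
/* 2MiB window (bits=21), no reset interval, 4KiB input buffer,
* regular (non-delta) LZX */
struct lzxd_stream *lzx = lzxd_init(sys, in, out, 21, 0, 4096,
uncompressed_size, 0);
int err;
if (!lzx) return MSPACK_ERR_NOMEMORY;
err = lzxd_decompress(lzx, uncompressed_size);
lzxd_free(lzx);
return err;
}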
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_1635_1 |
crossvul-cpp_data_bad_5764_0 | /*
* IPV6 GSO/GRO offload support
* Linux INET6 implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* UDPv6 GSO support
*/
#include <linux/skbuff.h>
#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
static int udp6_ufo_send_check(struct sk_buff *skb)
{
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
if (!pskb_may_pull(skb, sizeof(*uh)))
return -EINVAL;
if (likely(!skb->encapsulation)) {
ipv6h = ipv6_hdr(skb);
uh = udp_hdr(skb);
uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
IPPROTO_UDP, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
}
return 0;
}
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
u8 *packet_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum;
int tnl_hlen;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
if (unlikely(type & ~(SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_GRE |
SKB_GSO_IPIP |
SKB_GSO_SIT |
SKB_GSO_MPLS) ||
!(type & (SKB_GSO_UDP))))
goto out;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
goto out;
}
if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
segs = skb_udp_tunnel_segment(skb, features);
else {
/* Do software UFO. Complete and fill in the UDP checksum as hardware
* cannot checksum UDP packets sent as multiple IP fragments.
*/
offset = skb_checksum_start_offset(skb);
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
tnl_hlen = skb_tnl_header_len(skb);
if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
goto out;
}
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
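/* Illustrative layout (not to scale): everything from the MAC header up
* to the end of the unfragmentable part moves left by
* sizeof(struct frag_hdr) == 8 bytes, opening a gap in which the
* fragment header is then written:
*   before: [headroom][mac][ipv6 + unfrag ext hdrs][rest of packet]
*   after:  [headroom-8][mac][ipv6 + unfrag ext hdrs][frag hdr][rest]
*/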
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
unfrag_ip6hlen + tnl_hlen;
packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
*/
segs = skb_segment(skb, features);
}
out:
return segs;
}
static const struct net_offload udpv6_offload = {
.callbacks = {
.gso_send_check = udp6_ufo_send_check,
.gso_segment = udp6_ufo_fragment,
},
};
int __init udp_offload_init(void)
{
return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5764_0 |
crossvul-cpp_data_bad_3498_1 | /*
* Support for o32 Linux/MIPS ELF binaries.
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
#define ELF_CLASS ELFCLASS32
#ifdef __MIPSEB__
#define ELF_DATA ELFDATA2MSB
#else /* __MIPSEL__ */
#define ELF_DATA ELFDATA2LSB
#endif
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
typedef unsigned int elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
__res = 0; \
if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
__res = 0; \
\
__res; \
})
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__kernel_uid_t pr_uid;
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
#define init_elf_binfmt init_elf32_binfmt
#define jiffies_to_timeval jiffies_to_compat_timeval
static inline void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
long rem;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
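/* Illustrative only: with HZ == 100 (TICK_NSEC == 10,000,000), a jiffies
* value of 250 gives nsec == 2,500,000,000, hence tv_sec == 2 and
* tv_usec == 500000. */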
#undef ELF_CORE_COPY_REGS
#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
int i;
for (i = 0; i < EF_R0; i++)
grp[i] = 0;
grp[EF_R0] = 0;
for (i = 1; i <= 31; i++)
grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
grp[EF_R26] = 0;
grp[EF_R27] = 0;
grp[EF_LO] = (elf_greg_t) regs->lo;
grp[EF_HI] = (elf_greg_t) regs->hi;
grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
#ifdef EF_UNUSED0
grp[EF_UNUSED0] = 0;
#endif
}
MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
#include "../../../fs/binfmt_elf.c"
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3498_1 |
crossvul-cpp_data_good_3498_9 | /*
* SLUB: A slab allocator that limits cache line use instead of queuing
* objects in per cpu and per node lists.
*
* The allocator synchronizes using per slab locks and only
* uses a centralized lock to manage a pool of partial slabs.
*
* (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
/*
* Lock order:
* 1. slab_lock(page)
* 2. slab->list_lock
*
* The slab_lock protects operations on the object of a particular
* slab and its metadata in the page struct. If the slab lock
* has been taken then no allocations nor frees can be performed
* on the objects in the slab nor can the slab be added or removed
* from the partial or full lists since this would mean modifying
* the page_struct of the slab.
*
* The list_lock protects the partial and full list on each node and
* the partial slab counter. If taken then no new slabs may be added or
* removed from the lists, nor may the number of partial slabs be modified.
* (Note that the total number of slabs is an atomic value that may be
* modified without taking the list lock).
*
* The list_lock is a centralized lock and thus we avoid taking it as
* much as possible. As long as SLUB does not have to handle partial
* slabs, operations can continue without any centralized lock. F.e.
* allocating a long series of objects that fill up slabs does not require
* the list lock.
*
* The lock order is sometimes inverted when we are trying to get a slab
* off a list. We take the list_lock and then look for a page on the list
* to use. While we do that objects in the slabs may be freed. We can
* only operate on the slab if we have also taken the slab_lock. So we use
* a slab_trylock() on the slab. If trylock was successful then no frees
* can occur anymore and we can use the slab for allocations etc. If the
* slab_trylock() does not succeed then frees are in progress in the slab and
* we must stay away from it for a while since we may cause a bouncing
* cacheline if we try to acquire the lock. So go onto the next slab.
* If all pages are busy then we may allocate a new slab instead of reusing
* a partial slab. A new slab has no one operating on it and thus there is
* no danger of cacheline contention.
*
* Interrupts are disabled during allocation and deallocation in order to
* make the slab allocator safe to use in the context of an irq. In addition
* interrupts are disabled to ensure that the processor does not change
* while handling per_cpu slabs, due to kernel preemption.
*
* SLUB assigns one slab for allocation to each processor.
* Allocations only occur from these slabs called cpu slabs.
*
* Slabs with free elements are kept on a partial list and during regular
* operations no list for full slabs is used. If an object in a full slab is
* freed then the slab will show up again on the partial lists.
* We track full slabs for debugging purposes though because otherwise we
* cannot scan all objects.
*
* Slabs are freed when they become empty. Teardown and setup is
* minimal so we rely on the page allocators per cpu caches for
* fast frees and allocs.
*
* Overloading of page flags that are otherwise used for LRU management.
*
* PageActive The slab is frozen and exempt from list processing.
* This means that the slab is dedicated to a purpose
* such as satisfying allocations for a specific
* processor. Objects may be freed in the slab while
* it is frozen but slab_free will then skip the usual
* list operations. It is up to the processor holding
* the slab to integrate the slab into the slab lists
* when the slab is no longer needed.
*
* One use of this flag is to mark slabs that are
* used for allocations. Then such a slab becomes a cpu
* slab. The cpu slab may be equipped with an additional
* freelist that allows lockless access to
* free objects in addition to the regular freelist
* that requires the slab lock.
*
* PageError Slab requires special handling due to debug
* options set. This moves slab handling out of
* the fast path and disables lockless freelists.
*/
#define FROZEN (1 << PG_active)
#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG (1 << PG_error)
#else
#define SLABDEBUG 0
#endif
static inline int SlabFrozen(struct page *page)
{
return page->flags & FROZEN;
}
static inline void SetSlabFrozen(struct page *page)
{
page->flags |= FROZEN;
}
static inline void ClearSlabFrozen(struct page *page)
{
page->flags &= ~FROZEN;
}
static inline int SlabDebug(struct page *page)
{
return page->flags & SLABDEBUG;
}
static inline void SetSlabDebug(struct page *page)
{
page->flags |= SLABDEBUG;
}
static inline void ClearSlabDebug(struct page *page)
{
page->flags &= ~SLABDEBUG;
}
/*
* Issues still to be resolved:
*
* - Support PAGE_ALLOC_DEBUG. Should be easy to do.
*
* - Variable sizing of the per node arrays
*/
/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST
/*
* Minimum number of partial slabs. These will be left on the partial
* lists even if they are empty. kmem_cache_shrink may reclaim them.
*/
#define MIN_PARTIAL 5
/*
* Maximum number of desirable partial slabs.
* The existence of more partial slabs makes kmem_cache_shrink
* sort the partial list by the number of objects in use.
*/
#define MAX_PARTIAL 10
#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_STORE_USER)
/*
* Set of flags that will prevent slab merging
*/
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
SLAB_TRACE | SLAB_DESTROY_BY_RCU)
#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
SLAB_CACHE_DMA)
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
static int kmem_size = sizeof(struct kmem_cache);
#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif
static enum {
DOWN, /* No slab functionality available */
PARTIAL, /* kmem_cache_open() works but kmalloc does not */
UP, /* Everything works but does not show up in sysfs */
SYSFS /* Sysfs up */
} slab_state = DOWN;
/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);
/*
* Tracking user of a slab.
*/
struct track {
void *addr; /* Called from address */
int cpu; /* Was running on cpu */
int pid; /* Pid context */
unsigned long when; /* When did the operation occur */
};
enum track_item { TRACK_ALLOC, TRACK_FREE };
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
kfree(s);
}
#endif
static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
c->stat[si]++;
#endif
}
/********************************************************************
* Core slab cache functions
*******************************************************************/
int slab_is_available(void)
{
return slab_state >= UP;
}
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
return s->node[node];
#else
return &s->local_node;
#endif
}
static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
{
#ifdef CONFIG_SMP
return s->cpu_slab[cpu];
#else
return &s->cpu_slab;
#endif
}
/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
struct page *page, const void *object)
{
void *base;
if (!object)
return 1;
base = page_address(page);
if (object < base || object >= base + page->objects * s->size ||
(object - base) % s->size) {
return 0;
}
return 1;
}
/*
* Slow version of get and set free pointer.
*
* This version requires touching the cache lines of kmem_cache which
* we avoid to do in the fast alloc free paths. There we obtain the offset
* from the page struct.
*/
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
return *(void **)(object + s->offset);
}
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
*(void **)(object + s->offset) = fp;
}
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
__p += (__s)->size)
/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
for (__p = (__free); __p; __p = get_freepointer((__s), __p))
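/* Illustrative only: the freelist threads through the free objects
* themselves. With s->offset == 0 the first word of each free object
* holds the address of the next free one, terminated by NULL:
*   page->freelist -> objA -> objB -> ... -> NULL */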
/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
return (p - addr) / s->size;
}
static inline struct kmem_cache_order_objects oo_make(int order,
unsigned long size)
{
struct kmem_cache_order_objects x = {
(order << 16) + (PAGE_SIZE << order) / size
};
return x;
}
static inline int oo_order(struct kmem_cache_order_objects x)
{
return x.x >> 16;
}
static inline int oo_objects(struct kmem_cache_order_objects x)
{
return x.x & ((1 << 16) - 1);
}
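/* Illustrative only, assuming PAGE_SIZE == 4096: oo_make(1, 256) packs
* order 1 and 8192/256 == 32 objects into x == 0x10020, from which
* oo_order() recovers 1 and oo_objects() recovers 32. */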
#ifdef CONFIG_SLUB_DEBUG
/*
* Debug settings:
*/
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif
static char *slub_debug_slabs;
/*
* Object debugging
*/
static void print_section(char *text, u8 *addr, unsigned int length)
{
int i, offset;
int newline = 1;
char ascii[17];
ascii[16] = 0;
for (i = 0; i < length; i++) {
if (newline) {
printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
newline = 0;
}
printk(KERN_CONT " %02x", addr[i]);
offset = i % 16;
ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
if (offset == 15) {
printk(KERN_CONT " %s\n", ascii);
newline = 1;
}
}
if (!newline) {
i %= 16;
while (i < 16) {
printk(KERN_CONT " ");
ascii[i] = ' ';
i++;
}
printk(KERN_CONT " %s\n", ascii);
}
}
static struct track *get_track(struct kmem_cache *s, void *object,
enum track_item alloc)
{
struct track *p;
if (s->offset)
p = object + s->offset + sizeof(void *);
else
p = object + s->inuse;
return p + alloc;
}
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, void *addr)
{
struct track *p;
if (s->offset)
p = object + s->offset + sizeof(void *);
else
p = object + s->inuse;
p += alloc;
if (addr) {
p->addr = addr;
p->cpu = smp_processor_id();
p->pid = current ? current->pid : -1;
p->when = jiffies;
} else
memset(p, 0, sizeof(struct track));
}
static void init_tracking(struct kmem_cache *s, void *object)
{
if (!(s->flags & SLAB_STORE_USER))
return;
set_track(s, object, TRACK_FREE, NULL);
set_track(s, object, TRACK_ALLOC, NULL);
}
static void print_track(const char *s, struct track *t)
{
if (!t->addr)
return;
printk(KERN_ERR "INFO: %s in ", s);
__print_symbol("%s", (unsigned long)t->addr);
printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
}
static void print_tracking(struct kmem_cache *s, void *object)
{
if (!(s->flags & SLAB_STORE_USER))
return;
print_track("Allocated", get_track(s, object, TRACK_ALLOC));
print_track("Freed", get_track(s, object, TRACK_FREE));
}
static void print_page_info(struct page *page)
{
printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
page, page->objects, page->inuse, page->freelist, page->flags);
}
static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
va_list args;
char buf[100];
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printk(KERN_ERR "========================================"
"=====================================\n");
printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
printk(KERN_ERR "----------------------------------------"
"-------------------------------------\n\n");
}
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
va_list args;
char buf[100];
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
unsigned int off; /* Offset of last byte */
u8 *addr = page_address(page);
print_tracking(s, p);
print_page_info(page);
printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
p, p - addr, get_freepointer(s, p));
if (p > addr + 16)
print_section("Bytes b4", p - 16, 16);
print_section("Object", p, min(s->objsize, 128));
if (s->flags & SLAB_RED_ZONE)
print_section("Redzone", p + s->objsize,
s->inuse - s->objsize);
if (s->offset)
off = s->offset + sizeof(void *);
else
off = s->inuse;
if (s->flags & SLAB_STORE_USER)
off += 2 * sizeof(struct track);
if (off != s->size)
/* Beginning of the filler is the free pointer */
print_section("Padding", p + off, s->size - off);
dump_stack();
}
static void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason)
{
slab_bug(s, "%s", reason);
print_trailer(s, page, object);
}
static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
va_list args;
char buf[100];
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
slab_bug(s, "%s", buf);
print_page_info(page);
dump_stack();
}
static void init_object(struct kmem_cache *s, void *object, int active)
{
u8 *p = object;
if (s->flags & __OBJECT_POISON) {
memset(p, POISON_FREE, s->objsize - 1);
p[s->objsize - 1] = POISON_END;
}
if (s->flags & SLAB_RED_ZONE)
memset(p + s->objsize,
active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
s->inuse - s->objsize);
}
static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
while (bytes) {
if (*start != (u8)value)
return start;
start++;
bytes--;
}
return NULL;
}
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
void *from, void *to)
{
slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
memset(from, data, to - from);
}
static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
u8 *object, char *what,
u8 *start, unsigned int value, unsigned int bytes)
{
u8 *fault;
u8 *end;
fault = check_bytes(start, value, bytes);
if (!fault)
return 1;
end = start + bytes;
while (end > fault && end[-1] == value)
end--;
slab_bug(s, "%s overwritten", what);
printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
fault, end - 1, fault[0], value);
print_trailer(s, page, object);
restore_bytes(s, what, value, fault, end);
return 0;
}
/*
* Object layout:
*
* object address
* Bytes of the object to be managed.
* If the freepointer may overlay the object then the free
* pointer is the first word of the object.
*
* Poisoning uses 0x6b (POISON_FREE) and the last byte is
* 0xa5 (POISON_END)
*
* object + s->objsize
* Padding to reach word boundary. This is also used for Redzoning.
* Padding is extended by another word if Redzoning is enabled and
* objsize == inuse.
*
* We fill with 0xbb (RED_INACTIVE) for inactive objects and with
* 0xcc (RED_ACTIVE) for objects in use.
*
* object + s->inuse
* Meta data starts here.
*
* A. Free pointer (if we cannot overwrite object on free)
* B. Tracking data for SLAB_STORE_USER
* C. Padding to reach required alignment boundary or at minimum
* one word if debugging is on to be able to detect writes
* before the word boundary.
*
* Padding is done using 0x5a (POISON_INUSE)
*
* object + s->size
* Nothing is used beyond s->size.
*
* If slabcaches are merged then the objsize and inuse boundaries are mostly
* ignored. And therefore no slab options that rely on these boundaries
* may be used with merged slabcaches.
*/
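/* Illustrative summary of the layout above for a cache with a separate
* free pointer (s->offset != 0) and SLAB_STORE_USER set:
*
*   object ................ payload (poisoned while free)
*   object + s->objsize ... red zone / alignment padding
*   object + s->inuse ..... free pointer (one word)
*   then .................. two struct track slots (alloc and free)
*   then up to s->size .... POISON_INUSE padding
*/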
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
unsigned long off = s->inuse; /* The end of info */
if (s->offset)
/* Freepointer is placed after the object. */
off += sizeof(void *);
if (s->flags & SLAB_STORE_USER)
/* We also have user information there */
off += 2 * sizeof(struct track);
if (s->size == off)
return 1;
return check_bytes_and_report(s, page, p, "Object padding",
p + off, POISON_INUSE, s->size - off);
}
/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
u8 *start;
u8 *fault;
u8 *end;
int length;
int remainder;
if (!(s->flags & SLAB_POISON))
return 1;
start = page_address(page);
length = (PAGE_SIZE << compound_order(page));
end = start + length;
remainder = length % s->size;
if (!remainder)
return 1;
fault = check_bytes(end - remainder, POISON_INUSE, remainder);
if (!fault)
return 1;
while (end > fault && end[-1] == POISON_INUSE)
end--;
slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
print_section("Padding", end - remainder, remainder);
restore_bytes(s, "slab padding", POISON_INUSE, start, end);
return 0;
}
static int check_object(struct kmem_cache *s, struct page *page,
void *object, int active)
{
u8 *p = object;
u8 *endobject = object + s->objsize;
if (s->flags & SLAB_RED_ZONE) {
unsigned int red =
active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
if (!check_bytes_and_report(s, page, object, "Redzone",
endobject, red, s->inuse - s->objsize))
return 0;
} else {
if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
check_bytes_and_report(s, page, p, "Alignment padding",
endobject, POISON_INUSE, s->inuse - s->objsize);
}
}
if (s->flags & SLAB_POISON) {
if (!active && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p,
POISON_FREE, s->objsize - 1) ||
!check_bytes_and_report(s, page, p, "Poison",
p + s->objsize - 1, POISON_END, 1)))
return 0;
/*
* check_pad_bytes cleans up on its own.
*/
check_pad_bytes(s, page, p);
}
if (!s->offset && active)
/*
* Object and freepointer overlap. Cannot check
* freepointer while object is allocated.
*/
return 1;
/* Check free pointer validity */
if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
object_err(s, page, p, "Freepointer corrupt");
/*
* No choice but to zap it and thus lose the remainder
* of the free objects in this slab. May cause
* another error because the object count is now wrong.
*/
set_freepointer(s, p, NULL);
return 0;
}
return 1;
}
static int check_slab(struct kmem_cache *s, struct page *page)
{
int maxobj;
VM_BUG_ON(!irqs_disabled());
if (!PageSlab(page)) {
slab_err(s, page, "Not a valid slab page");
return 0;
}
maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
if (page->objects > maxobj) {
slab_err(s, page, "objects %u > max %u",
s->name, page->objects, maxobj);
return 0;
}
if (page->inuse > page->objects) {
slab_err(s, page, "inuse %u > max %u",
s->name, page->inuse, page->objects);
return 0;
}
/* Slab_pad_check fixes things up after itself */
slab_pad_check(s, page);
return 1;
}
/*
* Determine if a certain object on a page is on the freelist. Must hold the
* slab lock to guarantee that the chains are in a consistent state.
*/
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
int nr = 0;
void *fp = page->freelist;
void *object = NULL;
unsigned long max_objects;
while (fp && nr <= page->objects) {
if (fp == search)
return 1;
if (!check_valid_pointer(s, page, fp)) {
if (object) {
object_err(s, page, object,
"Freechain corrupt");
set_freepointer(s, object, NULL);
break;
} else {
slab_err(s, page, "Freepointer corrupt");
page->freelist = NULL;
page->inuse = page->objects;
slab_fix(s, "Freelist cleared");
return 0;
}
}
object = fp;
fp = get_freepointer(s, object);
nr++;
}
max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
if (max_objects > 65535)
max_objects = 65535;
if (page->objects != max_objects) {
slab_err(s, page, "Wrong number of objects. Found %d but "
"should be %d", page->objects, max_objects);
page->objects = max_objects;
slab_fix(s, "Number of objects adjusted.");
}
if (page->inuse != page->objects - nr) {
slab_err(s, page, "Wrong object count. Counter is %d but "
"counted were %d", page->inuse, page->objects - nr);
page->inuse = page->objects - nr;
slab_fix(s, "Object count adjusted.");
}
return search == NULL;
}
static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
{
if (s->flags & SLAB_TRACE) {
printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
s->name,
alloc ? "alloc" : "free",
object, page->inuse,
page->freelist);
if (!alloc)
print_section("Object", (void *)object, s->objsize);
dump_stack();
}
}
/*
* Tracking of fully allocated slabs for debugging purposes.
*/
static void add_full(struct kmem_cache_node *n, struct page *page)
{
spin_lock(&n->list_lock);
list_add(&page->lru, &n->full);
spin_unlock(&n->list_lock);
}
static void remove_full(struct kmem_cache *s, struct page *page)
{
struct kmem_cache_node *n;
if (!(s->flags & SLAB_STORE_USER))
return;
n = get_node(s, page_to_nid(page));
spin_lock(&n->list_lock);
list_del(&page->lru);
spin_unlock(&n->list_lock);
}
/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
struct kmem_cache_node *n = get_node(s, node);
return atomic_long_read(&n->nr_slabs);
}
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
struct kmem_cache_node *n = get_node(s, node);
/*
* May be called early in order to allocate a slab for the
* kmem_cache_node structure. Solve the chicken-egg
* dilemma by deferring the increment of the count during
* bootstrap (see early_kmem_cache_node_alloc).
*/
if (!NUMA_BUILD || n) {
atomic_long_inc(&n->nr_slabs);
atomic_long_add(objects, &n->total_objects);
}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
struct kmem_cache_node *n = get_node(s, node);
atomic_long_dec(&n->nr_slabs);
atomic_long_sub(objects, &n->total_objects);
}
/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
void *object)
{
if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
return;
init_object(s, object, 0);
init_tracking(s, object);
}
static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
void *object, void *addr)
{
if (!check_slab(s, page))
goto bad;
if (!on_freelist(s, page, object)) {
object_err(s, page, object, "Object already allocated");
goto bad;
}
if (!check_valid_pointer(s, page, object)) {
object_err(s, page, object, "Freelist Pointer check fails");
goto bad;
}
if (!check_object(s, page, object, 0))
goto bad;
/* Success perform special debug activities for allocs */
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_ALLOC, addr);
trace(s, page, object, 1);
init_object(s, object, 1);
return 1;
bad:
if (PageSlab(page)) {
/*
* If this is a slab page then lets do the best we can
* to avoid issues in the future. Marking all objects
* as used avoids touching the remaining objects.
*/
slab_fix(s, "Marking all objects used");
page->inuse = page->objects;
page->freelist = NULL;
}
return 0;
}
static int free_debug_processing(struct kmem_cache *s, struct page *page,
void *object, void *addr)
{
if (!check_slab(s, page))
goto fail;
if (!check_valid_pointer(s, page, object)) {
slab_err(s, page, "Invalid object pointer 0x%p", object);
goto fail;
}
if (on_freelist(s, page, object)) {
object_err(s, page, object, "Object already free");
goto fail;
}
if (!check_object(s, page, object, 1))
return 0;
if (unlikely(s != page->slab)) {
if (!PageSlab(page)) {
slab_err(s, page, "Attempt to free object(0x%p) "
"outside of slab", object);
} else if (!page->slab) {
printk(KERN_ERR
"SLUB <none>: no slab for object 0x%p.\n",
object);
dump_stack();
} else
object_err(s, page, object,
"page slab pointer corrupt.");
goto fail;
}
/* Special debug activities for freeing objects */
if (!SlabFrozen(page) && !page->freelist)
remove_full(s, page);
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0);
init_object(s, object, 0);
return 1;
fail:
slab_fix(s, "Object at 0x%p not freed", object);
return 0;
}
static int __init setup_slub_debug(char *str)
{
slub_debug = DEBUG_DEFAULT_FLAGS;
if (*str++ != '=' || !*str)
/*
* No options specified. Switch on full debugging.
*/
goto out;
if (*str == ',')
/*
* No options but restriction on slabs. This means full
* debugging for slabs matching a pattern.
*/
goto check_slabs;
slub_debug = 0;
if (*str == '-')
/*
* Switch off all debugging measures.
*/
goto out;
/*
* Determine which debug features should be switched on
*/
for (; *str && *str != ','; str++) {
switch (tolower(*str)) {
case 'f':
slub_debug |= SLAB_DEBUG_FREE;
break;
case 'z':
slub_debug |= SLAB_RED_ZONE;
break;
case 'p':
slub_debug |= SLAB_POISON;
break;
case 'u':
slub_debug |= SLAB_STORE_USER;
break;
case 't':
slub_debug |= SLAB_TRACE;
break;
default:
printk(KERN_ERR "slub_debug option '%c' "
"unknown. skipped\n", *str);
}
}
check_slabs:
if (*str == ',')
slub_debug_slabs = str + 1;
out:
return 1;
}
__setup("slub_debug", setup_slub_debug);
static unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name,
void (*ctor)(struct kmem_cache *, void *))
{
/*
* Enable debugging if selected on the kernel commandline.
*/
if (slub_debug && (!slub_debug_slabs ||
strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
flags |= slub_debug;
return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
struct page *page, void *object) {}
static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, void *addr) { return 0; }
static inline int free_debug_processing(struct kmem_cache *s,
struct page *page, void *object, void *addr) { return 0; }
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name,
void (*ctor)(struct kmem_cache *, void *))
{
return flags;
}
#define slub_debug 0
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
#endif
/*
* Slab allocation and freeing
*/
static inline struct page *alloc_slab_page(gfp_t flags, int node,
struct kmem_cache_order_objects oo)
{
int order = oo_order(oo);
if (node == -1)
return alloc_pages(flags, order);
else
return alloc_pages_node(node, flags, order);
}
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
flags |= s->allocflags;
page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
oo);
if (unlikely(!page)) {
oo = s->min;
/*
* Allocation may have failed due to fragmentation.
* Try a lower order alloc if possible
*/
page = alloc_slab_page(flags, node, oo);
if (!page)
return NULL;
stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
}
page->objects = oo_objects(oo);
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1 << oo_order(oo));
return page;
}
static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
if (unlikely(s->ctor))
s->ctor(s, object);
}
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
void *start;
void *last;
void *p;
BUG_ON(flags & GFP_SLAB_BUG_MASK);
page = allocate_slab(s,
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
if (!page)
goto out;
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab = s;
page->flags |= 1 << PG_slab;
if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
SLAB_STORE_USER | SLAB_TRACE))
SetSlabDebug(page);
start = page_address(page);
if (unlikely(s->flags & SLAB_POISON))
memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
last = start;
for_each_object(p, s, start, page->objects) {
setup_object(s, page, last);
set_freepointer(s, last, p);
last = p;
}
setup_object(s, page, last);
set_freepointer(s, last, NULL);
page->freelist = start;
page->inuse = 0;
out:
return page;
}
static void __free_slab(struct kmem_cache *s, struct page *page)
{
int order = compound_order(page);
int pages = 1 << order;
if (unlikely(SlabDebug(page))) {
void *p;
slab_pad_check(s, page);
for_each_object(p, s, page_address(page),
page->objects)
check_object(s, page, p, 0);
ClearSlabDebug(page);
}
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-pages);
__ClearPageSlab(page);
reset_page_mapcount(page);
__free_pages(page, order);
}
static void rcu_free_slab(struct rcu_head *h)
{
struct page *page;
page = container_of((struct list_head *)h, struct page, lru);
__free_slab(page->slab, page);
}
static void free_slab(struct kmem_cache *s, struct page *page)
{
if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
/*
* RCU free overloads the RCU head over the LRU
*/
struct rcu_head *head = (void *)&page->lru;
call_rcu(head, rcu_free_slab);
} else
__free_slab(s, page);
}
static void discard_slab(struct kmem_cache *s, struct page *page)
{
dec_slabs_node(s, page_to_nid(page), page->objects);
free_slab(s, page);
}
/*
* Per slab locking using the pagelock
*/
static __always_inline void slab_lock(struct page *page)
{
bit_spin_lock(PG_locked, &page->flags);
}
static __always_inline void slab_unlock(struct page *page)
{
__bit_spin_unlock(PG_locked, &page->flags);
}
static __always_inline int slab_trylock(struct page *page)
{
int rc = bit_spin_trylock(PG_locked, &page->flags);
return rc;
}
/*
* Management of partially allocated slabs
*/
static void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
{
spin_lock(&n->list_lock);
n->nr_partial++;
if (tail)
list_add_tail(&page->lru, &n->partial);
else
list_add(&page->lru, &n->partial);
spin_unlock(&n->list_lock);
}
static void remove_partial(struct kmem_cache *s,
struct page *page)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
spin_lock(&n->list_lock);
list_del(&page->lru);
n->nr_partial--;
spin_unlock(&n->list_lock);
}
/*
* Lock slab and remove from the partial list.
*
* Must hold list_lock.
*/
static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
{
if (slab_trylock(page)) {
list_del(&page->lru);
n->nr_partial--;
SetSlabFrozen(page);
return 1;
}
return 0;
}
/*
* Try to allocate a partial slab from a specific node.
*/
static struct page *get_partial_node(struct kmem_cache_node *n)
{
struct page *page;
/*
* Racy check. If we mistakenly see no partial slabs then we
* just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partial_node()
* will return NULL.
*/
if (!n || !n->nr_partial)
return NULL;
spin_lock(&n->list_lock);
list_for_each_entry(page, &n->partial, lru)
if (lock_and_freeze_slab(n, page))
goto out;
page = NULL;
out:
spin_unlock(&n->list_lock);
return page;
}
/*
* Get a page from somewhere. Search in increasing NUMA distances.
*/
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(flags);
struct page *page;
/*
* The defrag ratio allows a configuration of the tradeoffs between
* inter node defragmentation and node local allocations. A lower
* defrag_ratio increases the tendency to do local allocations
* instead of attempting to obtain partial slabs from other nodes.
*
* If the defrag_ratio is set to 0 then kmalloc() always
* returns node local objects. If the ratio is higher then kmalloc()
* may return off node objects because partial slabs are obtained
* from other nodes and filled up.
*
	 * If /sys/kernel/slab/<cache>/remote_node_defrag_ratio is set to 100
	 * (which makes the internal ratio 1000) then almost every allocation will
* first attempt to defrag slab caches on other nodes. This means
* scanning over all nodes to look for partial slabs which may be
* expensive if we do it every time we are trying to find a slab
* with available objects.
*/
if (!s->remote_node_defrag_ratio ||
get_cycles() % 1024 > s->remote_node_defrag_ratio)
return NULL;
zonelist = node_zonelist(slab_node(current->mempolicy), flags);
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
struct kmem_cache_node *n;
n = get_node(s, zone_to_nid(zone));
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > MIN_PARTIAL) {
page = get_partial_node(n);
if (page)
return page;
}
}
#endif
return NULL;
}
/*
* Get a partial page, lock it and return it.
*/
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
int searchnode = (node == -1) ? numa_node_id() : node;
page = get_partial_node(get_node(s, searchnode));
if (page || (flags & __GFP_THISNODE))
return page;
return get_any_partial(s, flags);
}
/*
* Move a page back to the lists.
*
* Must be called with the slab lock held.
*
* On exit the slab lock will have been dropped.
*/
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
ClearSlabFrozen(page);
if (page->inuse) {
if (page->freelist) {
add_partial(n, page, tail);
stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
} else {
stat(c, DEACTIVATE_FULL);
if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
add_full(n, page);
}
slab_unlock(page);
} else {
stat(c, DEACTIVATE_EMPTY);
if (n->nr_partial < MIN_PARTIAL) {
/*
			 * Add an empty slab to the partial list in order
			 * to avoid page allocator overhead. This slab needs
			 * to come after the slabs that still contain objects,
			 * so that those get filled first. That way the
			 * size of the partial list stays small.
*
* kmem_cache_shrink can reclaim any empty slabs from the
* partial list.
*/
add_partial(n, page, 1);
slab_unlock(page);
} else {
slab_unlock(page);
stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
discard_slab(s, page);
}
}
}
/*
* Remove the cpu slab
*/
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
struct page *page = c->page;
int tail = 1;
if (page->freelist)
stat(c, DEACTIVATE_REMOTE_FREES);
/*
* Merge cpu freelist into slab freelist. Typically we get here
* because both freelists are empty. So this is unlikely
* to occur.
*/
while (unlikely(c->freelist)) {
void **object;
tail = 0; /* Hot objects. Put the slab first */
/* Retrieve object from cpu_freelist */
object = c->freelist;
c->freelist = c->freelist[c->offset];
/* And put onto the regular freelist */
object[c->offset] = page->freelist;
page->freelist = object;
page->inuse--;
}
c->page = NULL;
unfreeze_slab(s, page, tail);
}
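/*
 * For illustration: if the cpu freelist still held A -> B while the slab
 * freelist held C, each pass of the merge loop in deactivate_slab()
 * pushes one cpu object onto the head of the slab freelist, producing
 *
 *	page->freelist: B -> A -> C
 *
 * and page->inuse drops by two, because A and B were only accounted as
 * "in use" while they sat on the lockless cpu freelist.
 */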
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
stat(c, CPUSLAB_FLUSH);
slab_lock(c->page);
deactivate_slab(s, c);
}
/*
* Flush cpu slab.
*
* Called from IPI handler with interrupts disabled.
*/
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
if (likely(c && c->page))
flush_slab(s, c);
}
static void flush_cpu_slab(void *d)
{
struct kmem_cache *s = d;
__flush_cpu_slab(s, smp_processor_id());
}
static void flush_all(struct kmem_cache *s)
{
#ifdef CONFIG_SMP
on_each_cpu(flush_cpu_slab, s, 1, 1);
#else
unsigned long flags;
local_irq_save(flags);
flush_cpu_slab(s);
local_irq_restore(flags);
#endif
}
/*
* Check if the objects in a per cpu structure fit numa
* locality expectations.
*/
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
if (node != -1 && c->node != node)
return 0;
#endif
return 1;
}
/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
*
* Interrupts are disabled.
*
* Processing is still very fast if new objects have been freed to the
* regular freelist. In that case we simply take over the regular freelist
* as the lockless freelist and zap the regular freelist.
*
* If that is not working then we fall back to the partial lists. We take the
* first element of the freelist as the object to allocate now and move the
* rest of the freelist to the lockless freelist.
*
* And if we were unable to get a new slab from the partial slab lists then
* we need to allocate a new slab. This is the slowest path since it involves
* a call to the page allocator and the setup of a new slab.
*/
static void *__slab_alloc(struct kmem_cache *s,
gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
{
void **object;
struct page *new;
/* We handle __GFP_ZERO in the caller */
gfpflags &= ~__GFP_ZERO;
if (!c->page)
goto new_slab;
slab_lock(c->page);
if (unlikely(!node_match(c, node)))
goto another_slab;
stat(c, ALLOC_REFILL);
load_freelist:
object = c->page->freelist;
if (unlikely(!object))
goto another_slab;
if (unlikely(SlabDebug(c->page)))
goto debug;
c->freelist = object[c->offset];
c->page->inuse = c->page->objects;
c->page->freelist = NULL;
c->node = page_to_nid(c->page);
unlock_out:
slab_unlock(c->page);
stat(c, ALLOC_SLOWPATH);
return object;
another_slab:
deactivate_slab(s, c);
new_slab:
new = get_partial(s, gfpflags, node);
if (new) {
c->page = new;
stat(c, ALLOC_FROM_PARTIAL);
goto load_freelist;
}
if (gfpflags & __GFP_WAIT)
local_irq_enable();
new = new_slab(s, gfpflags, node);
if (gfpflags & __GFP_WAIT)
local_irq_disable();
if (new) {
c = get_cpu_slab(s, smp_processor_id());
stat(c, ALLOC_SLAB);
if (c->page)
flush_slab(s, c);
slab_lock(new);
SetSlabFrozen(new);
c->page = new;
goto load_freelist;
}
return NULL;
debug:
if (!alloc_debug_processing(s, c->page, object, addr))
goto another_slab;
c->page->inuse++;
c->page->freelist = object[c->offset];
c->node = -1;
goto unlock_out;
}
/*
* Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
* have the fastpath folded into their functions. So no function call
* overhead for requests that can be satisfied on the fastpath.
*
* The fastpath works by first checking if the lockless freelist can be used.
* If not then __slab_alloc is called for slow processing.
*
* Otherwise we can simply pick the next object from the lockless free list.
*/
static __always_inline void *slab_alloc(struct kmem_cache *s,
gfp_t gfpflags, int node, void *addr)
{
void **object;
struct kmem_cache_cpu *c;
unsigned long flags;
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
if (unlikely(!c->freelist || !node_match(c, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
object = c->freelist;
c->freelist = object[c->offset];
stat(c, ALLOC_FASTPATH);
}
local_irq_restore(flags);
if (unlikely((gfpflags & __GFP_ZERO) && object))
memset(object, 0, c->objsize);
return object;
}
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
/*
 * Slow path handling. This may still be called frequently since objects
* have a longer lifetime than the cpu slabs in most processing loads.
*
* So we still attempt to reduce cache line usage. Just take the slab
* lock and free the item. If there is no additional partial page
* handling required then we can return immediately.
*/
static void __slab_free(struct kmem_cache *s, struct page *page,
void *x, void *addr, unsigned int offset)
{
void *prior;
void **object = (void *)x;
struct kmem_cache_cpu *c;
c = get_cpu_slab(s, raw_smp_processor_id());
stat(c, FREE_SLOWPATH);
slab_lock(page);
if (unlikely(SlabDebug(page)))
goto debug;
checks_ok:
prior = object[offset] = page->freelist;
page->freelist = object;
page->inuse--;
if (unlikely(SlabFrozen(page))) {
stat(c, FREE_FROZEN);
goto out_unlock;
}
if (unlikely(!page->inuse))
goto slab_empty;
/*
* Objects left in the slab. If it was not on the partial list before
* then add it.
*/
if (unlikely(!prior)) {
add_partial(get_node(s, page_to_nid(page)), page, 1);
stat(c, FREE_ADD_PARTIAL);
}
out_unlock:
slab_unlock(page);
return;
slab_empty:
if (prior) {
/*
* Slab still on the partial list.
*/
remove_partial(s, page);
stat(c, FREE_REMOVE_PARTIAL);
}
slab_unlock(page);
stat(c, FREE_SLAB);
discard_slab(s, page);
return;
debug:
if (!free_debug_processing(s, page, x, addr))
goto out_unlock;
goto checks_ok;
}
/*
* Fastpath with forced inlining to produce a kfree and kmem_cache_free that
* can perform fastpath freeing without additional function calls.
*
* The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
* the item before.
*
* If fastpath is not possible then fall back to __slab_free where we deal
* with all sorts of special processing.
*/
static __always_inline void slab_free(struct kmem_cache *s,
struct page *page, void *x, void *addr)
{
void **object = (void *)x;
struct kmem_cache_cpu *c;
unsigned long flags;
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, s->objsize);
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
stat(c, FREE_FASTPATH);
} else
__slab_free(s, page, x, addr, c->offset);
local_irq_restore(flags);
}
void kmem_cache_free(struct kmem_cache *s, void *x)
{
struct page *page;
page = virt_to_head_page(x);
slab_free(s, page, x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_free);
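/*
 * Typical usage, as a minimal sketch (foo_cache and struct foo are
 * hypothetical and would be set up elsewhere with kmem_cache_create()):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 *
 * The common case hits the lockless fastpaths in slab_alloc() and
 * slab_free(); only refills and remote frees take the slab lock.
 */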
/* Figure out on which slab object the object resides */
static struct page *get_object_page(const void *x)
{
struct page *page = virt_to_head_page(x);
if (!PageSlab(page))
return NULL;
return page;
}
/*
* Object placement in a slab is made very easy because we always start at
* offset 0. If we tune the size of the object to the alignment then we can
* get the required alignment by putting one properly sized object after
* another.
*
* Notice that the allocation order determines the sizes of the per cpu
* caches. Each processor has always one slab available for allocations.
* Increasing the allocation order reduces the number of times that slabs
* must be moved on and off the partial lists and is therefore a factor in
* locking overhead.
*/
/*
 * Minimum / maximum order of slab pages. This influences locking overhead
* and slab fragmentation. A higher order reduces the number of partial slabs
* and increases the number of allocations possible without having to
* take the list_lock.
*/
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;
/*
* Merge control. If this is set then no merging of slab caches will occur.
* (Could be removed. This was introduced to pacify the merge skeptics.)
*/
static int slub_nomerge;
/*
 * Calculate the order of allocation given a slab object size.
*
* The order of allocation has significant impact on performance and other
* system components. Generally order 0 allocations should be preferred since
* order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
* unused space left. We go to a higher order if more than 1/16th of the slab
* would be wasted.
*
* In order to reach satisfactory performance we must ensure that a minimum
* number of objects is in one slab. Otherwise we may generate too much
* activity on the partial lists which requires taking the list_lock. This is
* less a concern for large slabs though which are rarely used.
*
* slub_max_order specifies the order where we begin to stop considering the
* number of objects in a slab as critical. If we reach slub_max_order then
* we try to keep the page order as low as possible. So we accept more waste
* of space in favor of a small page order.
*
* Higher order allocations also allow the placement of more objects in a
* slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
* the smallest order which will fit the object.
*/
static inline int slab_order(int size, int min_objects,
int max_order, int fract_leftover)
{
int order;
int rem;
int min_order = slub_min_order;
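	/*
	 * page->objects is a 16 bit field, so a slab can never hold
	 * more than 65535 objects; cap the order accordingly.
	 */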
if ((PAGE_SIZE << min_order) / size > 65535)
return get_order(size * 65535) - 1;
for (order = max(min_order,
fls(min_objects * size - 1) - PAGE_SHIFT);
order <= max_order; order++) {
unsigned long slab_size = PAGE_SIZE << order;
if (slab_size < min_objects * size)
continue;
rem = slab_size % size;
if (rem <= slab_size / fract_leftover)
break;
}
return order;
}
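/*
 * Worked example (assuming 4K pages and slub_min_order == 0):
 * slab_order(size = 72, min_objects = 16, max_order = 3,
 * fract_leftover = 16) starts scanning at order 0 because
 * fls(16 * 72 - 1) - PAGE_SHIFT is negative. An order 0 slab holds
 * 4096 / 72 = 56 objects with 4096 - 56 * 72 = 64 bytes left over;
 * 64 <= 4096 / 16, so order 0 is accepted immediately.
 */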
static inline int calculate_order(int size)
{
int order;
int min_objects;
int fraction;
/*
* Attempt to find best configuration for a slab. This
* works by first attempting to generate a layout with
* the best configuration and backing off gradually.
*
* First we reduce the acceptable waste in a slab. Then
* we reduce the minimum objects required in a slab.
*/
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
order = slab_order(size, min_objects,
slub_max_order, fraction);
if (order <= slub_max_order)
return order;
fraction /= 2;
}
min_objects /= 2;
}
/*
* We were unable to place multiple objects in a slab. Now
* lets see if we can place a single object there.
*/
order = slab_order(size, 1, slub_max_order, 1);
if (order <= slub_max_order)
return order;
/*
* Doh this slab cannot be placed using slub_max_order.
*/
order = slab_order(size, 1, MAX_ORDER, 1);
if (order <= MAX_ORDER)
return order;
return -ENOSYS;
}
/*
* Figure out what the alignment of the objects will be.
*/
static unsigned long calculate_alignment(unsigned long flags,
unsigned long align, unsigned long size)
{
/*
* If the user wants hardware cache aligned objects then follow that
* suggestion if the object is sufficiently large.
*
* The hardware cache alignment cannot override the specified
* alignment though. If that is greater then use it.
*/
if (flags & SLAB_HWCACHE_ALIGN) {
unsigned long ralign = cache_line_size();
while (size <= ralign / 2)
ralign /= 2;
align = max(align, ralign);
}
if (align < ARCH_SLAB_MINALIGN)
align = ARCH_SLAB_MINALIGN;
return ALIGN(align, sizeof(void *));
}
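/*
 * Worked example (assuming a 64 byte cache line): for a 20 byte object
 * with SLAB_HWCACHE_ALIGN, ralign is halved from 64 to 32 because
 * 20 <= 32, but no further because 20 > 16. The object is therefore
 * aligned to 32 bytes, packing two objects per cache line instead of
 * padding each one out to a full line.
 */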
static void init_kmem_cache_cpu(struct kmem_cache *s,
struct kmem_cache_cpu *c)
{
c->page = NULL;
c->freelist = NULL;
c->node = 0;
c->offset = s->offset / sizeof(void *);
c->objsize = s->objsize;
#ifdef CONFIG_SLUB_STATS
memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
#endif
}
static void init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
INIT_LIST_HEAD(&n->full);
#endif
}
#ifdef CONFIG_SMP
/*
* Per cpu array for per cpu structures.
*
* The per cpu array places all kmem_cache_cpu structures from one processor
* close together meaning that it becomes possible that multiple per cpu
* structures are contained in one cacheline. This may be particularly
* beneficial for the kmalloc caches.
*
* A desktop system typically has around 60-80 slabs. With 100 here we are
* likely able to get per cpu structures for all caches from the array defined
* here. We must be able to cover all kmalloc caches during bootstrap.
*
* If the per cpu array is exhausted then fall back to kmalloc
* of individual cachelines. No sharing is possible then.
*/
#define NR_KMEM_CACHE_CPU 100
static DEFINE_PER_CPU(struct kmem_cache_cpu,
kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
static cpumask_t kmem_cache_cpu_free_init_once = CPU_MASK_NONE;
static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
int cpu, gfp_t flags)
{
struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
if (c)
per_cpu(kmem_cache_cpu_free, cpu) =
(void *)c->freelist;
else {
/* Table overflow: So allocate ourselves */
c = kmalloc_node(
ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
flags, cpu_to_node(cpu));
if (!c)
return NULL;
}
init_kmem_cache_cpu(s, c);
return c;
}
static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
{
	if (c < per_cpu(kmem_cache_cpu, cpu) ||
			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
kfree(c);
return;
}
c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
per_cpu(kmem_cache_cpu_free, cpu) = c;
}
static void free_kmem_cache_cpus(struct kmem_cache *s)
{
int cpu;
for_each_online_cpu(cpu) {
struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
if (c) {
s->cpu_slab[cpu] = NULL;
free_kmem_cache_cpu(c, cpu);
}
}
}
static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
int cpu;
for_each_online_cpu(cpu) {
struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
if (c)
continue;
c = alloc_kmem_cache_cpu(s, cpu, flags);
if (!c) {
free_kmem_cache_cpus(s);
return 0;
}
s->cpu_slab[cpu] = c;
}
return 1;
}
/*
* Initialize the per cpu array.
*/
static void init_alloc_cpu_cpu(int cpu)
{
int i;
	if (cpu_isset(cpu, kmem_cache_cpu_free_init_once))
return;
for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
	cpu_set(cpu, kmem_cache_cpu_free_init_once);
}
static void __init init_alloc_cpu(void)
{
int cpu;
for_each_online_cpu(cpu)
init_alloc_cpu_cpu(cpu);
}
#else
static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
static inline void init_alloc_cpu(void) {}
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
init_kmem_cache_cpu(s, &s->cpu_slab);
return 1;
}
#endif
#ifdef CONFIG_NUMA
/*
* No kmalloc_node yet so do it by hand. We know that this is the first
* slab on the node for this slabcache. There are no concurrent accesses
* possible.
*
 * Note that this function is only used when allocating the per node
 * structure for the kmalloc_node cache itself. It bootstraps memory on a
 * fresh node that has no slab structures yet.
*/
static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
int node)
{
struct page *page;
struct kmem_cache_node *n;
unsigned long flags;
BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
page = new_slab(kmalloc_caches, gfpflags, node);
BUG_ON(!page);
if (page_to_nid(page) != node) {
printk(KERN_ERR "SLUB: Unable to allocate memory from "
"node %d\n", node);
printk(KERN_ERR "SLUB: Allocating a useless per node structure "
"in order to be able to continue\n");
}
n = page->freelist;
BUG_ON(!n);
page->freelist = get_freepointer(kmalloc_caches, n);
page->inuse++;
kmalloc_caches->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
init_object(kmalloc_caches, n, 1);
init_tracking(kmalloc_caches, n);
#endif
init_kmem_cache_node(n);
inc_slabs_node(kmalloc_caches, node, page->objects);
/*
* lockdep requires consistent irq usage for each lock
* so even though there cannot be a race this early in
* the boot sequence, we still disable irqs.
*/
local_irq_save(flags);
add_partial(n, page, 0);
local_irq_restore(flags);
return n;
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
{
int node;
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = s->node[node];
if (n && n != &s->local_node)
kmem_cache_free(kmalloc_caches, n);
s->node[node] = NULL;
}
}
static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
int node;
int local_node;
if (slab_state >= UP)
local_node = page_to_nid(virt_to_page(s));
else
local_node = 0;
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n;
if (local_node == node)
n = &s->local_node;
else {
if (slab_state == DOWN) {
n = early_kmem_cache_node_alloc(gfpflags,
node);
continue;
}
n = kmem_cache_alloc_node(kmalloc_caches,
gfpflags, node);
if (!n) {
free_kmem_cache_nodes(s);
return 0;
}
}
s->node[node] = n;
init_kmem_cache_node(n);
}
return 1;
}
#else
static void free_kmem_cache_nodes(struct kmem_cache *s)
{
}
static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
init_kmem_cache_node(&s->local_node);
return 1;
}
#endif
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
*/
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
unsigned long flags = s->flags;
unsigned long size = s->objsize;
unsigned long align = s->align;
int order;
/*
* Round up object size to the next word boundary. We can only
* place the free pointer at word boundaries and this determines
* the possible location of the free pointer.
*/
size = ALIGN(size, sizeof(void *));
#ifdef CONFIG_SLUB_DEBUG
/*
* Determine if we can poison the object itself. If the user of
* the slab may touch the object after free or before allocation
* then we should never poison the object itself.
*/
if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
!s->ctor)
s->flags |= __OBJECT_POISON;
else
s->flags &= ~__OBJECT_POISON;
/*
* If we are Redzoning then check if there is some space between the
* end of the object and the free pointer. If not then add an
* additional word to have some bytes to store Redzone information.
*/
if ((flags & SLAB_RED_ZONE) && size == s->objsize)
size += sizeof(void *);
#endif
/*
* With that we have determined the number of bytes in actual use
* by the object. This is the potential offset to the free pointer.
*/
s->inuse = size;
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
s->ctor)) {
/*
* Relocate free pointer after the object if it is not
* permitted to overwrite the first word of the object on
* kmem_cache_free.
*
* This is the case if we do RCU, have a constructor or
* destructor or are poisoning the objects.
*/
s->offset = size;
size += sizeof(void *);
}
#ifdef CONFIG_SLUB_DEBUG
if (flags & SLAB_STORE_USER)
/*
* Need to store information about allocs and frees after
* the object.
*/
size += 2 * sizeof(struct track);
if (flags & SLAB_RED_ZONE)
/*
* Add some empty padding so that we can catch
* overwrites from earlier objects rather than let
* tracking information or the free pointer be
		 * corrupted if a user writes before the start
* of the object.
*/
size += sizeof(void *);
#endif
/*
* Determine the alignment based on various parameters that the
* user specified and the dynamic determination of cache line size
* on bootup.
*/
align = calculate_alignment(flags, align, s->objsize);
/*
* SLUB stores one object immediately after another beginning from
* offset 0. In order to align the objects we have to simply size
* each object to conform to the alignment.
*/
size = ALIGN(size, align);
s->size = size;
if (forced_order >= 0)
order = forced_order;
else
order = calculate_order(size);
if (order < 0)
return 0;
s->allocflags = 0;
if (order)
s->allocflags |= __GFP_COMP;
if (s->flags & SLAB_CACHE_DMA)
s->allocflags |= SLUB_DMA;
if (s->flags & SLAB_RECLAIM_ACCOUNT)
s->allocflags |= __GFP_RECLAIMABLE;
/*
* Determine the number of objects per slab
*/
s->oo = oo_make(order, size);
s->min = oo_make(get_order(size), size);
if (oo_objects(s->oo) > oo_objects(s->max))
s->max = s->oo;
return !!oo_objects(s->oo);
}
static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
const char *name, size_t size,
size_t align, unsigned long flags,
void (*ctor)(struct kmem_cache *, void *))
{
memset(s, 0, kmem_size);
s->name = name;
s->ctor = ctor;
s->objsize = size;
s->align = align;
s->flags = kmem_cache_flags(size, flags, name, ctor);
if (!calculate_sizes(s, -1))
goto error;
s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 100;
#endif
if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
goto error;
if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
return 1;
free_kmem_cache_nodes(s);
error:
if (flags & SLAB_PANIC)
panic("Cannot create slab %s size=%lu realsize=%u "
"order=%u offset=%u flags=%lx\n",
s->name, (unsigned long)size, s->size, oo_order(s->oo),
s->offset, flags);
return 0;
}
/*
* Check if a given pointer is valid
*/
int kmem_ptr_validate(struct kmem_cache *s, const void *object)
{
struct page *page;
page = get_object_page(object);
if (!page || s != page->slab)
/* No slab or wrong slab */
return 0;
if (!check_valid_pointer(s, page, object))
return 0;
/*
* We could also check if the object is on the slabs freelist.
* But this would be too expensive and it seems that the main
	 * purpose of kmem_ptr_validate() is to check if the object belongs
* to a certain slab.
*/
return 1;
}
EXPORT_SYMBOL(kmem_ptr_validate);
/*
* Determine the size of a slab object
*/
unsigned int kmem_cache_size(struct kmem_cache *s)
{
return s->objsize;
}
EXPORT_SYMBOL(kmem_cache_size);
const char *kmem_cache_name(struct kmem_cache *s)
{
return s->name;
}
EXPORT_SYMBOL(kmem_cache_name);
static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
void *p;
DECLARE_BITMAP(map, page->objects);
bitmap_zero(map, page->objects);
slab_err(s, page, "%s", text);
slab_lock(page);
for_each_free_object(p, s, page->freelist)
set_bit(slab_index(p, s, addr), map);
for_each_object(p, s, addr, page->objects) {
if (!test_bit(slab_index(p, s, addr), map)) {
printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
p, p - addr);
print_tracking(s, p);
}
}
slab_unlock(page);
#endif
}
/*
* Attempt to free all partial slabs on a node.
*/
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
unsigned long flags;
struct page *page, *h;
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
list_del(&page->lru);
discard_slab(s, page);
n->nr_partial--;
} else {
list_slab_objects(s, page,
"Objects remaining on kmem_cache_close()");
}
}
spin_unlock_irqrestore(&n->list_lock, flags);
}
/*
* Release all resources used by a slab cache.
*/
static inline int kmem_cache_close(struct kmem_cache *s)
{
int node;
flush_all(s);
/* Attempt to free all objects */
free_kmem_cache_cpus(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
free_partial(s, n);
if (n->nr_partial || slabs_node(s, node))
return 1;
}
free_kmem_cache_nodes(s);
return 0;
}
/*
* Close a cache and release the kmem_cache structure
* (must be used for caches created using kmem_cache_create)
*/
void kmem_cache_destroy(struct kmem_cache *s)
{
down_write(&slub_lock);
s->refcount--;
if (!s->refcount) {
list_del(&s->list);
up_write(&slub_lock);
if (kmem_cache_close(s)) {
printk(KERN_ERR "SLUB %s: %s called for cache that "
"still has objects.\n", s->name, __func__);
dump_stack();
}
sysfs_slab_remove(s);
} else
up_write(&slub_lock);
}
EXPORT_SYMBOL(kmem_cache_destroy);
/********************************************************************
* Kmalloc subsystem
*******************************************************************/
struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
{
get_option(&str, &slub_min_order);
return 1;
}
__setup("slub_min_order=", setup_slub_min_order);
static int __init setup_slub_max_order(char *str)
{
get_option(&str, &slub_max_order);
return 1;
}
__setup("slub_max_order=", setup_slub_max_order);
static int __init setup_slub_min_objects(char *str)
{
get_option(&str, &slub_min_objects);
return 1;
}
__setup("slub_min_objects=", setup_slub_min_objects);
static int __init setup_slub_nomerge(char *str)
{
slub_nomerge = 1;
return 1;
}
__setup("slub_nomerge", setup_slub_nomerge);
static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
const char *name, int size, gfp_t gfp_flags)
{
unsigned int flags = 0;
if (gfp_flags & SLUB_DMA)
flags = SLAB_CACHE_DMA;
down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
flags, NULL))
goto panic;
list_add(&s->list, &slab_caches);
up_write(&slub_lock);
if (sysfs_slab_add(s))
goto panic;
return s;
panic:
panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
}
#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
static void sysfs_add_func(struct work_struct *w)
{
struct kmem_cache *s;
down_write(&slub_lock);
list_for_each_entry(s, &slab_caches, list) {
if (s->flags & __SYSFS_ADD_DEFERRED) {
s->flags &= ~__SYSFS_ADD_DEFERRED;
sysfs_slab_add(s);
}
}
up_write(&slub_lock);
}
static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
{
struct kmem_cache *s;
char *text;
size_t realsize;
s = kmalloc_caches_dma[index];
if (s)
return s;
/* Dynamically create dma cache */
if (flags & __GFP_WAIT)
down_write(&slub_lock);
else {
if (!down_write_trylock(&slub_lock))
goto out;
}
if (kmalloc_caches_dma[index])
goto unlock_out;
realsize = kmalloc_caches[index].objsize;
text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
(unsigned int)realsize);
s = kmalloc(kmem_size, flags & ~SLUB_DMA);
if (!s || !text || !kmem_cache_open(s, flags, text,
realsize, ARCH_KMALLOC_MINALIGN,
SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
kfree(s);
kfree(text);
goto unlock_out;
}
list_add(&s->list, &slab_caches);
kmalloc_caches_dma[index] = s;
schedule_work(&sysfs_add_work);
unlock_out:
up_write(&slub_lock);
out:
return kmalloc_caches_dma[index];
}
#endif
/*
 * Conversion table for small slab sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have non power
* of two cache sizes there. The size of larger slabs can be determined using
* fls.
*/
static s8 size_index[24] = {
3, /* 8 */
4, /* 16 */
5, /* 24 */
5, /* 32 */
6, /* 40 */
6, /* 48 */
6, /* 56 */
6, /* 64 */
1, /* 72 */
1, /* 80 */
1, /* 88 */
1, /* 96 */
7, /* 104 */
7, /* 112 */
7, /* 120 */
7, /* 128 */
2, /* 136 */
2, /* 144 */
2, /* 152 */
2, /* 160 */
2, /* 168 */
2, /* 176 */
2, /* 184 */
2 /* 192 */
};
static struct kmem_cache *get_slab(size_t size, gfp_t flags)
{
int index;
if (size <= 192) {
if (!size)
return ZERO_SIZE_PTR;
index = size_index[(size - 1) / 8];
} else
index = fls(size - 1);
#ifdef CONFIG_ZONE_DMA
if (unlikely((flags & SLUB_DMA)))
return dma_kmalloc_cache(index, flags);
#endif
return &kmalloc_caches[index];
}
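/*
 * Worked examples for get_slab(): a request for 100 bytes looks up
 * size_index[(100 - 1) / 8] = size_index[12] = 7, i.e. the 128 byte
 * cache, while 96 bytes maps to index 1, the special kmalloc-96 cache.
 * Above 192 bytes fls() is used directly: 200 bytes gives fls(199) = 8,
 * the 256 byte cache.
 */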
void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
if (unlikely(size > PAGE_SIZE))
return kmalloc_large(size, flags);
s = get_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, flags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc);
static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
get_order(size));
if (page)
return page_address(page);
else
return NULL;
}
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s;
if (unlikely(size > PAGE_SIZE))
return kmalloc_large_node(size, flags, node);
s = get_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, flags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
size_t ksize(const void *object)
{
struct page *page;
struct kmem_cache *s;
if (unlikely(object == ZERO_SIZE_PTR))
return 0;
page = virt_to_head_page(object);
if (unlikely(!PageSlab(page)))
return PAGE_SIZE << compound_order(page);
s = page->slab;
#ifdef CONFIG_SLUB_DEBUG
/*
* Debugging requires use of the padding between object
* and whatever may come after it.
*/
if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
return s->objsize;
#endif
/*
* If we have the need to store the freelist pointer
* back there or track user information then we can
* only use the space before that information.
*/
if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
return s->inuse;
/*
* Else we can use all the padding etc for the allocation
*/
return s->size;
}
EXPORT_SYMBOL(ksize);
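/*
 * Example (assuming a default, non-debug configuration): kmalloc(100, ...)
 * is served from the 128 byte cache, so ksize() on the result reports 128
 * usable bytes, not 100. Callers may rely on the full reported size.
 */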
void kfree(const void *x)
{
struct page *page;
void *object = (void *)x;
if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
put_page(page);
return;
}
slab_free(page->slab, page, object, __builtin_return_address(0));
}
EXPORT_SYMBOL(kfree);
/*
* kmem_cache_shrink removes empty slabs from the partial lists and sorts
* the remaining slabs by the number of items in use. The slabs with the
* most items in use come first. New allocations will then fill those up
* and thus they can be removed from the partial lists.
*
* The slabs with the least items are placed last. This results in them
* being allocated from last increasing the chance that the last objects
* are freed in them.
*/
int kmem_cache_shrink(struct kmem_cache *s)
{
int node;
int i;
struct kmem_cache_node *n;
struct page *page;
struct page *t;
int objects = oo_objects(s->max);
struct list_head *slabs_by_inuse =
kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
unsigned long flags;
if (!slabs_by_inuse)
return -ENOMEM;
flush_all(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
n = get_node(s, node);
if (!n->nr_partial)
continue;
for (i = 0; i < objects; i++)
INIT_LIST_HEAD(slabs_by_inuse + i);
spin_lock_irqsave(&n->list_lock, flags);
/*
* Build lists indexed by the items in use in each slab.
*
* Note that concurrent frees may occur while we hold the
* list_lock. page->inuse here is the upper limit.
*/
list_for_each_entry_safe(page, t, &n->partial, lru) {
if (!page->inuse && slab_trylock(page)) {
/*
* Must hold slab lock here because slab_free
* may have freed the last object and be
* waiting to release the slab.
*/
list_del(&page->lru);
n->nr_partial--;
slab_unlock(page);
discard_slab(s, page);
} else {
list_move(&page->lru,
slabs_by_inuse + page->inuse);
}
}
/*
* Rebuild the partial list with the slabs filled up most
* first and the least used slabs at the end.
*/
for (i = objects - 1; i >= 0; i--)
list_splice(slabs_by_inuse + i, n->partial.prev);
spin_unlock_irqrestore(&n->list_lock, flags);
}
kfree(slabs_by_inuse);
return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);
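/*
 * For illustration: user space can request such a shrink through the
 * cache's sysfs "shrink" attribute (assuming sysfs support is built in):
 *
 *	echo 1 > /sys/kernel/slab/<cache>/shrink
 */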
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
static int slab_mem_going_offline_callback(void *arg)
{
struct kmem_cache *s;
down_read(&slub_lock);
list_for_each_entry(s, &slab_caches, list)
kmem_cache_shrink(s);
up_read(&slub_lock);
return 0;
}
static void slab_mem_offline_callback(void *arg)
{
struct kmem_cache_node *n;
struct kmem_cache *s;
struct memory_notify *marg = arg;
int offline_node;
offline_node = marg->status_change_nid;
/*
	 * If the node still has available memory, we still need its
	 * kmem_cache_node, so there is nothing to free here.
*/
if (offline_node < 0)
return;
down_read(&slub_lock);
list_for_each_entry(s, &slab_caches, list) {
n = get_node(s, offline_node);
if (n) {
/*
* if n->nr_slabs > 0, slabs still exist on the node
* that is going down. We were unable to free them,
			 * and the offline_pages() function shouldn't have
			 * called this callback. So, we must fail.
*/
BUG_ON(slabs_node(s, offline_node));
s->node[offline_node] = NULL;
kmem_cache_free(kmalloc_caches, n);
}
}
up_read(&slub_lock);
}
static int slab_mem_going_online_callback(void *arg)
{
struct kmem_cache_node *n;
struct kmem_cache *s;
struct memory_notify *marg = arg;
int nid = marg->status_change_nid;
int ret = 0;
/*
* If the node's memory is already available, then kmem_cache_node is
* already created. Nothing to do.
*/
if (nid < 0)
return 0;
/*
	 * We are bringing a node online. No memory is available yet. We must
* allocate a kmem_cache_node structure in order to bring the node
* online.
*/
down_read(&slub_lock);
list_for_each_entry(s, &slab_caches, list) {
/*
		 * XXX: kmem_cache_alloc_node would fall back to other nodes
* since memory is not yet available from the node that
* is brought up.
*/
n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
if (!n) {
ret = -ENOMEM;
goto out;
}
init_kmem_cache_node(n);
s->node[nid] = n;
}
out:
up_read(&slub_lock);
return ret;
}
static int slab_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
int ret = 0;
switch (action) {
case MEM_GOING_ONLINE:
ret = slab_mem_going_online_callback(arg);
break;
case MEM_GOING_OFFLINE:
ret = slab_mem_going_offline_callback(arg);
break;
case MEM_OFFLINE:
case MEM_CANCEL_ONLINE:
slab_mem_offline_callback(arg);
break;
case MEM_ONLINE:
case MEM_CANCEL_OFFLINE:
break;
}
ret = notifier_from_errno(ret);
return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/********************************************************************
* Basic setup of slabs
*******************************************************************/
void __init kmem_cache_init(void)
{
int i;
int caches = 0;
init_alloc_cpu();
#ifdef CONFIG_NUMA
/*
* Must first have the slab cache available for the allocations of the
* struct kmem_cache_node's. There is special bootstrap code in
* kmem_cache_open for slab_state == DOWN.
*/
create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
sizeof(struct kmem_cache_node), GFP_KERNEL);
kmalloc_caches[0].refcount = -1;
caches++;
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif
/* Able to allocate the per node structures */
slab_state = PARTIAL;
	/* Caches that are not power-of-two sized */
if (KMALLOC_MIN_SIZE <= 64) {
create_kmalloc_cache(&kmalloc_caches[1],
"kmalloc-96", 96, GFP_KERNEL);
caches++;
}
if (KMALLOC_MIN_SIZE <= 128) {
create_kmalloc_cache(&kmalloc_caches[2],
"kmalloc-192", 192, GFP_KERNEL);
caches++;
}
for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
}
/*
* Patch up the size_index table if we have strange large alignment
* requirements for the kmalloc array. This is only the case for
* MIPS it seems. The standard arches will not generate any code here.
*
* Largest permitted alignment is 256 bytes due to the way we
* handle the index determination for the smaller caches.
*
* Make sure that nothing crazy happens if someone starts tinkering
* around with ARCH_KMALLOC_MINALIGN
*/
BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
		kmalloc_caches[i].name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
#ifdef CONFIG_SMP
register_cpu_notifier(&slab_notifier);
kmem_size = offsetof(struct kmem_cache, cpu_slab) +
nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
#else
kmem_size = sizeof(struct kmem_cache);
#endif
printk(KERN_INFO
"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
" CPUs=%d, Nodes=%d\n",
caches, cache_line_size(),
slub_min_order, slub_max_order, slub_min_objects,
nr_cpu_ids, nr_node_ids);
}
/*
* Find a mergeable slab cache
*/
static int slab_unmergeable(struct kmem_cache *s)
{
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;
if (s->ctor)
return 1;
/*
* We may have set a slab to be unmergeable during bootstrap.
*/
if (s->refcount < 0)
return 1;
return 0;
}
static struct kmem_cache *find_mergeable(size_t size,
size_t align, unsigned long flags, const char *name,
void (*ctor)(struct kmem_cache *, void *))
{
struct kmem_cache *s;
if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
return NULL;
if (ctor)
return NULL;
size = ALIGN(size, sizeof(void *));
align = calculate_alignment(flags, align, size);
size = ALIGN(size, align);
flags = kmem_cache_flags(size, flags, name, NULL);
list_for_each_entry(s, &slab_caches, list) {
if (slab_unmergeable(s))
continue;
if (size > s->size)
continue;
if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
continue;
/*
* Check if alignment is compatible.
* Courtesy of Adrian Drzewiecki
*/
if ((s->size & ~(align - 1)) != s->size)
continue;
if (s->size - size >= sizeof(void *))
continue;
return s;
}
return NULL;
}
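/*
 * Worked example (hypothetical caches, on a 64 bit machine): a request
 * for 124 byte objects with default flags and no constructor is rounded
 * up to 128 bytes and merges into an existing compatible 128 byte cache,
 * since the leftover 128 - 128 = 0 is below sizeof(void *). A 120 byte
 * request against the same cache does not merge because
 * 128 - 120 >= sizeof(void *), so a separate cache is created.
 */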
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags,
void (*ctor)(struct kmem_cache *, void *))
{
struct kmem_cache *s;
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
int cpu;
s->refcount++;
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
*/
s->objsize = max(s->objsize, (int)size);
/*
* And then we need to update the object size in the
* per cpu structures
*/
for_each_online_cpu(cpu)
get_cpu_slab(s, cpu)->objsize = s->objsize;
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
up_write(&slub_lock);
if (sysfs_slab_alias(s, name))
goto err;
return s;
}
s = kmalloc(kmem_size, GFP_KERNEL);
if (s) {
if (kmem_cache_open(s, GFP_KERNEL, name,
size, align, flags, ctor)) {
list_add(&s->list, &slab_caches);
up_write(&slub_lock);
if (sysfs_slab_add(s))
goto err;
return s;
}
kfree(s);
}
up_write(&slub_lock);
err:
if (flags & SLAB_PANIC)
panic("Cannot create slabcache %s\n", name);
else
s = NULL;
return s;
}
EXPORT_SYMBOL(kmem_cache_create);
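/*
 * A minimal usage sketch (struct foo and foo_ctor are hypothetical):
 *
 *	static void foo_ctor(struct kmem_cache *s, void *obj)
 *	{
 *		struct foo *f = obj;
 *		spin_lock_init(&f->lock);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *					SLAB_HWCACHE_ALIGN, foo_ctor);
 *
 * Note that passing a constructor makes the cache unmergeable (see
 * slab_unmergeable()) and forces the free pointer outside the object in
 * calculate_sizes().
 */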
#ifdef CONFIG_SMP
/*
 * Use the cpu notifier to ensure that the cpu slabs are flushed when
* necessary.
*/
static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
struct kmem_cache *s;
unsigned long flags;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
init_alloc_cpu_cpu(cpu);
down_read(&slub_lock);
list_for_each_entry(s, &slab_caches, list)
s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
GFP_KERNEL);
up_read(&slub_lock);
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
down_read(&slub_lock);
list_for_each_entry(s, &slab_caches, list) {
struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
local_irq_save(flags);
__flush_cpu_slab(s, cpu);
local_irq_restore(flags);
free_kmem_cache_cpu(c, cpu);
s->cpu_slab[cpu] = NULL;
}
up_read(&slub_lock);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata slab_notifier = {
.notifier_call = slab_cpuup_callback
};
#endif
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
{
struct kmem_cache *s;
if (unlikely(size > PAGE_SIZE))
return kmalloc_large(size, gfpflags);
s = get_slab(size, gfpflags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, gfpflags, -1, caller);
}
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
int node, void *caller)
{
struct kmem_cache *s;
if (unlikely(size > PAGE_SIZE))
return kmalloc_large_node(size, gfpflags, node);
s = get_slab(size, gfpflags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
return slab_alloc(s, gfpflags, node, caller);
}
#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
static unsigned long count_partial(struct kmem_cache_node *n,
int (*get_count)(struct page *))
{
unsigned long flags;
unsigned long x = 0;
struct page *page;
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
x += get_count(page);
spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
static int count_inuse(struct page *page)
{
return page->inuse;
}
static int count_total(struct page *page)
{
return page->objects;
}
static int count_free(struct page *page)
{
return page->objects - page->inuse;
}
#endif
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
{
void *p;
void *addr = page_address(page);
if (!check_slab(s, page) ||
!on_freelist(s, page, NULL))
return 0;
/* Now we know that a valid freelist exists */
bitmap_zero(map, page->objects);
for_each_free_object(p, s, page->freelist) {
set_bit(slab_index(p, s, addr), map);
if (!check_object(s, page, p, 0))
return 0;
}
for_each_object(p, s, addr, page->objects)
if (!test_bit(slab_index(p, s, addr), map))
if (!check_object(s, page, p, 1))
return 0;
return 1;
}
static void validate_slab_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
{
if (slab_trylock(page)) {
validate_slab(s, page, map);
slab_unlock(page);
} else
printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
s->name, page);
if (s->flags & DEBUG_DEFAULT_FLAGS) {
if (!SlabDebug(page))
printk(KERN_ERR "SLUB %s: SlabDebug not set "
"on slab 0x%p\n", s->name, page);
} else {
if (SlabDebug(page))
printk(KERN_ERR "SLUB %s: SlabDebug set on "
"slab 0x%p\n", s->name, page);
}
}
static int validate_slab_node(struct kmem_cache *s,
struct kmem_cache_node *n, unsigned long *map)
{
unsigned long count = 0;
struct page *page;
unsigned long flags;
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
count++;
}
if (count != n->nr_partial)
printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
"counter=%ld\n", s->name, count, n->nr_partial);
if (!(s->flags & SLAB_STORE_USER))
goto out;
list_for_each_entry(page, &n->full, lru) {
validate_slab_slab(s, page, map);
count++;
}
if (count != atomic_long_read(&n->nr_slabs))
printk(KERN_ERR "SLUB: %s %ld slabs counted but "
"counter=%ld\n", s->name, count,
atomic_long_read(&n->nr_slabs));
out:
spin_unlock_irqrestore(&n->list_lock, flags);
return count;
}
static long validate_slab_cache(struct kmem_cache *s)
{
int node;
unsigned long count = 0;
unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
sizeof(unsigned long), GFP_KERNEL);
if (!map)
return -ENOMEM;
flush_all(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
count += validate_slab_node(s, n, map);
}
kfree(map);
return count;
}
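/*
 * For illustration: with CONFIG_SLUB_DEBUG, a full validation pass can be
 * requested from user space through the cache's sysfs "validate"
 * attribute, e.g.:
 *
 *	echo 1 > /sys/kernel/slab/kmalloc-64/validate
 */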
#ifdef SLUB_RESILIENCY_TEST
static void resiliency_test(void)
{
u8 *p;
printk(KERN_ERR "SLUB resiliency testing\n");
printk(KERN_ERR "-----------------------\n");
printk(KERN_ERR "A. Corruption after allocation\n");
p = kzalloc(16, GFP_KERNEL);
p[16] = 0x12;
printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
" 0x12->0x%p\n\n", p + 16);
validate_slab_cache(kmalloc_caches + 4);
/* Hmmm... The next two are dangerous */
p = kzalloc(32, GFP_KERNEL);
p[32 + sizeof(void *)] = 0x34;
printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
" 0x34 -> -0x%p\n", p);
printk(KERN_ERR
"If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches + 5);
p = kzalloc(64, GFP_KERNEL);
p += 64 + (get_cycles() & 0xff) * sizeof(void *);
*p = 0x56;
printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
p);
printk(KERN_ERR
"If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches + 6);
printk(KERN_ERR "\nB. Corruption after free\n");
p = kzalloc(128, GFP_KERNEL);
kfree(p);
*p = 0x78;
printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches + 7);
p = kzalloc(256, GFP_KERNEL);
kfree(p);
p[50] = 0x9a;
printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
p);
validate_slab_cache(kmalloc_caches + 8);
p = kzalloc(512, GFP_KERNEL);
kfree(p);
p[512] = 0xab;
printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
validate_slab_cache(kmalloc_caches + 9);
}
#else
static void resiliency_test(void) {}
#endif
/*
* Generate lists of code addresses where slabcache objects are allocated
* and freed.
*/
struct location {
unsigned long count;
void *addr;
long long sum_time;
long min_time;
long max_time;
long min_pid;
long max_pid;
cpumask_t cpus;
nodemask_t nodes;
};
struct loc_track {
unsigned long max;
unsigned long count;
struct location *loc;
};
static void free_loc_track(struct loc_track *t)
{
if (t->max)
free_pages((unsigned long)t->loc,
get_order(sizeof(struct location) * t->max));
}
static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
{
struct location *l;
int order;
order = get_order(sizeof(struct location) * max);
l = (void *)__get_free_pages(flags, order);
if (!l)
return 0;
if (t->count) {
memcpy(l, t->loc, sizeof(struct location) * t->count);
free_loc_track(t);
}
t->max = max;
t->loc = l;
return 1;
}
static int add_location(struct loc_track *t, struct kmem_cache *s,
const struct track *track)
{
long start, end, pos;
struct location *l;
void *caddr;
unsigned long age = jiffies - track->when;
start = -1;
end = t->count;
for ( ; ; ) {
pos = start + (end - start + 1) / 2;
/*
* There is nothing at "end". If we end up there
		 * then we need to insert the new element before "end".
*/
if (pos == end)
break;
caddr = t->loc[pos].addr;
if (track->addr == caddr) {
l = &t->loc[pos];
l->count++;
if (track->when) {
l->sum_time += age;
if (age < l->min_time)
l->min_time = age;
if (age > l->max_time)
l->max_time = age;
if (track->pid < l->min_pid)
l->min_pid = track->pid;
if (track->pid > l->max_pid)
l->max_pid = track->pid;
cpu_set(track->cpu, l->cpus);
}
node_set(page_to_nid(virt_to_page(track)), l->nodes);
return 1;
}
if (track->addr < caddr)
end = pos;
else
start = pos;
}
/*
* Not found. Insert new tracking element.
*/
if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
return 0;
l = t->loc + pos;
if (pos < t->count)
memmove(l + 1, l,
(t->count - pos) * sizeof(struct location));
t->count++;
l->count = 1;
l->addr = track->addr;
l->sum_time = age;
l->min_time = age;
l->max_time = age;
l->min_pid = track->pid;
l->max_pid = track->pid;
cpus_clear(l->cpus);
cpu_set(track->cpu, l->cpus);
nodes_clear(l->nodes);
node_set(page_to_nid(virt_to_page(track)), l->nodes);
return 1;
}
static void process_slab(struct loc_track *t, struct kmem_cache *s,
struct page *page, enum track_item alloc)
{
void *addr = page_address(page);
DECLARE_BITMAP(map, page->objects);
void *p;
bitmap_zero(map, page->objects);
for_each_free_object(p, s, page->freelist)
set_bit(slab_index(p, s, addr), map);
for_each_object(p, s, addr, page->objects)
if (!test_bit(slab_index(p, s, addr), map))
add_location(t, s, get_track(s, p, alloc));
}
static int list_locations(struct kmem_cache *s, char *buf,
enum track_item alloc)
{
int len = 0;
unsigned long i;
struct loc_track t = { 0, 0, NULL };
int node;
if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
GFP_TEMPORARY))
return sprintf(buf, "Out of memory\n");
/* Push back cpu slabs */
flush_all(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
unsigned long flags;
struct page *page;
if (!atomic_long_read(&n->nr_slabs))
continue;
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
process_slab(&t, s, page, alloc);
list_for_each_entry(page, &n->full, lru)
process_slab(&t, s, page, alloc);
spin_unlock_irqrestore(&n->list_lock, flags);
}
for (i = 0; i < t.count; i++) {
struct location *l = &t.loc[i];
if (len > PAGE_SIZE - 100)
break;
len += sprintf(buf + len, "%7ld ", l->count);
if (l->addr)
len += sprint_symbol(buf + len, (unsigned long)l->addr);
else
len += sprintf(buf + len, "<not-available>");
if (l->sum_time != l->min_time) {
len += sprintf(buf + len, " age=%ld/%ld/%ld",
l->min_time,
(long)div_u64(l->sum_time, l->count),
l->max_time);
} else
len += sprintf(buf + len, " age=%ld",
l->min_time);
if (l->min_pid != l->max_pid)
len += sprintf(buf + len, " pid=%ld-%ld",
l->min_pid, l->max_pid);
else
len += sprintf(buf + len, " pid=%ld",
l->min_pid);
if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " cpus=");
len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
l->cpus);
}
if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " nodes=");
len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
l->nodes);
}
len += sprintf(buf + len, "\n");
}
free_loc_track(&t);
if (!t.count)
len += sprintf(buf, "No data\n");
return len;
}
enum slab_stat_type {
SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */
SL_CPU, /* Only slabs used for cpu caches */
SL_OBJECTS, /* Determine allocated objects not slabs */
SL_TOTAL /* Determine object capacity not slabs */
};
#define SO_ALL (1 << SL_ALL)
#define SO_PARTIAL (1 << SL_PARTIAL)
#define SO_CPU (1 << SL_CPU)
#define SO_OBJECTS (1 << SL_OBJECTS)
#define SO_TOTAL (1 << SL_TOTAL)
static ssize_t show_slab_objects(struct kmem_cache *s,
char *buf, unsigned long flags)
{
unsigned long total = 0;
int node;
int x;
unsigned long *nodes;
unsigned long *per_cpu;
nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
if (!nodes)
return -ENOMEM;
per_cpu = nodes + nr_node_ids;
if (flags & SO_CPU) {
int cpu;
for_each_possible_cpu(cpu) {
struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
if (!c || c->node < 0)
continue;
if (c->page) {
if (flags & SO_TOTAL)
x = c->page->objects;
else if (flags & SO_OBJECTS)
x = c->page->inuse;
else
x = 1;
total += x;
nodes[c->node] += x;
}
per_cpu[c->node]++;
}
}
if (flags & SO_ALL) {
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
if (flags & SO_TOTAL)
x = atomic_long_read(&n->total_objects);
else if (flags & SO_OBJECTS)
x = atomic_long_read(&n->total_objects) -
count_partial(n, count_free);
else
x = atomic_long_read(&n->nr_slabs);
total += x;
nodes[node] += x;
}
} else if (flags & SO_PARTIAL) {
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
if (flags & SO_TOTAL)
x = count_partial(n, count_total);
else if (flags & SO_OBJECTS)
x = count_partial(n, count_inuse);
else
x = n->nr_partial;
total += x;
nodes[node] += x;
}
}
x = sprintf(buf, "%lu", total);
#ifdef CONFIG_NUMA
for_each_node_state(node, N_NORMAL_MEMORY)
if (nodes[node])
x += sprintf(buf + x, " N%d=%lu",
node, nodes[node]);
#endif
kfree(nodes);
return x + sprintf(buf + x, "\n");
}
static int any_slab_objects(struct kmem_cache *s)
{
int node;
for_each_online_node(node) {
struct kmem_cache_node *n = get_node(s, node);
if (!n)
continue;
		if (atomic_long_read(&n->total_objects))
return 1;
}
return 0;
}
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj)
struct slab_attribute {
struct attribute attr;
ssize_t (*show)(struct kmem_cache *s, char *buf);
ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};
#define SLAB_ATTR_RO(_name) \
static struct slab_attribute _name##_attr = __ATTR_RO(_name)
#define SLAB_ATTR(_name) \
static struct slab_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", s->size);
}
SLAB_ATTR_RO(slab_size);
static ssize_t align_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", s->align);
}
SLAB_ATTR_RO(align);
static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", s->objsize);
}
SLAB_ATTR_RO(object_size);
static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", oo_objects(s->oo));
}
SLAB_ATTR_RO(objs_per_slab);
static ssize_t order_store(struct kmem_cache *s,
const char *buf, size_t length)
{
int order = simple_strtoul(buf, NULL, 10);
if (order > slub_max_order || order < slub_min_order)
return -EINVAL;
calculate_sizes(s, order);
return length;
}
static ssize_t order_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", oo_order(s->oo));
}
SLAB_ATTR(order);
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (s->ctor) {
int n = sprint_symbol(buf, (unsigned long)s->ctor);
return n + sprintf(buf + n, "\n");
}
return 0;
}
SLAB_ATTR_RO(ctor);
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", s->refcount - 1);
}
SLAB_ATTR_RO(aliases);
static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_ALL);
}
SLAB_ATTR_RO(slabs);
static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(partial);
static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_CPU);
}
SLAB_ATTR_RO(cpu_slabs);
static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);
static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects_partial);
static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
SLAB_ATTR_RO(total_objects);
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
}
static ssize_t sanity_checks_store(struct kmem_cache *s,
const char *buf, size_t length)
{
s->flags &= ~SLAB_DEBUG_FREE;
if (buf[0] == '1')
s->flags |= SLAB_DEBUG_FREE;
return length;
}
SLAB_ATTR(sanity_checks);
static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}
static ssize_t trace_store(struct kmem_cache *s, const char *buf,
size_t length)
{
s->flags &= ~SLAB_TRACE;
if (buf[0] == '1')
s->flags |= SLAB_TRACE;
return length;
}
SLAB_ATTR(trace);
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}
static ssize_t reclaim_account_store(struct kmem_cache *s,
const char *buf, size_t length)
{
s->flags &= ~SLAB_RECLAIM_ACCOUNT;
if (buf[0] == '1')
s->flags |= SLAB_RECLAIM_ACCOUNT;
return length;
}
SLAB_ATTR(reclaim_account);
static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);
#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
SLAB_ATTR_RO(cache_dma);
#endif
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu);
static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}
static ssize_t red_zone_store(struct kmem_cache *s,
const char *buf, size_t length)
{
if (any_slab_objects(s))
return -EBUSY;
s->flags &= ~SLAB_RED_ZONE;
if (buf[0] == '1')
s->flags |= SLAB_RED_ZONE;
calculate_sizes(s, -1);
return length;
}
SLAB_ATTR(red_zone);
static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
}
static ssize_t poison_store(struct kmem_cache *s,
const char *buf, size_t length)
{
if (any_slab_objects(s))
return -EBUSY;
s->flags &= ~SLAB_POISON;
if (buf[0] == '1')
s->flags |= SLAB_POISON;
calculate_sizes(s, -1);
return length;
}
SLAB_ATTR(poison);
static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}
static ssize_t store_user_store(struct kmem_cache *s,
const char *buf, size_t length)
{
if (any_slab_objects(s))
return -EBUSY;
s->flags &= ~SLAB_STORE_USER;
if (buf[0] == '1')
s->flags |= SLAB_STORE_USER;
calculate_sizes(s, -1);
return length;
}
SLAB_ATTR(store_user);
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
return 0;
}
static ssize_t validate_store(struct kmem_cache *s,
const char *buf, size_t length)
{
int ret = -EINVAL;
if (buf[0] == '1') {
ret = validate_slab_cache(s);
if (ret >= 0)
ret = length;
}
return ret;
}
SLAB_ATTR(validate);
static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
return 0;
}
static ssize_t shrink_store(struct kmem_cache *s,
const char *buf, size_t length)
{
if (buf[0] == '1') {
int rc = kmem_cache_shrink(s);
if (rc)
return rc;
} else
return -EINVAL;
return length;
}
SLAB_ATTR(shrink);
static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);
static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);
#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
}
static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
const char *buf, size_t length)
{
int n = simple_strtoul(buf, NULL, 10);
if (n < 100)
s->remote_node_defrag_ratio = n * 10;
return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
#endif
#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
unsigned long sum = 0;
int cpu;
int len;
int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
if (!data)
return -ENOMEM;
for_each_online_cpu(cpu) {
unsigned x = get_cpu_slab(s, cpu)->stat[si];
data[cpu] = x;
sum += x;
}
len = sprintf(buf, "%lu", sum);
#ifdef CONFIG_SMP
for_each_online_cpu(cpu) {
if (data[cpu] && len < PAGE_SIZE - 20)
len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
}
#endif
kfree(data);
return len + sprintf(buf + len, "\n");
}
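/*
 * Example (illustrative numbers): with two online CPUs that each
 * recorded 512 fast-path allocations, the alloc_fastpath file built
 * from this helper reads
 *
 *	1024 C0=512 C1=512
 *
 * i.e. the sum first, then one " C<cpu>=<count>" entry per CPU with a
 * non-zero count (only on CONFIG_SMP).
 */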
#define STAT_ATTR(si, text) \
static ssize_t text##_show(struct kmem_cache *s, char *buf) \
{ \
return show_stat(s, buf, si); \
} \
SLAB_ATTR_RO(text)
STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
#endif
static struct attribute *slab_attrs[] = {
&slab_size_attr.attr,
&object_size_attr.attr,
&objs_per_slab_attr.attr,
&order_attr.attr,
&objects_attr.attr,
&objects_partial_attr.attr,
&total_objects_attr.attr,
&slabs_attr.attr,
&partial_attr.attr,
&cpu_slabs_attr.attr,
&ctor_attr.attr,
&aliases_attr.attr,
&align_attr.attr,
&sanity_checks_attr.attr,
&trace_attr.attr,
&hwcache_align_attr.attr,
&reclaim_account_attr.attr,
&destroy_by_rcu_attr.attr,
&red_zone_attr.attr,
&poison_attr.attr,
&store_user_attr.attr,
&validate_attr.attr,
&shrink_attr.attr,
&alloc_calls_attr.attr,
&free_calls_attr.attr,
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
&alloc_fastpath_attr.attr,
&alloc_slowpath_attr.attr,
&free_fastpath_attr.attr,
&free_slowpath_attr.attr,
&free_frozen_attr.attr,
&free_add_partial_attr.attr,
&free_remove_partial_attr.attr,
&alloc_from_partial_attr.attr,
&alloc_slab_attr.attr,
&alloc_refill_attr.attr,
&free_slab_attr.attr,
&cpuslab_flush_attr.attr,
&deactivate_full_attr.attr,
&deactivate_empty_attr.attr,
&deactivate_to_head_attr.attr,
&deactivate_to_tail_attr.attr,
&deactivate_remote_frees_attr.attr,
&order_fallback_attr.attr,
#endif
NULL
};
static struct attribute_group slab_attr_group = {
.attrs = slab_attrs,
};
static ssize_t slab_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct slab_attribute *attribute;
struct kmem_cache *s;
int err;
attribute = to_slab_attr(attr);
s = to_slab(kobj);
if (!attribute->show)
return -EIO;
err = attribute->show(s, buf);
return err;
}
static ssize_t slab_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t len)
{
struct slab_attribute *attribute;
struct kmem_cache *s;
int err;
attribute = to_slab_attr(attr);
s = to_slab(kobj);
if (!attribute->store)
return -EIO;
err = attribute->store(s, buf, len);
return err;
}
static void kmem_cache_release(struct kobject *kobj)
{
struct kmem_cache *s = to_slab(kobj);
kfree(s);
}
static struct sysfs_ops slab_sysfs_ops = {
.show = slab_attr_show,
.store = slab_attr_store,
};
static struct kobj_type slab_ktype = {
.sysfs_ops = &slab_sysfs_ops,
.release = kmem_cache_release
};
static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &slab_ktype)
return 1;
return 0;
}
static struct kset_uevent_ops slab_uevent_ops = {
.filter = uevent_filter,
};
static struct kset *slab_kset;
#define ID_STR_LENGTH 64
/* Create a unique string id for a slab cache:
*
* Format :[flags-]size
*/
static char *create_unique_id(struct kmem_cache *s)
{
char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
char *p = name;
BUG_ON(!name);
*p++ = ':';
/*
* First flags affecting slabcache operations. We will only
* get here for aliasable slabs so we do not need to support
* too many flags. The flags here must cover all flags that
* are matched during merging to guarantee that the id is
* unique.
*/
if (s->flags & SLAB_CACHE_DMA)
*p++ = 'd';
if (s->flags & SLAB_RECLAIM_ACCOUNT)
*p++ = 'a';
if (s->flags & SLAB_DEBUG_FREE)
*p++ = 'F';
if (p != name + 1)
*p++ = '-';
p += sprintf(p, "%07d", s->size);
BUG_ON(p > name + ID_STR_LENGTH - 1);
return name;
}
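/*
 * Examples (illustrative): a plain cache of size 192 is named
 * ":0000192" -- no flag characters, so the '-' separator is skipped --
 * while a DMA cache of the same size becomes ":d-0000192".  The
 * "%07d" pads the size so ids have a predictable width.
 */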
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
const char *name;
int unmergeable;
if (slab_state < SYSFS)
/* Defer until later */
return 0;
unmergeable = slab_unmergeable(s);
if (unmergeable) {
/*
* Slabcache can never be merged so we can use the name proper.
* This is typically the case for debug situations. In that
* case we can catch duplicate names easily.
*/
sysfs_remove_link(&slab_kset->kobj, s->name);
name = s->name;
} else {
/*
* Create a unique name for the slab as a target
* for the symlinks.
*/
name = create_unique_id(s);
}
s->kobj.kset = slab_kset;
err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
if (err) {
kobject_put(&s->kobj);
return err;
}
err = sysfs_create_group(&s->kobj, &slab_attr_group);
if (err)
return err;
kobject_uevent(&s->kobj, KOBJ_ADD);
if (!unmergeable) {
/* Setup first alias */
sysfs_slab_alias(s, s->name);
kfree(name);
}
return 0;
}
static void sysfs_slab_remove(struct kmem_cache *s)
{
kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
/*
* Need to buffer aliases during bootup until sysfs becomes
* available lest we lose that information.
*/
struct saved_alias {
struct kmem_cache *s;
const char *name;
struct saved_alias *next;
};
static struct saved_alias *alias_list;
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
if (slab_state == SYSFS) {
/*
* If we have a leftover link then remove it.
*/
sysfs_remove_link(&slab_kset->kobj, name);
return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
}
al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
if (!al)
return -ENOMEM;
al->s = s;
al->name = name;
al->next = alias_list;
alias_list = al;
return 0;
}
static int __init slab_sysfs_init(void)
{
struct kmem_cache *s;
int err;
slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
if (!slab_kset) {
printk(KERN_ERR "Cannot register slab subsystem.\n");
return -ENOSYS;
}
slab_state = SYSFS;
list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);
if (err)
printk(KERN_ERR "SLUB: Unable to add boot slab %s"
" to sysfs\n", s->name);
}
while (alias_list) {
struct saved_alias *al = alias_list;
alias_list = alias_list->next;
err = sysfs_slab_alias(al->s, al->name);
if (err)
printk(KERN_ERR "SLUB: Unable to add boot slab alias"
" %s to sysfs\n", s->name);
kfree(al);
}
resiliency_test();
return 0;
}
__initcall(slab_sysfs_init);
#endif
/*
* The /proc/slabinfo ABI
*/
#ifdef CONFIG_SLABINFO
ssize_t slabinfo_write(struct file *file, const char __user * buffer,
size_t count, loff_t *ppos)
{
return -EINVAL;
}
static void print_slabinfo_header(struct seq_file *m)
{
seq_puts(m, "slabinfo - version: 2.1\n");
seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
"<objperslab> <pagesperslab>");
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
seq_putc(m, '\n');
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;
down_read(&slub_lock);
if (!n)
print_slabinfo_header(m);
return seq_list_start(&slab_caches, *pos);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &slab_caches, pos);
}
static void s_stop(struct seq_file *m, void *p)
{
up_read(&slub_lock);
}
static int s_show(struct seq_file *m, void *p)
{
unsigned long nr_partials = 0;
unsigned long nr_slabs = 0;
unsigned long nr_inuse = 0;
unsigned long nr_objs = 0;
unsigned long nr_free = 0;
struct kmem_cache *s;
int node;
s = list_entry(p, struct kmem_cache, list);
for_each_online_node(node) {
struct kmem_cache_node *n = get_node(s, node);
if (!n)
continue;
nr_partials += n->nr_partial;
nr_slabs += atomic_long_read(&n->nr_slabs);
nr_objs += atomic_long_read(&n->total_objects);
nr_free += count_partial(n, count_free);
}
nr_inuse = nr_objs - nr_free;
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
nr_objs, s->size, oo_objects(s->oo),
(1 << oo_order(s->oo)));
seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
0UL);
seq_putc(m, '\n');
return 0;
}
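/*
 * Sample output line (illustrative values), following the header
 * printed above:
 *
 *	kmalloc-192        1024   1260    192   21    1 : tunables 0 0 0 : slabdata 60 60 0
 *
 * SLUB has no per-cpu tunables or shared arrays, hence the zeroed
 * tunables fields, and active_slabs is simply reported as num_slabs.
 */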
const struct seq_operations slabinfo_op = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.show = s_show,
};
#endif /* CONFIG_SLABINFO */
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3498_9 |
crossvul-cpp_data_good_3654_0 | /*
* Copyright © 2008,2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
uint32_t flips;
};
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invaliding though,
* as that may want to be batched with other set_domain operations
*
* This is (we hope) the only really tricky part of gem. The goal
* is fairly simple -- track which caches hold bits of the object
* and make sure they remain coherent. A few concrete examples may
* help to explain how it works. For shorthand, we use the notation
* (read_domains, write_domain), e.g. (CPU, CPU) to indicate
* a pair of read and write domain masks.
*
* Case 1: the batch buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Mapped to GTT
* 4. Read by GPU
* 5. Unmapped from GTT
* 6. Freed
*
* Let's take these a step at a time
*
* 1. Allocated
* Pages allocated from the kernel may still have
* cache contents, so we set them to (CPU, CPU) always.
* 2. Written by CPU (using pwrite)
* The pwrite function calls set_domain (CPU, CPU) and
* this function does nothing (as nothing changes)
* 3. Mapped to GTT
* This function asserts that the object is not
* currently in any GPU-based read or write domains
* 4. Read by GPU
* i915_gem_execbuffer calls set_domain (COMMAND, 0).
* As write_domain is zero, this function adds in the
* current read domains (CPU+COMMAND, 0).
* flush_domains is set to CPU.
* invalidate_domains is set to COMMAND
* clflush is run to get data out of the CPU caches
* then i915_dev_set_domain calls i915_gem_flush to
* emit an MI_FLUSH and drm_agp_chipset_flush
* 5. Unmapped from GTT
* i915_gem_object_unbind calls set_domain (CPU, CPU)
* flush_domains and invalidate_domains end up both zero
* so no flushing/invalidating happens
* 6. Freed
* yay, done
*
* Case 2: The shared render buffer
*
* 1. Allocated
* 2. Mapped to GTT
* 3. Read/written by GPU
* 4. set_domain to (CPU,CPU)
* 5. Read/written by CPU
* 6. Read/written by GPU
*
* 1. Allocated
* Same as last example, (CPU, CPU)
* 2. Mapped to GTT
* Nothing changes (assertions find that it is not in the GPU)
* 3. Read/written by GPU
* execbuffer calls set_domain (RENDER, RENDER)
* flush_domains gets CPU
* invalidate_domains gets GPU
* clflush (obj)
* MI_FLUSH and drm_agp_chipset_flush
* 4. set_domain (CPU, CPU)
* flush_domains gets GPU
* invalidate_domains gets CPU
* wait_rendering (obj) to make sure all drawing is complete.
* This will include an MI_FLUSH to get the data from GPU
* to memory
* clflush (obj) to invalidate the CPU cache
* Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
* 5. Read/written by CPU
* cache lines are loaded and dirtied
* 6. Read/written by GPU
* Same as last GPU access
*
* Case 3: The constant buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Read by GPU
* 4. Updated (written) by CPU again
* 5. Read by GPU
*
* 1. Allocated
* (CPU, CPU)
* 2. Written by CPU
* (CPU, CPU)
* 3. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
* 4. Updated (written) by CPU again
* (CPU, CPU)
* flush_domains = 0 (no previous write domain)
* invalidate_domains = 0 (no new read domains)
* 5. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct change_domains *cd)
{
uint32_t invalidate_domains = 0, flush_domains = 0;
/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
*/
if (obj->base.pending_write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
/*
* Flush the current write domain if
* the new read domains don't match. Invalidate
* any read domains which differ from the old
* write domain
*/
if (obj->base.write_domain &&
(((obj->base.write_domain != obj->base.pending_read_domains ||
obj->ring != ring)) ||
(obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
flush_domains |= obj->base.write_domain;
invalidate_domains |=
obj->base.pending_read_domains & ~obj->base.write_domain;
}
/*
* Invalidate any read caches which may have
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
* write_domains). So if we have a current write domain that we
* aren't changing, set pending_write_domain to that.
*/
if (flush_domains == 0 && obj->base.pending_write_domain == 0)
obj->base.pending_write_domain = obj->base.write_domain;
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(ring);
}
struct eb_objects {
int and;
struct hlist_head buckets[0];
};
static struct eb_objects *
eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
GFP_KERNEL);
if (eb == NULL)
return eb;
eb->and = count - 1;
return eb;
}
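/*
 * Worked example (illustrative, assuming a 4096-byte PAGE_SIZE and an
 * 8-byte struct hlist_head): count starts at 4096 / 8 / 2 == 256, and
 * for an execbuffer of 30 objects the loop halves it down to 16, so
 * eb->and == 15 and eb_add_object()/eb_get_object() below hash each
 * handle with (handle & 15).
 */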
static void
eb_reset(struct eb_objects *eb)
{
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
hlist_add_head(&obj->exec_node,
&eb->buckets[obj->exec_handle & eb->and]);
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
struct hlist_head *head;
struct hlist_node *node;
struct drm_i915_gem_object *obj;
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}
return NULL;
}
static void
eb_destroy(struct eb_objects *eb)
{
kfree(eb);
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
uint32_t target_offset;
int ret = -EINVAL;
/* we already hold a reference to all valid objects */
target_obj = &eb_get_object(eb, reloc->target_handle)->base;
if (unlikely(target_obj == NULL))
return -ENOENT;
target_offset = to_intel_bo(target_obj)->gtt_offset;
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
DRM_DEBUG("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
return ret;
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->write_domain,
target_obj->pending_write_domain);
return ret;
}
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
/* If the relocation already has the right value in it, no
* more work needs to be done.
*/
if (target_offset == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
(int) obj->base.size);
return ret;
}
if (unlikely(reloc->offset & 3)) {
DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
return ret;
}
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
char *vaddr;
vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
/* We can't wait for rendering with pagefaults disabled */
if (obj->active && in_atomic())
return -EFAULT;
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
return ret;
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
iowrite32(reloc->delta, reloc_entry);
io_mapping_unmap_atomic(reloc_page);
}
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
return 0;
}
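/*
 * Worked example (illustrative, made-up numbers): if the target
 * object sits at gtt_offset 0x00100000 and the entry carries
 * delta 0x40, the code above stores 0x00100040 into the batch at
 * reloc->offset -- via kmap_atomic() when the object is in the CPU
 * write domain, otherwise through an atomic GTT mapping -- and then
 * records 0x00100000 in presumed_offset so an unchanged target can be
 * skipped on the next execbuffer.
 */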
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
for (i = 0; i < entry->relocation_count; i++) {
struct drm_i915_gem_relocation_entry reloc;
if (__copy_from_user_inatomic(&reloc,
user_relocs+i,
sizeof(reloc)))
return -EFAULT;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
if (ret)
return ret;
if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
&reloc.presumed_offset,
sizeof(reloc.presumed_offset)))
return -EFAULT;
}
return 0;
}
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
struct drm_i915_gem_relocation_entry *relocs)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
if (ret)
return ret;
}
return 0;
}
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
* holding the struct mutex lest the user pass in the relocations
* contained within a mmaped bo. In such a case the page fault
* handler would call i915_gem_fault() and we would try to
* acquire the struct mutex again. Obviously this is bad and so
* lockdep complains vehemently.
*/
pagefault_disable();
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
}
pagefault_enable();
return ret;
}
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
int ret;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
return ret;
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
if (obj->tiling_mode) {
ret = i915_gem_object_get_fence(obj, ring);
if (ret)
goto err_unpin;
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
i915_gem_object_pin_fence(obj);
} else {
ret = i915_gem_object_put_fence(obj);
if (ret)
goto err_unpin;
}
obj->pending_fenced_gpu_access = true;
}
}
entry->offset = obj->gtt_offset;
return 0;
err_unpin:
i915_gem_object_unpin(obj);
return ret;
}
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
struct list_head ordered_objects;
INIT_LIST_HEAD(&ordered_objects);
while (!list_empty(objects)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
entry = obj->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
else
list_move_tail(&obj->exec_list, &ordered_objects);
obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
}
list_splice(&ordered_objects, objects);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
*
* 1a. Unbind all objects that do not match the GTT constraints for
* the execbuffer (fenceable, mappable, alignment etc).
* 1b. Increment pin count for already bound objects.
* 2. Bind new objects.
* 3. Decrement pin count.
*
* This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
*/
retry = 0;
do {
ret = 0;
/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
if (!obj->gtt_space)
continue;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
ret = pin_and_fence_object(obj, ring);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
if (obj->gtt_space)
continue;
ret = pin_and_fence_object(obj, ring);
if (ret) {
int ret_ignore;
/* This can potentially raise a harmless
* -EINVAL if we failed to bind in the above
* call. It cannot raise -EINTR since we know
* that the bo is freshly bound and so will
* not need to be flushed or waited upon.
*/
ret_ignore = i915_gem_object_unbind(obj);
(void)ret_ignore;
WARN_ON(obj->gtt_space);
break;
}
}
/* Decrement pin count for bound objects */
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
continue;
entry = obj->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
i915_gem_object_unpin_fence(obj);
entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);
/* ... and ensure ppgtt mapping exist if needed. */
if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, obj->cache_level);
obj->has_aliasing_ppgtt_mapping = 1;
}
}
if (ret != -ENOSPC || retry > 1)
return ret;
/* First attempt, just clear anything that is purgeable.
* Second attempt, clear the entire GTT.
*/
ret = i915_gem_evict_everything(ring->dev, retry == 0);
if (ret)
return ret;
retry++;
} while (1);
err:
list_for_each_entry_continue_reverse(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry;
if (!obj->gtt_space)
continue;
entry = obj->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
i915_gem_object_unpin_fence(obj);
entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
i915_gem_object_unpin(obj);
}
return ret;
}
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct list_head *objects,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
int count)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
int *reloc_offset;
int i, total, ret;
/* We may process another execbuffer during the unlock... */
while (!list_empty(objects)) {
obj = list_first_entry(objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
total = 0;
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
reloc = drm_malloc_ab(total, sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) {
drm_free_large(reloc);
drm_free_large(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
total = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
}
reloc_offset[i] = total;
total += exec[i].relocation_count;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret) {
mutex_lock(&dev->struct_mutex);
goto err;
}
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
}
list_add_tail(&obj->exec_list, objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
ret = i915_gem_execbuffer_reserve(ring, file, objects);
if (ret)
goto err;
list_for_each_entry(obj, objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
if (ret)
goto err;
}
/* Leave the user relocations as they are, this is the painfully slow path,
* and we want to avoid the complication of dropping the lock whilst
* having buffers reserved in the aperture and so causing spurious
* ENOSPC for random operations.
*/
err:
drm_free_large(reloc);
drm_free_large(reloc_offset);
return ret;
}
static int
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains,
uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, ret;
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
return ret;
}
}
return 0;
}
static bool
intel_enable_semaphores(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return 0;
if (i915_semaphores >= 0)
return i915_semaphores;
/* Disable semaphores on SNB */
if (INTEL_INFO(dev)->gen == 6)
return 0;
return 1;
}
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
{
struct intel_ring_buffer *from = obj->ring;
u32 seqno;
int ret, idx;
if (from == NULL || to == from)
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
seqno = obj->last_rendering_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
if (seqno == from->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
ret = i915_add_request(from, NULL, request);
if (ret) {
kfree(request);
return ret;
}
seqno = request->seqno;
}
from->sync_seqno[idx] = seqno;
return to->sync_to(to, from, seqno - 1);
}
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
u32 plane, flip_mask;
int ret;
/* Check for any pending flips. As we only maintain a flip queue depth
* of 1, we can simply insert a WAIT for the next display flip prior
* to executing the batch and avoid stalling the CPU.
*/
for (plane = 0; flips >> plane; plane++) {
if (((flips >> plane) & 1) == 0)
continue;
if (plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
return 0;
}
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct change_domains cd;
int ret;
memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
cd.flush_rings);
if (ret)
return ret;
}
if (cd.flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
if (ret)
return ret;
}
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
if (ret)
return ret;
}
return 0;
}
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
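/*
 * Example (illustrative): batch_start_offset == 0x40 with
 * batch_len == 0x1000 ORs to 0x1040, whose low three bits are clear,
 * so the execbuffer is accepted; an offset of 0x44 would be rejected,
 * as both values handed to the ring dispatch must be 8-byte aligned.
 */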
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
return -EINVAL;
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
/* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
if (fault_in_pages_readable(ptr, length))
return -EFAULT;
}
return 0;
}
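/*
 * Why the INT_MAX guard above matters (illustrative arithmetic,
 * assuming a 32-byte relocation entry): a hostile relocation_count of
 * 0x08000001 would make the multiplication wrap to a small positive
 * length, letting the access_ok() checks pass while the per-entry
 * relocation loops later walk far beyond the validated range.
 * Rejecting counts above INT_MAX / sizeof(entry) closes that hole
 * before anything is copied.
 */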
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
u32 seqno)
{
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
u32 invalidate;
/*
* Ensure that the commands in the batch buffer are
* finished before the interrupt fires.
*
* The sampler always gets flushed on i965 (sigh).
*/
invalidate = I915_GEM_DOMAIN_COMMAND;
if (INTEL_INFO(dev)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
i915_gem_next_request_seqno(ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(ring, file, request)) {
i915_gem_next_request_seqno(ring);
kfree(request);
}
}
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
return 0;
ret = intel_ring_begin(ring, 4 * 3);
if (ret)
return ret;
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
}
intel_ring_advance(ring);
return 0;
}
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
return ret;
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4)
return -EINVAL;
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
/* The HW changed the meaning of this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
}
if (copy_from_user(cliprects,
(struct drm_clip_rect __user *)(uintptr_t)
args->cliprects_ptr,
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
}
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto pre_mutex_err;
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
eb = eb_create(args->buffer_count);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto pre_mutex_err;
}
/* Look up object handles */
INIT_LIST_HEAD(&objects);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
goto err;
}
if (!list_empty(&obj->exec_list)) {
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
}
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
&objects, eb,
exec,
args->buffer_count);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU cannot handle its semaphore value wrapping,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
ret = i915_gpu_idle(dev, true);
if (ret)
goto err;
BUG_ON(ring->sync_seqno[i]);
}
}
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, mask << 16 | mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto err;
}
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
if (ret)
goto err;
}
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
kfree(cliprects);
return ret;
}
/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
*/
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
/* Copy in the exec list from userland */
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
return -EFAULT;
}
for (i = 0; i < args->buffer_count; i++) {
exec2_list[i].handle = exec_list[i].handle;
exec2_list[i].relocation_count = exec_list[i].relocation_count;
exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
exec2_list[i].alignment = exec_list[i].alignment;
exec2_list[i].offset = exec_list[i].offset;
if (INTEL_INFO(dev)->gen < 4)
exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
else
exec2_list[i].flags = 0;
}
exec2.buffers_ptr = args->buffers_ptr;
exec2.buffer_count = args->buffer_count;
exec2.batch_start_offset = args->batch_start_offset;
exec2.batch_len = args->batch_len;
exec2.DR1 = args->DR1;
exec2.DR4 = args->DR4;
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec_list);
drm_free_large(exec2_list);
return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
(struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
}
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
}
drm_free_large(exec2_list);
return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3654_0 |
crossvul-cpp_data_bad_3541_1 | /*
* Performance event support - powerpc architecture code
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int n_percpu;
int disabled;
int n_added;
int n_limited;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int flags[MAX_HWEVENTS];
unsigned long mmcr[3];
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned int group_flag;
int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
/*
* Normally, to ignore kernel events we set the FCS (freeze counters
* in supervisor mode) bit in MMCR0, but if the kernel runs with the
* hypervisor bit set in the MSR, or if we are running on a processor
* where the hypervisor bit is forced to 1 (as on Apple G5 processors),
* then we need to use the FCHV bit to ignore kernel events.
*/
static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
* 32-bit doesn't have MMCRA but does have an MMCR2,
* and a few other names are different.
*/
#ifdef CONFIG_PPC32
#define MMCR0_FCHV 0
#define MMCR0_PMCjCE MMCR0_PMCnCE
#define SPRN_MMCRA SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE 0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
return 0;
}
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_PPC32 */
/*
* Things that are specific to 64-bit implementations.
*/
#ifdef CONFIG_PPC64
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
if (slot > 1)
return 4 * (slot - 1);
}
return 0;
}
/*
* The user wants a data address recorded.
* If we're not doing instruction sampling, give them the SDAR
* (sampled data address). If we are doing instruction sampling, then
* only give them the SDAR if it corresponds to the instruction
* pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
* bit in MMCRA.
*/
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
*addrp = mfspr(SPRN_SDAR);
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
unsigned long sihv = MMCRA_SIHV;
unsigned long sipr = MMCRA_SIPR;
if (TRAP(regs) != 0xf00)
return 0; /* not a PMU interrupt */
if (ppmu->flags & PPMU_ALT_SIPR) {
sihv = POWER6_MMCRA_SIHV;
sipr = POWER6_MMCRA_SIPR;
}
/* PR has priority over HV, so order below is important */
if (mmcra & sipr)
return PERF_RECORD_MISC_USER;
if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
return PERF_RECORD_MISC_KERNEL;
}
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
*/
static inline void perf_read_regs(struct pt_regs *regs)
{
regs->dsisr = mfspr(SPRN_MMCRA);
}
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return !regs->softe;
}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
void perf_event_print_debug(void)
{
}
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 1:
val = mfspr(SPRN_PMC1);
break;
case 2:
val = mfspr(SPRN_PMC2);
break;
case 3:
val = mfspr(SPRN_PMC3);
break;
case 4:
val = mfspr(SPRN_PMC4);
break;
case 5:
val = mfspr(SPRN_PMC5);
break;
case 6:
val = mfspr(SPRN_PMC6);
break;
#ifdef CONFIG_PPC64
case 7:
val = mfspr(SPRN_PMC7);
break;
case 8:
val = mfspr(SPRN_PMC8);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 1:
mtspr(SPRN_PMC1, val);
break;
case 2:
mtspr(SPRN_PMC2, val);
break;
case 3:
mtspr(SPRN_PMC3, val);
break;
case 4:
mtspr(SPRN_PMC4, val);
break;
case 5:
mtspr(SPRN_PMC5, val);
break;
case 6:
mtspr(SPRN_PMC6, val);
break;
#ifdef CONFIG_PPC64
case 7:
mtspr(SPRN_PMC7, val);
break;
case 8:
mtspr(SPRN_PMC8, val);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
}
/*
* Check if a set of events can all go on the PMU at once.
* If they can't, this will look at alternative codes for the events
* and see if any combination of alternative codes is feasible.
* The feasible set is returned in event_id[].
*/
static int power_check_constraints(struct cpu_hw_events *cpuhw,
u64 event_id[], unsigned int cflags[],
int n_ev)
{
unsigned long mask, value, nv;
unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
int i, j;
unsigned long addf = ppmu->add_fields;
unsigned long tadd = ppmu->test_adder;
if (n_ev > ppmu->n_counter)
return -1;
/* First see if the events will go on as-is */
for (i = 0; i < n_ev; ++i) {
if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
&& !ppmu->limited_pmc_event(event_id[i])) {
ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
event_id[i] = cpuhw->alternatives[i][0];
}
if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
&cpuhw->avalues[i][0]))
return -1;
}
value = mask = 0;
for (i = 0; i < n_ev; ++i) {
nv = (value | cpuhw->avalues[i][0]) +
(value & cpuhw->avalues[i][0] & addf);
if ((((nv + tadd) ^ value) & mask) != 0 ||
(((nv + tadd) ^ cpuhw->avalues[i][0]) &
cpuhw->amasks[i][0]) != 0)
break;
value = nv;
mask |= cpuhw->amasks[i][0];
}
if (i == n_ev)
return 0; /* all OK */
/* doesn't work, gather alternatives... */
if (!ppmu->get_alternatives)
return -1;
for (i = 0; i < n_ev; ++i) {
choice[i] = 0;
n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
&cpuhw->amasks[i][j],
&cpuhw->avalues[i][j]);
}
/* enumerate all possibilities and see if any will work */
i = 0;
j = -1;
value = mask = nv = 0;
while (i < n_ev) {
if (j >= 0) {
/* we're backtracking, restore context */
value = svalues[i];
mask = smasks[i];
j = choice[i];
}
/*
* See if any alternative k for event_id i,
* where k > j, will satisfy the constraints.
*/
while (++j < n_alt[i]) {
nv = (value | cpuhw->avalues[i][j]) +
(value & cpuhw->avalues[i][j] & addf);
if ((((nv + tadd) ^ value) & mask) == 0 &&
(((nv + tadd) ^ cpuhw->avalues[i][j])
& cpuhw->amasks[i][j]) == 0)
break;
}
if (j >= n_alt[i]) {
/*
* No feasible alternative, backtrack
* to event_id i-1 and continue enumerating its
* alternatives from where we got up to.
*/
if (--i < 0)
return -1;
} else {
/*
* Found a feasible alternative for event_id i,
* remember where we got up to with this event_id,
* go on to the next event_id, and start with
* the first alternative for it.
*/
choice[i] = j;
svalues[i] = value;
smasks[i] = mask;
value = nv;
mask |= cpuhw->amasks[i][j];
++i;
j = -1;
}
}
/* OK, we have a feasible combination, tell the caller the solution */
for (i = 0; i < n_ev; ++i)
event_id[i] = cpuhw->alternatives[i][choice[i]];
return 0;
}
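/*
 * Worked example of the constraint arithmetic used above (values are
 * illustrative, not taken from any real PMU description): suppose a
 * "counters used" field occupies the low bits with addf = 0x1, so the
 * field adds rather than ORs, and each event contributes avalue = 0x1.
 * A minimal user-space sketch of the accumulate step, assuming those
 * encodings:
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           uint64_t addf = 0x1, value = 0, avalue = 0x1, nv;
 *
 *           nv = (value | avalue) + (value & avalue & addf);
 *           value = nv;                         // 0x1: one counter used
 *           nv = (value | avalue) + (value & avalue & addf);
 *           printf("%llx\n", (unsigned long long)nv);  // prints 2
 *           return 0;
 *   }
 *
 * The OR handles select-style fields while the masked AND injects a
 * carry for count-style fields, so one unsigned add updates both kinds
 * of constraint field at once.
 */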
/*
* Check if newly-added events have consistent settings for
* exclude_{user,kernel,hv} with each other and any previously
* added events.
*/
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
int i, n, first;
struct perf_event *event;
n = n_prev + n_new;
if (n <= 1)
return 0;
first = 1;
for (i = 0; i < n; ++i) {
if (cflags[i] & PPMU_LIMITED_PMC_OK) {
cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
continue;
}
event = ctrs[i];
if (first) {
eu = event->attr.exclude_user;
ek = event->attr.exclude_kernel;
eh = event->attr.exclude_hv;
first = 0;
} else if (event->attr.exclude_user != eu ||
event->attr.exclude_kernel != ek ||
event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
if (eu || ek || eh)
for (i = 0; i < n; ++i)
if (cflags[i] & PPMU_LIMITED_PMC_OK)
cflags[i] |= PPMU_LIMITED_PMC_REQD;
return 0;
}
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
if (!event->hw.idx)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
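/*
 * The masked subtraction above stays correct across 32-bit counter
 * wraparound. A minimal user-space sketch of the same idea (names are
 * illustrative, not kernel API):
 *
 *   #include <stdint.h>
 *
 *   static uint64_t pmc_delta(uint64_t prev, uint64_t val)
 *   {
 *           // Modular subtraction: correct even if the counter
 *           // wrapped past zero since the previous read.
 *           return (val - prev) & 0xfffffffful;
 *   }
 *
 * E.g. prev = 0xfffffff0 and val = 0x10 after a wrap gives 0x20, the
 * true event count, instead of a huge bogus delta.
 */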
/*
* On some machines, PMC5 and PMC6 can't be written, don't respect
* the freeze conditions, and don't generate interrupts. This tells
* us if `event' is using such a PMC.
*/
static int is_limited_pmc(int pmcnum)
{
return (ppmu->flags & PPMU_LIMITED_PMC5_6)
&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val, prev, delta;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
if (!event->hw.idx)
continue;
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
/*
* Since limited events don't respect the freeze conditions, we
* have to read them immediately after freezing or unfreezing the
* other events. We try to keep the values from the limited
* events as consistent as possible by keeping the delay (in
* cycles and instructions) between freezing/unfreezing and reading
* the limited events as small and consistent as possible.
* Therefore, if any limited events are in use, we read them
* both, and always in the same order, to minimize variability,
* and do it inside the same asm that writes MMCR0.
*/
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
unsigned long pmc5, pmc6;
if (!cpuhw->n_limited) {
mtspr(SPRN_MMCR0, mmcr0);
return;
}
/*
* Write MMCR0, then read PMC5 and PMC6 immediately.
* To ensure we don't get a performance monitor interrupt
* between writing MMCR0 and freezing/thawing the limited
* events, we first write MMCR0 with the event overflow
* interrupt enable bits turned off.
*/
asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
: "=&r" (pmc5), "=&r" (pmc6)
: "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
"i" (SPRN_MMCR0),
"i" (SPRN_PMC5), "i" (SPRN_PMC6));
if (mmcr0 & MMCR0_FC)
freeze_limited_counters(cpuhw, pmc5, pmc6);
else
thaw_limited_counters(cpuhw, pmc5, pmc6);
/*
* Write the full MMCR0 including the event overflow interrupt
* enable bits, if necessary.
*/
if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
mtspr(SPRN_MMCR0, mmcr0);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
cpuhw->n_added = 0;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
/*
* Disable instruction sampling if it was enabled
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
mtspr(SPRN_MMCRA,
cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mb();
}
/*
* Set the 'freeze counters' bit.
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the events
* before we return.
*/
write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
mb();
}
local_irq_restore(flags);
}
/*
* Re-enable all events if disable == 0.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void power_pmu_enable(struct pmu *pmu)
{
struct perf_event *event;
struct cpu_hw_events *cpuhw;
unsigned long flags;
long i;
unsigned long val;
s64 left;
unsigned int hwc_index[MAX_HWEVENTS];
int n_lim;
int idx;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
local_irq_restore(flags);
return;
}
cpuhw->disabled = 0;
/*
* If we didn't change anything, or only removed events,
* no need to recalculate MMCR* settings and reset the PMCs.
* Just reenable the PMU with the current MMCR* settings
* (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
if (cpuhw->n_events == 0)
ppc_set_pmu_inuse(0);
goto out_enable;
}
/*
* Compute MMCR* values for the new set of events
*/
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
cpuhw->mmcr)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
}
/*
* Add in MMCR0 freeze bits corresponding to the
* attr.exclude_* bits for the first event.
* We have already checked that all events have the
* same values for these bits as the first event.
*/
event = cpuhw->event[0];
if (event->attr.exclude_user)
cpuhw->mmcr[0] |= MMCR0_FCP;
if (event->attr.exclude_kernel)
cpuhw->mmcr[0] |= freeze_events_kernel;
if (event->attr.exclude_hv)
cpuhw->mmcr[0] |= MMCR0_FCHV;
/*
* Write the new configuration to MMCR* with the freeze
* bit set and set the hardware events to their initial values.
* Then unfreeze the events.
*/
ppc_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
/*
* Read off any pre-existing events that need to move
* to another PMC.
*/
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
power_pmu_read(event);
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
}
/*
* Initialize the PMCs for all the new and moved events.
*/
cpuhw->n_limited = n_lim = 0;
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx)
continue;
idx = hwc_index[i] + 1;
if (is_limited_pmc(idx)) {
cpuhw->limited_counter[n_lim] = event;
cpuhw->limited_hwidx[n_lim] = idx;
++n_lim;
continue;
}
val = 0;
if (event->hw.sample_period) {
left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
event->hw.idx = idx;
if (event->hw.state & PERF_HES_STOPPED)
val = 0;
write_pmc(idx, val);
perf_event_update_userpage(event);
}
cpuhw->n_limited = n_lim;
cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
out_enable:
mb();
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
/*
* Enable instruction sampling if necessary
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
mb();
mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
flags[n] = group->hw.event_base;
events[n++] = group->hw.config;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
flags[n] = event->hw.event_base;
events[n++] = event->hw.config;
}
}
return n;
}
/*
* Add an event to the PMU.
* If all events are not already frozen, then we disable and
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
*/
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
int n0;
int ret = -EAGAIN;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
/*
* Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
cpuhw = &__get_cpu_var(cpu_hw_events);
n0 = cpuhw->n_events;
if (n0 >= ppmu->n_counter)
goto out;
cpuhw->event[n0] = event;
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
if (!(ef_flags & PERF_EF_START))
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
/*
* If a group events scheduling transaction was started,
* skip the schedulability test here; it will be performed
* at commit time (->commit_txn) as a whole
*/
if (cpuhw->group_flag & PERF_EVENT_TXN)
goto nocheck;
if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
event->hw.config = cpuhw->events[n0];
nocheck:
++cpuhw->n_events;
++cpuhw->n_added;
ret = 0;
out:
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
/*
* Remove an event from the PMU.
*/
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
cpuhw = &__get_cpu_var(cpu_hw_events);
for (i = 0; i < cpuhw->n_events; ++i) {
if (event == cpuhw->event[i]) {
while (++i < cpuhw->n_events) {
cpuhw->event[i-1] = cpuhw->event[i];
cpuhw->events[i-1] = cpuhw->events[i];
cpuhw->flags[i-1] = cpuhw->flags[i];
}
--cpuhw->n_events;
ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
if (event->hw.idx) {
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
perf_event_update_userpage(event);
break;
}
}
for (i = 0; i < cpuhw->n_limited; ++i)
if (event == cpuhw->limited_counter[i])
break;
if (i < cpuhw->n_limited) {
while (++i < cpuhw->n_limited) {
cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
}
--cpuhw->n_limited;
}
if (cpuhw->n_events == 0) {
/* disable exceptions if no events are running */
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* POWER-PMU does not support disabling individual counters; hence we
* program the counter to its max value and ignore the interrupts.
*/
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test; it will be performed at commit time
*/
void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
cpuhw->n_txn_start = cpuhw->n_events;
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Returns 0 on success
*/
int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
if (!ppmu)
return -EAGAIN;
cpuhw = &__get_cpu_var(cpu_hw_events);
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
if (i < 0)
return -EAGAIN;
for (i = cpuhw->n_txn_start; i < n; ++i)
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
/*
* Return 1 if we might be able to put the event on a limited PMC,
* or 0 if not.
* An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
unsigned int flags)
{
int n;
u64 alt[MAX_EVENT_ALTERNATIVES];
if (event->attr.exclude_user
|| event->attr.exclude_kernel
|| event->attr.exclude_hv
|| event->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
return 1;
/*
* The requested event_id isn't on a limited PMC already;
* see if any alternative code goes on a limited PMC.
*/
if (!ppmu->get_alternatives)
return 0;
flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
n = ppmu->get_alternatives(ev, flags, alt);
return n > 0;
}
/*
* Find an alternative event_id that goes on a normal PMC, if possible,
* and return the event_id code, or 0 if there is no such alternative.
* (Note: event_id code 0 is "don't count" on all machines.)
*/
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
u64 alt[MAX_EVENT_ALTERNATIVES];
int n;
flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
n = ppmu->get_alternatives(ev, flags, alt);
if (!n)
return 0;
return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
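/*
 * The cache event config unpacked above is packed low bits first:
 * type in bits 0-7, op in bits 8-15, result in bits 16-23. A hedged
 * sketch of the inverse packing (field values are illustrative):
 *
 *   #include <stdint.h>
 *
 *   static uint64_t pack_cache_event(unsigned type, unsigned op,
 *                                    unsigned result)
 *   {
 *           return (uint64_t)type | ((uint64_t)op << 8) |
 *                  ((uint64_t)result << 16);
 *   }
 *
 * So an L1D read-miss style event with type = 0, op = 0, result = 1
 * packs to 0x10000, which the code above splits back into (0, 0, 1).
 */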
static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
struct perf_event *ctrs[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int cflags[MAX_HWEVENTS];
int n;
int err;
struct cpu_hw_events *cpuhw;
if (!ppmu)
return -ENOENT;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config_base = ev;
event->hw.idx = 0;
/*
* If we are not running on a hypervisor, force the
* exclude_hv bit to 0 so that we don't care what
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
event->attr.exclude_hv = 0;
/*
* If this is a per-task event, then we can use
* PM_RUN_* events interchangeably with their non RUN_*
* equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
* XXX we should check if the task is an idle task.
*/
flags = 0;
if (event->attach_state & PERF_ATTACH_TASK)
flags |= PPMU_ONLY_COUNT_RUN;
/*
* If this machine has limited events, check whether this
* event_id could go on a limited event.
*/
if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
if (can_go_on_limited_pmc(event, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
/*
* The requested event_id is on a limited PMC,
* but we can't use a limited PMC; see if any
* alternative goes on a normal PMC.
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
return -EINVAL;
}
}
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
return -EINVAL;
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return -EINVAL;
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
put_cpu_var(cpu_hw_events);
if (err)
return -EINVAL;
event->hw.config = events[n];
event->hw.event_base = cflags[n];
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
event->destroy = hw_perf_event_destroy;
return err;
}
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
.event_init = power_pmu_event_init,
.add = power_pmu_add,
.del = power_pmu_del,
.start = power_pmu_start,
.stop = power_pmu_stop,
.read = power_pmu_read,
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, ~0ULL);
data.period = event->hw.last_period;
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
if (perf_event_overflow(event, nmi, &data, regs))
power_pmu_stop(event, 0);
}
}
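/*
 * Worked example of the reload arithmetic above (illustrative numbers):
 * the PMCs raise an interrupt when bit 31 goes high, so a counter
 * seeded with 0x80000000 - left overflows after exactly `left' events.
 * Suppose period = 1000000, the previous period_left was 700, and
 * delta = 1000 (we ran 300 events past the period boundary):
 *
 *   left = 700 - 1000 = -300            expired, so record = 1
 *   left += period            -> 999700 next period, shortened by 300
 *   val  = 0x80000000 - 999700          overflows in 999700 events
 *
 * Shortening the next period by the overshoot keeps the long-run rate
 * at one sample per `period' events despite interrupt latency.
 */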
/*
* Called from generic code to get the misc flags (i.e. processor mode)
* for an event_id.
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
u32 flags = perf_get_misc_flags(regs);
if (flags)
return flags;
return user_mode(regs) ? PERF_RECORD_MISC_USER :
PERF_RECORD_MISC_KERNEL;
}
/*
* Called from generic code to get the instruction pointer
* for an event_id.
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
if (TRAP(regs) != 0xf00)
return regs->nip; /* not a PMU interrupt */
ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
return ip;
}
/*
* Performance monitor interrupt stuff
*/
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
if ((int)val < 0) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
}
}
/*
* In case we didn't find and reset the event that caused
* the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
if (!found) {
for (i = 0; i < ppmu->n_counter; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
if ((int)val < 0)
write_pmc(i + 1, 0);
}
}
/*
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
if (nmi)
nmi_exit();
else
irq_exit();
}
static void power_pmu_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
if (!ppmu)
return;
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr[0] = MMCR0_FC;
}
static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
power_pmu_setup(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
int register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(power_pmu_notifier);
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3541_1 |
crossvul-cpp_data_bad_2020_7 | /*
* txtquery io
* Teodor Sigaev <teodor@stack.net>
* contrib/ltree/ltxtquery_io.c
*/
#include "postgres.h"
#include <ctype.h>
#include "crc32.h"
#include "ltree.h"
PG_FUNCTION_INFO_V1(ltxtq_in);
Datum ltxtq_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltxtq_out);
Datum ltxtq_out(PG_FUNCTION_ARGS);
/* parser's states */
#define WAITOPERAND 1
#define INOPERAND 2
#define WAITOPERATOR 3
/*
* node of the query tree, also used
* for storing Polish notation in the parser
*/
typedef struct NODE
{
int32 type;
int32 val;
int16 distance;
int16 length;
uint16 flag;
struct NODE *next;
} NODE;
typedef struct
{
char *buf;
int32 state;
int32 count;
/* reverse polish notation in list (for temporary usage) */
NODE *str;
/* number in str */
int32 num;
/* user-friendly operand */
int32 lenop;
int32 sumlen;
char *op;
char *curop;
} QPRS_STATE;
/*
* get token from query string
*/
static int32
gettoken_query(QPRS_STATE *state, int32 *val, int32 *lenval, char **strval, uint16 *flag)
{
int charlen;
for (;;)
{
charlen = pg_mblen(state->buf);
switch (state->state)
{
case WAITOPERAND:
if (charlen == 1 && t_iseq(state->buf, '!'))
{
(state->buf)++;
*val = (int32) '!';
return OPR;
}
else if (charlen == 1 && t_iseq(state->buf, '('))
{
state->count++;
(state->buf)++;
return OPEN;
}
else if (ISALNUM(state->buf))
{
state->state = INOPERAND;
*strval = state->buf;
*lenval = charlen;
*flag = 0;
}
else if (!t_isspace(state->buf))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("operand syntax error")));
break;
case INOPERAND:
if (ISALNUM(state->buf))
{
if (*flag)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("modificators syntax error")));
*lenval += charlen;
}
else if (charlen == 1 && t_iseq(state->buf, '%'))
*flag |= LVAR_SUBLEXEME;
else if (charlen == 1 && t_iseq(state->buf, '@'))
*flag |= LVAR_INCASE;
else if (charlen == 1 && t_iseq(state->buf, '*'))
*flag |= LVAR_ANYEND;
else
{
state->state = WAITOPERATOR;
return VAL;
}
break;
case WAITOPERATOR:
if (charlen == 1 && (t_iseq(state->buf, '&') || t_iseq(state->buf, '|')))
{
state->state = WAITOPERAND;
*val = (int32) *(state->buf);
(state->buf)++;
return OPR;
}
else if (charlen == 1 && t_iseq(state->buf, ')'))
{
(state->buf)++;
state->count--;
return (state->count < 0) ? ERR : CLOSE;
}
else if (*(state->buf) == '\0')
return (state->count) ? ERR : END;
else if (charlen == 1 && !t_iseq(state->buf, ' '))
return ERR;
break;
default:
return ERR;
break;
}
state->buf += charlen;
}
}
/*
* push a new node onto the reverse Polish notation list
*/
static void
pushquery(QPRS_STATE *state, int32 type, int32 val, int32 distance, int32 lenval, uint16 flag)
{
NODE *tmp = (NODE *) palloc(sizeof(NODE));
tmp->type = type;
tmp->val = val;
tmp->flag = flag;
if (distance > 0xffff)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("value is too big")));
if (lenval > 0xff)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("operand is too long")));
tmp->distance = distance;
tmp->length = lenval;
tmp->next = state->str;
state->str = tmp;
state->num++;
}
/*
* This function is used for query_txt parsing
*/
static void
pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
{
if (lenval > 0xffff)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long")));
pushquery(state, type, ltree_crc32_sz(strval, lenval),
state->curop - state->op, lenval, flag);
while (state->curop - state->op + lenval + 1 >= state->lenop)
{
int32 tmp = state->curop - state->op;
state->lenop *= 2;
state->op = (char *) repalloc((void *) state->op, state->lenop);
state->curop = state->op + tmp;
}
memcpy((void *) state->curop, (void *) strval, lenval);
state->curop += lenval;
*(state->curop) = '\0';
state->curop++;
state->sumlen += lenval + 1;
return;
}
#define STACKDEPTH 32
/*
* build the Polish (postfix) notation of the query
*/
static int32
makepol(QPRS_STATE *state)
{
int32 val = 0,
type;
int32 lenval = 0;
char *strval = NULL;
int32 stack[STACKDEPTH];
int32 lenstack = 0;
uint16 flag = 0;
while ((type = gettoken_query(state, &val, &lenval, &strval, &flag)) != END)
{
switch (type)
{
case VAL:
pushval_asis(state, VAL, strval, lenval, flag);
while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
}
break;
case OPR:
if (lenstack && val == (int32) '|')
pushquery(state, OPR, val, 0, 0, 0);
else
{
if (lenstack == STACKDEPTH)
/* internal error */
elog(ERROR, "stack too short");
stack[lenstack] = val;
lenstack++;
}
break;
case OPEN:
if (makepol(state) == ERR)
return ERR;
while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
}
break;
case CLOSE:
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
};
return END;
break;
case ERR:
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
return ERR;
}
}
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
};
return END;
}
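/*
 * Worked example (illustrative input): parsing "a & b" emits, in
 * order, VAL(a), VAL(b), OPR(&) - ordinary postfix. Because
 * pushquery() prepends each node to state->str, the stored sequence
 * reads OPR(&), VAL(b), VAL(a), i.e. Polish prefix form with the
 * right operand first, which is what findoprnd() below consumes.
 */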
static void
findoprnd(ITEM *ptr, int32 *pos)
{
if (ptr[*pos].type == VAL || ptr[*pos].type == VALTRUE)
{
ptr[*pos].left = 0;
(*pos)++;
}
else if (ptr[*pos].val == (int32) '!')
{
ptr[*pos].left = 1;
(*pos)++;
findoprnd(ptr, pos);
}
else
{
ITEM *curitem = &ptr[*pos];
int32 tmp = *pos;
(*pos)++;
findoprnd(ptr, pos);
curitem->left = *pos - tmp;
findoprnd(ptr, pos);
}
}
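/*
 * Continuing the "a & b" example: findoprnd() walks the item array
 * { OPR(&), VAL(b), VAL(a) } starting at pos = 0. For the operator it
 * first recurses past the right operand VAL(b), then records
 * curitem->left = 2, the distance in items from '&' to its left
 * operand VAL(a), so evaluation can jump straight to either subtree.
 */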
/*
* input
*/
static ltxtquery *
queryin(char *buf)
{
QPRS_STATE state;
int32 i;
ltxtquery *query;
int32 commonlen;
ITEM *ptr;
NODE *tmp;
int32 pos = 0;
#ifdef BS_DEBUG
char pbuf[16384],
*cur;
#endif
/* init state */
state.buf = buf;
state.state = WAITOPERAND;
state.count = 0;
state.num = 0;
state.str = NULL;
/* init list of operand */
state.sumlen = 0;
state.lenop = 64;
state.curop = state.op = (char *) palloc(state.lenop);
*(state.curop) = '\0';
/* parse query & make Polish notation (postfix, but in reverse order) */
makepol(&state);
if (!state.num)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Empty query.")));
/* make finish struct */
commonlen = COMPUTESIZE(state.num, state.sumlen);
query = (ltxtquery *) palloc(commonlen);
SET_VARSIZE(query, commonlen);
query->size = state.num;
ptr = GETQUERY(query);
/* set item in polish notation */
for (i = 0; i < state.num; i++)
{
ptr[i].type = state.str->type;
ptr[i].val = state.str->val;
ptr[i].distance = state.str->distance;
ptr[i].length = state.str->length;
ptr[i].flag = state.str->flag;
tmp = state.str->next;
pfree(state.str);
state.str = tmp;
}
/* set user-friendly operand view */
memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen);
pfree(state.op);
/* set left operand's position for every operator */
pos = 0;
findoprnd(ptr, &pos);
return query;
}
/*
* in without morphology
*/
Datum
ltxtq_in(PG_FUNCTION_ARGS)
{
PG_RETURN_POINTER(queryin((char *) PG_GETARG_POINTER(0)));
}
/*
* out function
*/
typedef struct
{
ITEM *curpol;
char *buf;
char *cur;
char *op;
int32 buflen;
} INFIX;
#define RESIZEBUF(inf,addsize) \
while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) \
{ \
int32 len = (inf)->cur - (inf)->buf; \
(inf)->buflen *= 2; \
(inf)->buf = (char*) repalloc( (void*)(inf)->buf, (inf)->buflen ); \
(inf)->cur = (inf)->buf + len; \
}
/*
* recursively walk the tree and print it in
* infix (human-readable) form
*/
static void
infix(INFIX *in, bool first)
{
if (in->curpol->type == VAL)
{
char *op = in->op + in->curpol->distance;
RESIZEBUF(in, in->curpol->length * 2 + 5);
while (*op)
{
*(in->cur) = *op;
op++;
in->cur++;
}
if (in->curpol->flag & LVAR_SUBLEXEME)
{
*(in->cur) = '%';
in->cur++;
}
if (in->curpol->flag & LVAR_INCASE)
{
*(in->cur) = '@';
in->cur++;
}
if (in->curpol->flag & LVAR_ANYEND)
{
*(in->cur) = '*';
in->cur++;
}
*(in->cur) = '\0';
in->curpol++;
}
else if (in->curpol->val == (int32) '!')
{
bool isopr = false;
RESIZEBUF(in, 1);
*(in->cur) = '!';
in->cur++;
*(in->cur) = '\0';
in->curpol++;
if (in->curpol->type == OPR)
{
isopr = true;
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
in->cur = strchr(in->cur, '\0');
}
infix(in, isopr);
if (isopr)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
in->cur = strchr(in->cur, '\0');
}
}
else
{
int32 op = in->curpol->val;
INFIX nrm;
in->curpol++;
if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
in->cur = strchr(in->cur, '\0');
}
nrm.curpol = in->curpol;
nrm.op = in->op;
nrm.buflen = 16;
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
/* get right operand */
infix(&nrm, false);
/* get & print left operand */
in->curpol = nrm.curpol;
infix(in, false);
/* print operator & right operand */
RESIZEBUF(in, 3 + (nrm.cur - nrm.buf));
sprintf(in->cur, " %c %s", op, nrm.buf);
in->cur = strchr(in->cur, '\0');
pfree(nrm.buf);
if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
in->cur = strchr(in->cur, '\0');
}
}
}
Datum
ltxtq_out(PG_FUNCTION_ARGS)
{
ltxtquery *query = PG_GETARG_LTXTQUERY(0);
INFIX nrm;
if (query->size == 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error"),
errdetail("Empty query.")));
nrm.curpol = GETQUERY(query);
nrm.buflen = 32;
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
*(nrm.cur) = '\0';
nrm.op = GETOPERAND(query);
infix(&nrm, true);
PG_FREE_IF_COPY(query, 0);
PG_RETURN_POINTER(nrm.buf);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2020_7 |
crossvul-cpp_data_good_3498_7 | /*
* linux/kernel/time.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file contains the interface functions for the various
* time related system calls: time, stime, gettimeofday, settimeofday,
* adjtime
*/
/*
* Modification history kernel/time.c
*
* 1993-09-02 Philip Gladstone
* Created file with time related functions from sched.c and adjtimex()
* 1993-10-08 Torsten Duwe
* adjtime interface update and CMOS clock write code
* 1995-08-13 Torsten Duwe
* kernel PLL updated to 1994-12-13 specs (rfc-1589)
* 1999-01-16 Ulrich Windl
* Introduced error checking for many cases in adjtimex().
* Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
* Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
* (Even though the technical memorandum forbids it)
* 2004-07-14 Christoph Lameter
* Added getnstimeofday to allow the posix timer functions to return
* with nanosecond accuracy
*/
#include <linux/module.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "timeconst.h"
/*
* The timezone where the local system is located. Used as a default by some
* programs that obtain this value by calling gettimeofday.
*/
struct timezone sys_tz;
EXPORT_SYMBOL(sys_tz);
#ifdef __ARCH_WANT_SYS_TIME
/*
* sys_time() can be implemented in user-level using
* sys_gettimeofday(). Is this for backwards compatibility? If so,
* why not move it into the appropriate arch directory (for those
* architectures that need it).
*/
asmlinkage long sys_time(time_t __user * tloc)
{
time_t i = get_seconds();
if (tloc) {
if (put_user(i,tloc))
i = -EFAULT;
}
return i;
}
/*
* sys_stime() can be implemented in user-level using
* sys_settimeofday(). Is this for backwards compatibility? If so,
* why not move it into the appropriate arch directory (for those
* architectures that need it).
*/
asmlinkage long sys_stime(time_t __user *tptr)
{
struct timespec tv;
int err;
if (get_user(tv.tv_sec, tptr))
return -EFAULT;
tv.tv_nsec = 0;
err = security_settime(&tv, NULL);
if (err)
return err;
do_settimeofday(&tv);
return 0;
}
#endif /* __ARCH_WANT_SYS_TIME */
asmlinkage long sys_gettimeofday(struct timeval __user *tv,
struct timezone __user *tz)
{
if (likely(tv != NULL)) {
struct timeval ktv;
do_gettimeofday(&ktv);
if (copy_to_user(tv, &ktv, sizeof(ktv)))
return -EFAULT;
}
if (unlikely(tz != NULL)) {
if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
return -EFAULT;
}
return 0;
}
/*
* Adjust the time obtained from the CMOS to be UTC time instead of
* local time.
*
* This is ugly, but preferable to the alternatives. Otherwise we
* would either need to write a program to do it in /etc/rc (and risk
* confusion if the program gets run more than once; it would also be
* hard to make the program warp the clock precisely n hours) or
* compile in the timezone information into the kernel. Bad, bad....
*
* - TYT, 1992-01-01
*
* The best thing to do is to keep the CMOS clock in universal time (UTC)
* as real UNIX machines always do it. This avoids all headaches about
* daylight saving times and warping kernel clocks.
*/
static inline void warp_clock(void)
{
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
update_xtime_cache(0);
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
/*
* In case for some reason the CMOS clock has not already been running
* in UTC, but in some local time: The first time we set the timezone,
* we will warp the clock so that it is ticking UTC time instead of
* local time. Presumably, if someone is setting the timezone then we
* are running in an environment where the programs understand about
* timezones. This should be done at boot time in the /etc/rc script,
* as soon as possible, so that the clock can be set right. Otherwise,
* various programs will get confused when the clock gets warped.
*/
int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
{
static int firsttime = 1;
int error = 0;
if (tv && !timespec_valid(tv))
return -EINVAL;
error = security_settime(tv, tz);
if (error)
return error;
if (tz) {
/* SMP safe, global irq locking makes it work. */
sys_tz = *tz;
update_vsyscall_tz();
if (firsttime) {
firsttime = 0;
if (!tv)
warp_clock();
}
}
if (tv)
{
/* SMP safe, again the code in arch/foo/time.c should
* globally block out interrupts when it runs.
*/
return do_settimeofday(tv);
}
return 0;
}
asmlinkage long sys_settimeofday(struct timeval __user *tv,
struct timezone __user *tz)
{
struct timeval user_tv;
struct timespec new_ts;
struct timezone new_tz;
if (tv) {
if (copy_from_user(&user_tv, tv, sizeof(*tv)))
return -EFAULT;
new_ts.tv_sec = user_tv.tv_sec;
new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
}
if (tz) {
if (copy_from_user(&new_tz, tz, sizeof(*tz)))
return -EFAULT;
}
return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
asmlinkage long sys_adjtimex(struct timex __user *txc_p)
{
struct timex txc; /* Local copy of parameter */
int ret;
/* Copy the user data space into the kernel copy
* structure. But bear in mind that the structures
* may change
*/
if(copy_from_user(&txc, txc_p, sizeof(struct timex)))
return -EFAULT;
ret = do_adjtimex(&txc);
return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}
/**
* current_fs_time - Return FS time
* @sb: Superblock.
*
* Return the current time truncated to the time granularity supported by
* the fs.
*/
struct timespec current_fs_time(struct super_block *sb)
{
struct timespec now = current_kernel_time();
return timespec_trunc(now, sb->s_time_gran);
}
EXPORT_SYMBOL(current_fs_time);
/*
* Convert jiffies to milliseconds and back.
*
* Avoid unnecessary multiplications/divisions in the
* two most common HZ cases:
*/
unsigned int inline jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return ((u64)HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
unsigned int inline jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return ((u64)HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);
/**
* timespec_trunc - Truncate timespec to a granularity
* @t: Timespec
* @gran: Granularity in ns.
*
* Truncate a timespec to a granularity. gran must be smaller than a second.
* Always rounds down.
*
* This function should be only used for timestamps returned by
* current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
* it doesn't handle the better resolution of the latter.
*/
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
/*
* Division is pretty slow so avoid it for common cases.
* Currently current_kernel_time() never returns better than
* jiffies resolution. Exploit that.
*/
if (gran <= jiffies_to_usecs(1) * 1000) {
/* nothing */
} else if (gran == 1000000000) {
t.tv_nsec = 0;
} else {
t.tv_nsec -= t.tv_nsec % gran;
}
return t;
}
EXPORT_SYMBOL(timespec_trunc);
#ifndef CONFIG_GENERIC_TIME
/*
* Simulate gettimeofday using do_gettimeofday which only allows a timeval
* and therefore only yields usec accuracy
*/
void getnstimeofday(struct timespec *tv)
{
struct timeval x;
do_gettimeofday(&x);
tv->tv_sec = x.tv_sec;
tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
* => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
*
* [For the Julian calendar (which was used in Russia before 1917,
* Britain & colonies before 1752, anywhere else before 1582,
* and is still in use by some communities) leave out the
* -year/100+year/400 terms, and add 10.]
*
* This algorithm was first published by Gauss (I think).
*
* WARNING: this function will overflow on 2106-02-07 06:28:16 on
* machines where long is 32-bit! (However, as time_t is signed, we
* will already get problems at other places on 2038-01-19 03:14:08)
*/
unsigned long
mktime(const unsigned int year0, const unsigned int mon0,
const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec)
{
unsigned int mon = mon0, year = year0;
/* 1..12 -> 11,12,1..10 */
if (0 >= (int) (mon -= 2)) {
mon += 12; /* Puts Feb last since it has leap day */
year -= 1;
}
return ((((unsigned long)
(year/4 - year/100 + year/400 + 367*mon/12 + day) +
year*365 - 719499
)*24 + hour /* now have hours */
)*60 + min /* now have minutes */
)*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime);
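/*
 * Worked check of the formula above: mktime(1970, 1, 1, 0, 0, 0)
 * remaps to mon = 11, year = 1969, so the day count is
 *
 *   1969/4 - 1969/100 + 1969/400 + 367*11/12 + 1
 *     = 492 - 19 + 4 + 336 + 1 = 814
 *
 * and 814 + 1969*365 - 719499 = 814 + 718685 - 719499 = 0 days,
 * i.e. 0 seconds - the Unix epoch, as expected.
 */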
/**
* set_normalized_timespec - set timespec sec and nsec parts and normalize
*
* @ts: pointer to timespec variable to be set
* @sec: seconds to set
* @nsec: nanoseconds to set
*
* Set seconds and nanoseconds field of a timespec variable and
* normalize to the timespec storage format
*
* Note: The tv_nsec part is always in the range of
* 0 <= tv_nsec < NSEC_PER_SEC
* For negative values only the tv_sec field is negative !
*/
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
{
while (nsec >= NSEC_PER_SEC) {
nsec -= NSEC_PER_SEC;
++sec;
}
while (nsec < 0) {
nsec += NSEC_PER_SEC;
--sec;
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);
/**
* ns_to_timespec - Convert nanoseconds to timespec
* @nsec: the nanoseconds value to be converted
*
* Returns the timespec representation of the nsec parameter.
*/
struct timespec ns_to_timespec(const s64 nsec)
{
struct timespec ts;
s32 rem;
if (!nsec)
return (struct timespec) {0, 0};
ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
if (unlikely(rem < 0)) {
ts.tv_sec--;
rem += NSEC_PER_SEC;
}
ts.tv_nsec = rem;
return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
/**
* ns_to_timeval - Convert nanoseconds to timeval
* @nsec: the nanoseconds value to be converted
*
* Returns the timeval representation of the nsec parameter.
*/
struct timeval ns_to_timeval(const s64 nsec)
{
struct timespec ts = ns_to_timespec(nsec);
struct timeval tv;
tv.tv_sec = ts.tv_sec;
tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;
return tv;
}
EXPORT_SYMBOL(ns_to_timeval);
/*
* When we convert to jiffies then we interpret incoming values
* the following way:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor
*
* We must also be careful about 32-bit overflows.
*/
unsigned long msecs_to_jiffies(const unsigned int m)
{
/*
* Negative value, means infinite timeout:
*/
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
* HZ is equal to or smaller than 1000, and 1000 is a nice
* round multiple of HZ, divide with the factor between them,
* but round upwards:
*/
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
/*
* HZ is larger than 1000, and HZ is a nice round multiple of
* 1000 - simply multiply with the factor between them.
*
* But first make sure the multiplication result cannot
* overflow:
*/
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return m * (HZ / MSEC_PER_SEC);
#else
/*
* Generic case - multiply, round and divide. But first
* check that if we are doing a net multiplication, that
* we wouldn't overflow:
*/
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return ((u64)MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
>> MSEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(msecs_to_jiffies);
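/*
 * Worked example of the round-up above, assuming HZ = 250 (one jiffy
 * is then 4 ms): msecs_to_jiffies(1) = (1 + 3) / 4 = 1 jiffy and
 * msecs_to_jiffies(5) = (5 + 3) / 4 = 2 jiffies. Rounding upwards
 * means a requested timeout can only ever be lengthened, never
 * silently truncated to something shorter than the caller asked for.
 */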
unsigned long usecs_to_jiffies(const unsigned int u)
{
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return u * (HZ / USEC_PER_SEC);
#else
return ((u64)USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
>> USEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(usecs_to_jiffies);
/*
* The TICK_NSEC - 1 rounds up the value to the next resolution. Note
* that a remainder subtract here would not do the right thing as the
* resolution values don't fall on second boundaries. I.e. the line:
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
*
* Rather, we just shift the bits off the right.
*
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
* value to a scaled second value.
*/
unsigned long
timespec_to_jiffies(const struct timespec *value)
{
unsigned long sec = value->tv_sec;
long nsec = value->tv_nsec + TICK_NSEC - 1;
if (sec >= MAX_SEC_IN_JIFFIES){
sec = MAX_SEC_IN_JIFFIES;
nsec = 0;
}
return (((u64)sec * SEC_CONVERSION) +
(((u64)nsec * NSEC_CONVERSION) >>
(NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timespec_to_jiffies);
void
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
u32 rem;
value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
NSEC_PER_SEC, &rem);
value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec);
/* Same for "timeval"
*
* Well, almost. The problem here is that the real system resolution is
* in nanoseconds and the value being converted is in microseconds.
* Also for some machines (those that use HZ = 1024, in particular),
* there is a LARGE error in the tick size in microseconds.
* The solution we use is to do the rounding AFTER we convert the
* microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
* Instruction-wise, this should cost only one additional add-with-carry
* over the timespec_to_jiffies() version above.
*/
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
unsigned long sec = value->tv_sec;
long usec = value->tv_usec;
if (sec >= MAX_SEC_IN_JIFFIES){
sec = MAX_SEC_IN_JIFFIES;
usec = 0;
}
return (((u64)sec * SEC_CONVERSION) +
(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
(USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timeval_to_jiffies);
void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
u32 rem;
value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);
/*
* Convert jiffies/jiffies_64 to clock_t and back.
*/
clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
return x * (USER_HZ / HZ);
# else
return x / (HZ / USER_HZ);
# endif
#else
return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ)==0
if (x >= ~0UL / (HZ / USER_HZ))
return ~0UL;
return x * (HZ / USER_HZ);
#else
/* Don't worry about loss of precision here .. */
if (x >= ~0UL / HZ * USER_HZ)
return ~0UL;
/* .. but do try to contain it here */
return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
x = div_u64(x, HZ / USER_HZ);
# else
/* Nothing to do */
# endif
#else
/*
* There are better ways that don't overflow early,
* but even this doesn't overflow in hundreds of years
* in 64 bits, so..
*/
x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);
u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
* overflow after 64.99 years.
* exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
*/
return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
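/*
 * Worked example of the generic branch above, assuming USER_HZ = 60
 * (which does not divide NSEC_PER_SEC): the precomputed divisor is
 *
 *   (9 * 1000000000 + 30) / 60 = 150000000
 *
 * and x * 9 / 150000000 equals x * 60 / 1000000000 exactly, so for
 * this tick rate no precision is lost, while the small multiplier 9
 * keeps the intermediate product well clear of 64-bit overflow.
 */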
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
unsigned long seq;
u64 ret;
do {
seq = read_seqbegin(&xtime_lock);
ret = jiffies_64;
} while (read_seqretry(&xtime_lock, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
#endif
EXPORT_SYMBOL(jiffies);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3498_7 |
crossvul-cpp_data_bad_728_0 | // SPDX-License-Identifier: BSD-2-Clause
/*
* Copyright (c) 2014, STMicroelectronics International N.V.
* Copyright (c) 2015-2017 Linaro Limited
*/
#include <assert.h>
#include <compiler.h>
#include <ctype.h>
#include <initcall.h>
#include <keep.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/user_ta.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/mobj.h>
#include <mm/pgt_cache.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <signed_hdr.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <ta_pub_key.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_obj.h>
#include <tee/tee_svc_cryp.h>
#include <tee/tee_svc.h>
#include <tee/tee_svc_storage.h>
#include <tee/uuid.h>
#include <trace.h>
#include <types_ext.h>
#include <utee_defines.h>
#include <util.h>
#include "elf_common.h"
#include "elf_load.h"
#include "elf_load_dyn.h"
/* ELF file used by a TA (main executable or dynamic library) */
struct user_ta_elf {
TEE_UUID uuid;
struct elf_load_state *elf_state;
struct mobj *mobj_code;
vaddr_t load_addr;
vaddr_t exidx_start; /* 32-bit ELF only */
size_t exidx_size;
struct load_seg *segs;
size_t num_segs;
TAILQ_ENTRY(user_ta_elf) link;
};
static void free_elfs(struct user_ta_elf_head *elfs)
{
struct user_ta_elf *elf;
struct user_ta_elf *next;
TAILQ_FOREACH_SAFE(elf, elfs, link, next) {
TAILQ_REMOVE(elfs, elf, link);
mobj_free(elf->mobj_code);
free(elf->segs);
free(elf);
}
}
static struct user_ta_elf *find_ta_elf(const TEE_UUID *uuid,
struct user_ta_ctx *utc)
{
struct user_ta_elf *elf;
TAILQ_FOREACH(elf, &utc->elfs, link)
if (!memcmp(&elf->uuid, uuid, sizeof(*uuid)))
return elf;
return NULL;
}
static struct user_ta_elf *ta_elf(const TEE_UUID *uuid,
struct user_ta_ctx *utc)
{
struct user_ta_elf *elf;
elf = find_ta_elf(uuid, utc);
if (elf)
goto out;
elf = calloc(1, sizeof(*elf));
if (!elf)
goto out;
elf->uuid = *uuid;
TAILQ_INSERT_TAIL(&utc->elfs, elf, link);
out:
return elf;
}
static uint32_t elf_flags_to_mattr(uint32_t flags)
{
uint32_t mattr = 0;
if (flags & PF_X)
mattr |= TEE_MATTR_UX;
if (flags & PF_W)
mattr |= TEE_MATTR_UW;
if (flags & PF_R)
mattr |= TEE_MATTR_UR;
return mattr;
}
struct load_seg {
vaddr_t offs;
uint32_t flags;
vaddr_t oend;
vaddr_t va;
size_t size;
};
static TEE_Result get_elf_segments(struct user_ta_elf *elf,
struct load_seg **segs_ret,
size_t *num_segs_ret)
{
struct elf_load_state *elf_state = elf->elf_state;
TEE_Result res;
size_t idx = 0;
size_t num_segs = 0;
struct load_seg *segs = NULL;
/*
* Add code segment
*/
while (true) {
vaddr_t va;
size_t size;
uint32_t flags;
uint32_t type;
res = elf_load_get_next_segment(elf_state, &idx, &va, &size,
&flags, &type);
if (res == TEE_ERROR_ITEM_NOT_FOUND)
break;
if (res != TEE_SUCCESS)
return res;
if (type == PT_LOAD) {
void *p = realloc(segs, (num_segs + 1) * sizeof(*segs));
if (!p) {
free(segs);
return TEE_ERROR_OUT_OF_MEMORY;
}
segs = p;
segs[num_segs].offs = ROUNDDOWN(va, SMALL_PAGE_SIZE);
segs[num_segs].oend = ROUNDUP(va + size,
SMALL_PAGE_SIZE);
segs[num_segs].flags = flags;
num_segs++;
} else if (type == PT_ARM_EXIDX) {
elf->exidx_start = va;
elf->exidx_size = size;
}
}
idx = 1;
while (idx < num_segs) {
size_t this_size = segs[idx].oend - segs[idx].offs;
size_t prev_size = segs[idx - 1].oend - segs[idx - 1].offs;
if (core_is_buffer_intersect(segs[idx].offs, this_size,
segs[idx - 1].offs, prev_size)) {
/* Merge the segments and their attributes */
segs[idx - 1].oend = MAX(segs[idx - 1].oend,
segs[idx].oend);
segs[idx - 1].flags |= segs[idx].flags;
/* Remove this index */
memcpy(segs + idx, segs + idx + 1,
(num_segs - idx - 1) * sizeof(*segs));
num_segs--;
} else {
idx++;
}
}
*segs_ret = segs;
*num_segs_ret = num_segs;
return TEE_SUCCESS;
}
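/*
 * Worked example of the merge loop above (addresses are illustrative):
 * with 4 KiB pages, a read-only segment at va 0x1f00 of size 0x200
 * rounds to [0x1000, 0x3000) and a read-write segment at va 0x2800 of
 * size 0x100 rounds to [0x2000, 0x3000). The rounded ranges intersect,
 * so the two entries are merged into one segment [0x1000, 0x3000)
 * carrying PF_R | PF_W, and the second array slot is removed.
 */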
static struct mobj *alloc_ta_mem(size_t size)
{
#ifdef CFG_PAGED_USER_TA
return mobj_paged_alloc(size);
#else
struct mobj *mobj = mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr);
if (mobj)
memset(mobj_get_va(mobj, 0), 0, size);
return mobj;
#endif
}
static void init_utee_param(struct utee_params *up,
const struct tee_ta_param *p, void *va[TEE_NUM_PARAMS])
{
size_t n;
up->types = p->types;
for (n = 0; n < TEE_NUM_PARAMS; n++) {
uintptr_t a;
uintptr_t b;
switch (TEE_PARAM_TYPE_GET(p->types, n)) {
case TEE_PARAM_TYPE_MEMREF_INPUT:
case TEE_PARAM_TYPE_MEMREF_OUTPUT:
case TEE_PARAM_TYPE_MEMREF_INOUT:
a = (uintptr_t)va[n];
b = p->u[n].mem.size;
break;
case TEE_PARAM_TYPE_VALUE_INPUT:
case TEE_PARAM_TYPE_VALUE_INOUT:
a = p->u[n].val.a;
b = p->u[n].val.b;
break;
default:
a = 0;
b = 0;
break;
}
/* See comment for struct utee_params in utee_types.h */
up->vals[n * 2] = a;
up->vals[n * 2 + 1] = b;
}
}
static void update_from_utee_param(struct tee_ta_param *p,
const struct utee_params *up)
{
size_t n;
for (n = 0; n < TEE_NUM_PARAMS; n++) {
switch (TEE_PARAM_TYPE_GET(p->types, n)) {
case TEE_PARAM_TYPE_MEMREF_OUTPUT:
case TEE_PARAM_TYPE_MEMREF_INOUT:
/* See comment for struct utee_params in utee_types.h */
p->u[n].mem.size = up->vals[n * 2 + 1];
break;
case TEE_PARAM_TYPE_VALUE_OUTPUT:
case TEE_PARAM_TYPE_VALUE_INOUT:
/* See comment for struct utee_params in utee_types.h */
p->u[n].val.a = up->vals[n * 2];
p->u[n].val.b = up->vals[n * 2 + 1];
break;
default:
break;
}
}
}
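/*
 * The flattened layout used above stores parameter n in vals[2n] and
 * vals[2n + 1]. A hedged user-space sketch of reading one pair back
 * out (the struct mirrors the layout described here, it is not a
 * public API):
 *
 *   #include <stdint.h>
 *
 *   struct utee_params_view {
 *           uint64_t types;
 *           uint64_t vals[8];       // 4 params x (a, b) slots
 *   };
 *
 *   static void get_param(const struct utee_params_view *up, int n,
 *                         uint64_t *a, uint64_t *b)
 *   {
 *           *a = up->vals[n * 2];      // value .a, or memref address
 *           *b = up->vals[n * 2 + 1];  // value .b, or memref size
 *   }
 *
 * Memrefs reuse the same two slots as (address, size), which is why
 * update_from_utee_param() reads back only the size slot for them.
 */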
static void clear_vfp_state(struct user_ta_ctx *utc __unused)
{
#ifdef CFG_WITH_VFP
thread_user_clear_vfp(&utc->vfp);
#endif
}
static TEE_Result user_ta_enter(TEE_ErrorOrigin *err,
struct tee_ta_session *session,
enum utee_entry_func func, uint32_t cmd,
struct tee_ta_param *param)
{
TEE_Result res;
struct utee_params *usr_params;
uaddr_t usr_stack;
struct user_ta_ctx *utc = to_user_ta_ctx(session->ctx);
TEE_ErrorOrigin serr = TEE_ORIGIN_TEE;
struct tee_ta_session *s __maybe_unused;
void *param_va[TEE_NUM_PARAMS] = { NULL };
/* Map user space memory */
res = tee_mmu_map_param(utc, param, param_va);
if (res != TEE_SUCCESS)
goto cleanup_return;
/* Switch to user ctx */
tee_ta_push_current_session(session);
/* Make room for usr_params at top of stack */
usr_stack = utc->stack_addr + utc->mobj_stack->size;
usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
usr_params = (struct utee_params *)usr_stack;
init_utee_param(usr_params, param, param_va);
res = thread_enter_user_mode(func, tee_svc_kaddr_to_uref(session),
(vaddr_t)usr_params, cmd, usr_stack,
utc->entry_func, utc->is_32bit,
&utc->ctx.panicked, &utc->ctx.panic_code);
clear_vfp_state(utc);
/*
	 * According to the GP spec the origin should always be set to the
	 * TA after TA execution
*/
serr = TEE_ORIGIN_TRUSTED_APP;
if (utc->ctx.panicked) {
DMSG("tee_user_ta_enter: TA panicked with code 0x%x\n",
utc->ctx.panic_code);
serr = TEE_ORIGIN_TEE;
res = TEE_ERROR_TARGET_DEAD;
}
/* Copy out value results */
update_from_utee_param(param, usr_params);
s = tee_ta_pop_current_session();
assert(s == session);
cleanup_return:
/*
* Clear the cancel state now that the user TA has returned. The next
* time the TA will be invoked will be with a new operation and should
* not have an old cancellation pending.
*/
session->cancel = false;
/*
* Can't update *err until now since it may point to an address
* mapped for the user mode TA.
*/
*err = serr;
return res;
}
static TEE_Result user_ta_enter_open_session(struct tee_ta_session *s,
struct tee_ta_param *param, TEE_ErrorOrigin *eo)
{
return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_OPEN_SESSION, 0, param);
}
static TEE_Result user_ta_enter_invoke_cmd(struct tee_ta_session *s,
uint32_t cmd, struct tee_ta_param *param,
TEE_ErrorOrigin *eo)
{
return user_ta_enter(eo, s, UTEE_ENTRY_FUNC_INVOKE_COMMAND, cmd, param);
}
static void user_ta_enter_close_session(struct tee_ta_session *s)
{
TEE_ErrorOrigin eo;
struct tee_ta_param param = { 0 };
user_ta_enter(&eo, s, UTEE_ENTRY_FUNC_CLOSE_SESSION, 0, ¶m);
}
static int elf_idx(struct user_ta_ctx *utc, vaddr_t r_va, size_t r_size)
{
struct user_ta_elf *elf;
int idx = 0;
TAILQ_FOREACH(elf, &utc->elfs, link) {
size_t n;
for (n = 0; n < elf->num_segs; n++)
if (elf->segs[n].va == r_va &&
elf->segs[n].size == r_size)
return idx;
idx++;
}
return -1;
}
static void describe_region(struct user_ta_ctx *utc, vaddr_t va, size_t size,
char *desc, size_t desc_size)
{
int idx;
if (!desc_size)
return;
idx = elf_idx(utc, va, size);
if (idx != -1)
snprintf(desc, desc_size, "[%d]", idx);
else
desc[0] = '\0';
desc[desc_size - 1] = '\0';
}
static void show_elfs(struct user_ta_ctx *utc)
{
struct user_ta_elf *elf;
size_t __maybe_unused idx = 0;
TAILQ_FOREACH(elf, &utc->elfs, link)
EMSG_RAW(" [%zu] %pUl @ %#" PRIxVA, idx++,
(void *)&elf->uuid, elf->load_addr);
}
static void user_ta_dump_state(struct tee_ta_ctx *ctx)
{
struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
struct vm_region *r;
char flags[7] = { '\0', };
char desc[13];
size_t n = 0;
EMSG_RAW(" arch: %s load address: %#" PRIxVA " ctx-idr: %d",
utc->is_32bit ? "arm" : "aarch64", utc->load_addr,
utc->vm_info->asid);
EMSG_RAW(" stack: 0x%" PRIxVA " %zu",
utc->stack_addr, utc->mobj_stack->size);
TAILQ_FOREACH(r, &utc->vm_info->regions, link) {
paddr_t pa = 0;
if (r->mobj)
mobj_get_pa(r->mobj, r->offset, 0, &pa);
mattr_perm_to_str(flags, sizeof(flags), r->attr);
describe_region(utc, r->va, r->size, desc, sizeof(desc));
EMSG_RAW(" region %zu: va %#" PRIxVA " pa %#" PRIxPA
" size %#zx flags %s %s",
n, r->va, pa, r->size, flags, desc);
n++;
}
show_elfs(utc);
}
KEEP_PAGER(user_ta_dump_state);
static void release_ta_memory_by_mobj(struct mobj *mobj)
{
void *va;
if (!mobj)
return;
va = mobj_get_va(mobj, 0);
if (!va)
return;
memset(va, 0, mobj->size);
cache_op_inner(DCACHE_AREA_CLEAN, va, mobj->size);
}
static void free_utc(struct user_ta_ctx *utc)
{
struct user_ta_elf *elf;
tee_pager_rem_uta_areas(utc);
TAILQ_FOREACH(elf, &utc->elfs, link)
release_ta_memory_by_mobj(elf->mobj_code);
release_ta_memory_by_mobj(utc->mobj_stack);
release_ta_memory_by_mobj(utc->mobj_exidx);
/*
* Close sessions opened by this TA
* Note that tee_ta_close_session() removes the item
* from the utc->open_sessions list.
*/
while (!TAILQ_EMPTY(&utc->open_sessions)) {
tee_ta_close_session(TAILQ_FIRST(&utc->open_sessions),
&utc->open_sessions, KERN_IDENTITY);
}
vm_info_final(utc);
mobj_free(utc->mobj_stack);
mobj_free(utc->mobj_exidx);
free_elfs(&utc->elfs);
/* Free cryp states created by this TA */
tee_svc_cryp_free_states(utc);
/* Close cryp objects opened by this TA */
tee_obj_close_all(utc);
	/* Free enums created by this TA */
tee_svc_storage_close_all_enum(utc);
free(utc);
}
static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
{
free_utc(to_user_ta_ctx(ctx));
}
static uint32_t user_ta_get_instance_id(struct tee_ta_ctx *ctx)
{
return to_user_ta_ctx(ctx)->vm_info->asid;
}
static const struct tee_ta_ops user_ta_ops __rodata_unpaged = {
.enter_open_session = user_ta_enter_open_session,
.enter_invoke_cmd = user_ta_enter_invoke_cmd,
.enter_close_session = user_ta_enter_close_session,
.dump_state = user_ta_dump_state,
.destroy = user_ta_ctx_destroy,
.get_instance_id = user_ta_get_instance_id,
};
/*
* Break unpaged attribute dependency propagation to user_ta_ops structure
* content thanks to a runtime initialization of the ops reference.
*/
static struct tee_ta_ops const *_user_ta_ops;
static TEE_Result init_user_ta(void)
{
_user_ta_ops = &user_ta_ops;
return TEE_SUCCESS;
}
service_init(init_user_ta);
static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
{
ctx->ops = _user_ta_ops;
}
bool is_user_ta_ctx(struct tee_ta_ctx *ctx)
{
return ctx->ops == _user_ta_ops;
}
static TEE_Result check_ta_store(void)
{
const struct user_ta_store_ops *op = NULL;
SCATTERED_ARRAY_FOREACH(op, ta_stores, struct user_ta_store_ops)
DMSG("TA store: \"%s\"", op->description);
return TEE_SUCCESS;
}
service_init(check_ta_store);
#ifdef CFG_TA_DYNLINK
static int hex(char c)
{
char lc = tolower(c);
if (isdigit(lc))
return lc - '0';
if (isxdigit(lc))
return lc - 'a' + 10;
return -1;
}
static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
{
uint32_t v = 0;
size_t n;
int c;
for (n = 0; n < nchars; n++) {
c = hex(s[n]);
		if (c < 0) {	/* not a hex digit */
*res = TEE_ERROR_BAD_FORMAT;
goto out;
}
v = (v << 4) + c;
}
*res = TEE_SUCCESS;
out:
return v;
}
/*
* Convert a UUID string @s into a TEE_UUID @uuid
* Expected format for @s is: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
* 'x' being any hexadecimal digit (0-9a-fA-F)
*/
static TEE_Result parse_uuid(const char *s, TEE_UUID *uuid)
{
TEE_Result res = TEE_SUCCESS;
TEE_UUID u = { 0 };
const char *p = s;
size_t i;
if (strlen(p) != 36)
return TEE_ERROR_BAD_FORMAT;
if (p[8] != '-' || p[13] != '-' || p[18] != '-' || p[23] != '-')
return TEE_ERROR_BAD_FORMAT;
u.timeLow = parse_hex(p, 8, &res);
if (res)
goto out;
p += 9;
u.timeMid = parse_hex(p, 4, &res);
if (res)
goto out;
p += 5;
u.timeHiAndVersion = parse_hex(p, 4, &res);
if (res)
goto out;
p += 5;
for (i = 0; i < 8; i++) {
u.clockSeqAndNode[i] = parse_hex(p, 2, &res);
if (res)
goto out;
if (i == 1)
p += 3;
else
p += 2;
}
*uuid = u;
out:
return res;
}
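/*
 * Example walk-through of the parser above (illustrative input):
 * "8aaaf200-2450-11e4-abe2-0002a5d5c51b" yields timeLow 0x8aaaf200,
 * timeMid 0x2450, timeHiAndVersion 0x11e4 and clockSeqAndNode
 * { 0xab, 0xe2, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b }; the pointer is
 * advanced by 3 after clockSeqAndNode[1] to skip the fourth '-'.
 */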
static TEE_Result add_elf_deps(struct user_ta_ctx *utc, char **deps,
size_t num_deps)
{
struct user_ta_elf *libelf;
TEE_Result res = TEE_SUCCESS;
TEE_UUID u;
size_t n;
for (n = 0; n < num_deps; n++) {
res = parse_uuid(deps[n], &u);
if (res) {
EMSG("Invalid dependency (not a UUID): %s", deps[n]);
goto out;
}
DMSG("Library needed: %pUl", (void *)&u);
libelf = ta_elf(&u, utc);
if (!libelf) {
res = TEE_ERROR_OUT_OF_MEMORY;
goto out;
}
}
out:
return res;
}
static TEE_Result resolve_symbol(struct user_ta_elf_head *elfs,
const char *name, uintptr_t *val)
{
struct user_ta_elf *elf;
TEE_Result res;
/*
* The loop naturally implements a breadth first search due to the
* order in which the libraries were added.
*/
TAILQ_FOREACH(elf, elfs, link) {
res = elf_resolve_symbol(elf->elf_state, name, val);
if (res == TEE_ERROR_ITEM_NOT_FOUND)
continue;
if (res)
return res;
*val += elf->load_addr;
FMSG("%pUl/0x%" PRIxPTR " %s", (void *)&elf->uuid, *val, name);
return TEE_SUCCESS;
}
return TEE_ERROR_ITEM_NOT_FOUND;
}
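/*
 * Lookup-order sketch: if executable A lists dependencies B and C, and
 * B in turn lists D, the list is built as A, B, C, D, so a symbol
 * defined in both C and D resolves to C's definition -- the library
 * closer to the executable wins.
 */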
static TEE_Result add_deps(struct user_ta_ctx *utc,
struct elf_load_state *state, vaddr_t load_addr)
{
char **deps = NULL;
size_t num_deps = 0;
TEE_Result res;
res = elf_get_needed(state, load_addr, &deps, &num_deps);
if (res)
return res;
res = add_elf_deps(utc, deps, num_deps);
free(deps);
return res;
}
#else
static TEE_Result (*resolve_symbol)(struct user_ta_elf_head *, const char *,
uintptr_t *);
static TEE_Result add_deps(struct user_ta_ctx *utc __unused,
struct elf_load_state *state __unused,
vaddr_t load_addr __unused)
{
return TEE_SUCCESS;
}
#endif
static TEE_Result load_elf_from_store(const TEE_UUID *uuid,
const struct user_ta_store_ops *ta_store,
struct user_ta_ctx *utc)
{
struct user_ta_store_handle *handle = NULL;
struct elf_load_state *elf_state = NULL;
struct ta_head *ta_head;
struct user_ta_elf *exe;
struct user_ta_elf *elf;
struct user_ta_elf *prev;
TEE_Result res;
size_t vasize;
void *p;
size_t n;
size_t num_segs = 0;
struct load_seg *segs = NULL;
res = ta_store->open(uuid, &handle);
if (res)
return res;
elf = ta_elf(uuid, utc);
if (!elf) {
res = TEE_ERROR_OUT_OF_MEMORY;
goto out;
}
exe = TAILQ_FIRST(&utc->elfs);
prev = TAILQ_PREV(elf, user_ta_elf_head, link);
res = elf_load_init(ta_store, handle, elf == exe, &utc->elfs,
resolve_symbol, &elf_state);
if (res)
goto out;
elf->elf_state = elf_state;
res = elf_load_head(elf_state,
elf == exe ? sizeof(struct ta_head) : 0,
&p, &vasize, &utc->is_32bit);
if (res)
goto out;
ta_head = p;
elf->mobj_code = alloc_ta_mem(vasize);
if (!elf->mobj_code) {
res = TEE_ERROR_OUT_OF_MEMORY;
goto out;
}
if (elf == exe) {
/* Ensure proper alignment of stack */
size_t stack_sz = ROUNDUP(ta_head->stack_size,
STACK_ALIGNMENT);
utc->mobj_stack = alloc_ta_mem(stack_sz);
if (!utc->mobj_stack) {
res = TEE_ERROR_OUT_OF_MEMORY;
goto out;
}
}
/*
* Map physical memory into TA virtual memory
*/
if (elf == exe) {
res = vm_info_init(utc);
if (res != TEE_SUCCESS)
goto out;
/* Add stack segment */
utc->stack_addr = 0;
res = vm_map(utc, &utc->stack_addr, utc->mobj_stack->size,
TEE_MATTR_URW | TEE_MATTR_PRW, utc->mobj_stack,
0);
if (res)
goto out;
}
res = get_elf_segments(elf, &segs, &num_segs);
if (res != TEE_SUCCESS)
goto out;
if (prev) {
elf->load_addr = prev->load_addr + prev->mobj_code->size;
elf->load_addr = ROUNDUP(elf->load_addr,
CORE_MMU_USER_CODE_SIZE);
}
for (n = 0; n < num_segs; n++) {
uint32_t prot = elf_flags_to_mattr(segs[n].flags) |
TEE_MATTR_PRW;
segs[n].va = elf->load_addr - segs[0].offs + segs[n].offs;
segs[n].size = segs[n].oend - segs[n].offs;
res = vm_map(utc, &segs[n].va, segs[n].size, prot,
elf->mobj_code, segs[n].offs);
if (res)
goto out;
if (!n) {
elf->load_addr = segs[0].va;
DMSG("ELF load address %#" PRIxVA, elf->load_addr);
}
}
tee_mmu_set_ctx(&utc->ctx);
res = elf_load_body(elf_state, elf->load_addr);
if (res)
goto out;
/* Find any external dependency (dynamically linked libraries) */
res = add_deps(utc, elf_state, elf->load_addr);
out:
if (res) {
free(segs);
} else {
elf->segs = segs;
elf->num_segs = num_segs;
}
ta_store->close(handle);
/* utc is cleaned by caller on error */
return res;
}
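/*
 * Address layout sketch (illustration): the executable is mapped first;
 * each subsequent library starts at the previous ELF's load_addr plus
 * its mobj size, rounded up to CORE_MMU_USER_CODE_SIZE, and each
 * segment va is derived as load_addr - segs[0].offs + segs[n].offs so
 * that the segment spacing from the ELF file is preserved.
 */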
/* Loads a single ELF file (main executable or library) */
static TEE_Result load_elf(const TEE_UUID *uuid, struct user_ta_ctx *utc)
{
TEE_Result res;
const struct user_ta_store_ops *op = NULL;
SCATTERED_ARRAY_FOREACH(op, ta_stores, struct user_ta_store_ops) {
DMSG("Lookup user TA ELF %pUl (%s)", (void *)uuid,
op->description);
res = load_elf_from_store(uuid, op, utc);
if (res == TEE_ERROR_ITEM_NOT_FOUND)
continue;
if (res) {
DMSG("res=0x%x", res);
continue;
}
return res;
}
return TEE_ERROR_ITEM_NOT_FOUND;
}
static void free_elf_states(struct user_ta_ctx *utc)
{
struct user_ta_elf *elf;
TAILQ_FOREACH(elf, &utc->elfs, link)
elf_load_final(elf->elf_state);
}
static TEE_Result set_seg_prot(struct user_ta_ctx *utc,
struct user_ta_elf *elf)
{
	TEE_Result res = TEE_SUCCESS;
size_t n;
for (n = 0; n < elf->num_segs; n++) {
struct load_seg *seg = &elf->segs[n];
res = vm_set_prot(utc, seg->va, seg->size,
elf_flags_to_mattr(seg->flags));
if (res)
break;
}
return res;
}
#ifdef CFG_UNWIND
/*
* 32-bit TAs: set the address and size of the exception index table (EXIDX).
* If the TA contains only one ELF, we point to its table. Otherwise, a
* consolidated table is made by concatenating the tables found in each ELF and
* adjusting their content to account for the offset relative to the original
* location.
*/
static TEE_Result set_exidx(struct user_ta_ctx *utc)
{
struct user_ta_elf *exe;
struct user_ta_elf *elf;
struct user_ta_elf *last_elf;
vaddr_t exidx;
size_t exidx_sz = 0;
TEE_Result res;
uint8_t *p;
if (!utc->is_32bit)
return TEE_SUCCESS;
exe = TAILQ_FIRST(&utc->elfs);
if (!TAILQ_NEXT(exe, link)) {
/* We have a single ELF: simply reference its table */
utc->exidx_start = exe->exidx_start;
utc->exidx_size = exe->exidx_size;
return TEE_SUCCESS;
}
last_elf = TAILQ_LAST(&utc->elfs, user_ta_elf_head);
TAILQ_FOREACH(elf, &utc->elfs, link)
exidx_sz += elf->exidx_size;
if (!exidx_sz) {
		/* The empty table from the first ELF will fit */
utc->exidx_start = exe->exidx_start;
utc->exidx_size = exe->exidx_size;
return TEE_SUCCESS;
}
utc->mobj_exidx = alloc_ta_mem(exidx_sz);
if (!utc->mobj_exidx)
return TEE_ERROR_OUT_OF_MEMORY;
exidx = ROUNDUP(last_elf->load_addr + last_elf->mobj_code->size,
CORE_MMU_USER_CODE_SIZE);
res = vm_map(utc, &exidx, exidx_sz, TEE_MATTR_UR | TEE_MATTR_PRW,
utc->mobj_exidx, 0);
if (res)
goto err;
DMSG("New EXIDX table mapped at 0x%" PRIxVA " size %zu",
exidx, exidx_sz);
p = (void *)exidx;
TAILQ_FOREACH(elf, &utc->elfs, link) {
void *e_exidx = (void *)(elf->exidx_start + elf->load_addr);
size_t e_exidx_sz = elf->exidx_size;
int32_t offs = (int32_t)((vaddr_t)e_exidx - (vaddr_t)p);
memcpy(p, e_exidx, e_exidx_sz);
res = relocate_exidx(p, e_exidx_sz, offs);
if (res)
goto err;
p += e_exidx_sz;
}
/*
* Drop privileged mode permissions. Normally we should keep
* TEE_MATTR_PR because the code that accesses this table runs in
* privileged mode. However, privileged read is always enabled if
* unprivileged read is enabled, so it doesn't matter. For consistency
* with other ELF section mappings, let's clear all the privileged
* permission bits.
*/
res = vm_set_prot(utc, exidx,
ROUNDUP(exidx_sz, SMALL_PAGE_SIZE),
TEE_MATTR_UR);
if (res)
goto err;
utc->exidx_start = exidx - utc->load_addr;
utc->exidx_size = exidx_sz;
return TEE_SUCCESS;
err:
mobj_free(utc->mobj_exidx);
utc->mobj_exidx = NULL;
return res;
}
#else /* CFG_UNWIND */
static TEE_Result set_exidx(struct user_ta_ctx *utc __unused)
{
return TEE_SUCCESS;
}
#endif /* CFG_UNWIND */
TEE_Result tee_ta_init_user_ta_session(const TEE_UUID *uuid,
struct tee_ta_session *s)
{
TEE_Result res;
struct user_ta_ctx *utc = NULL;
struct ta_head *ta_head;
struct user_ta_elf *exe;
struct user_ta_elf *elf;
/* Register context */
utc = calloc(1, sizeof(struct user_ta_ctx));
if (!utc)
return TEE_ERROR_OUT_OF_MEMORY;
TAILQ_INIT(&utc->open_sessions);
TAILQ_INIT(&utc->cryp_states);
TAILQ_INIT(&utc->objects);
TAILQ_INIT(&utc->storage_enums);
TAILQ_INIT(&utc->elfs);
/*
	 * Set the context TA operation structure. It is required by the
	 * generic implementation to distinguish userland TA from pseudo TA
	 * contexts.
*/
set_ta_ctx_ops(&utc->ctx);
/*
* Create entry for the main executable
*/
exe = ta_elf(uuid, utc);
if (!exe) {
res = TEE_ERROR_OUT_OF_MEMORY;
goto err;
}
/*
* Load binaries and map them into the TA virtual memory. load_elf()
* may add external libraries to the list, so the loop will end when
* all the dependencies are satisfied or an error occurs.
*/
TAILQ_FOREACH(elf, &utc->elfs, link) {
res = load_elf(&elf->uuid, utc);
if (res)
goto err;
}
/*
* Perform relocations and apply final memory attributes
*/
TAILQ_FOREACH(elf, &utc->elfs, link) {
DMSG("Processing relocations in %pUl", (void *)&elf->uuid);
res = elf_process_rel(elf->elf_state, elf->load_addr);
if (res)
goto err;
res = set_seg_prot(utc, elf);
if (res)
goto err;
}
utc->load_addr = exe->load_addr;
res = set_exidx(utc);
if (res)
goto err;
ta_head = (struct ta_head *)(vaddr_t)utc->load_addr;
if (memcmp(&ta_head->uuid, uuid, sizeof(TEE_UUID)) != 0) {
res = TEE_ERROR_SECURITY;
goto err;
}
if (ta_head->flags & ~TA_FLAGS_MASK) {
EMSG("Invalid TA flag(s) 0x%" PRIx32,
ta_head->flags & ~TA_FLAGS_MASK);
res = TEE_ERROR_BAD_FORMAT;
goto err;
}
utc->ctx.flags = ta_head->flags;
utc->ctx.uuid = ta_head->uuid;
utc->entry_func = ta_head->entry.ptr64;
utc->ctx.ref_count = 1;
condvar_init(&utc->ctx.busy_cv);
TAILQ_INSERT_TAIL(&tee_ctxes, &utc->ctx, link);
s->ctx = &utc->ctx;
free_elf_states(utc);
tee_mmu_set_ctx(NULL);
return TEE_SUCCESS;
err:
free_elf_states(utc);
tee_mmu_set_ctx(NULL);
free_utc(utc);
return res;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_728_0 |
crossvul-cpp_data_good_2020_4 | /*
* contrib/intarray/_int_bool.c
*/
#include "postgres.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "_int.h"
PG_FUNCTION_INFO_V1(bqarr_in);
PG_FUNCTION_INFO_V1(bqarr_out);
Datum bqarr_in(PG_FUNCTION_ARGS);
Datum bqarr_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(boolop);
Datum boolop(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(rboolop);
Datum rboolop(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(querytree);
Datum querytree(PG_FUNCTION_ARGS);
/* parser's states */
#define WAITOPERAND 1
#define WAITENDOPERAND 2
#define WAITOPERATOR 3
/*
 * Node of the query tree; also used
 * for storing Polish notation in the parser
*/
typedef struct NODE
{
int32 type;
int32 val;
struct NODE *next;
} NODE;
typedef struct
{
char *buf;
int32 state;
int32 count;
/* reverse polish notation in list (for temporary usage) */
NODE *str;
/* number in str */
int32 num;
} WORKSTATE;
/*
* get token from query string
*/
static int32
gettoken(WORKSTATE *state, int32 *val)
{
char nnn[16];
int innn;
*val = 0; /* default result */
innn = 0;
while (1)
{
if (innn >= sizeof(nnn))
return ERR; /* buffer overrun => syntax error */
switch (state->state)
{
case WAITOPERAND:
innn = 0;
if ((*(state->buf) >= '0' && *(state->buf) <= '9') ||
*(state->buf) == '-')
{
state->state = WAITENDOPERAND;
nnn[innn++] = *(state->buf);
}
else if (*(state->buf) == '!')
{
(state->buf)++;
*val = (int32) '!';
return OPR;
}
else if (*(state->buf) == '(')
{
state->count++;
(state->buf)++;
return OPEN;
}
else if (*(state->buf) != ' ')
return ERR;
break;
case WAITENDOPERAND:
if (*(state->buf) >= '0' && *(state->buf) <= '9')
{
nnn[innn++] = *(state->buf);
}
else
{
long lval;
nnn[innn] = '\0';
errno = 0;
lval = strtol(nnn, NULL, 0);
*val = (int32) lval;
if (errno != 0 || (long) *val != lval)
return ERR;
state->state = WAITOPERATOR;
return (state->count && *(state->buf) == '\0')
? ERR : VAL;
}
break;
case WAITOPERATOR:
if (*(state->buf) == '&' || *(state->buf) == '|')
{
state->state = WAITOPERAND;
*val = (int32) *(state->buf);
(state->buf)++;
return OPR;
}
else if (*(state->buf) == ')')
{
(state->buf)++;
state->count--;
return (state->count < 0) ? ERR : CLOSE;
}
else if (*(state->buf) == '\0')
return (state->count) ? ERR : END;
else if (*(state->buf) != ' ')
return ERR;
break;
default:
return ERR;
break;
}
(state->buf)++;
}
}
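/*
 * Tokenizer sketch (illustration): the input "1&(!2|3)" is emitted as
 * VAL(1) OPR('&') OPEN OPR('!') VAL(2) OPR('|') VAL(3) CLOSE END, with
 * state->count tracking the parenthesis nesting so that an unbalanced
 * ')' or a trailing '(' is reported as ERR.
 */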
/*
 * push a new node onto the reverse Polish notation list
*/
static void
pushquery(WORKSTATE *state, int32 type, int32 val)
{
NODE *tmp = (NODE *) palloc(sizeof(NODE));
tmp->type = type;
tmp->val = val;
tmp->next = state->str;
state->str = tmp;
state->num++;
}
#define STACKDEPTH 16
/*
* make polish notation of query
*/
static int32
makepol(WORKSTATE *state)
{
int32 val,
type;
int32 stack[STACKDEPTH];
int32 lenstack = 0;
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
while ((type = gettoken(state, &val)) != END)
{
switch (type)
{
case VAL:
pushquery(state, type, val);
while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
}
break;
case OPR:
if (lenstack && val == (int32) '|')
pushquery(state, OPR, val);
else
{
if (lenstack == STACKDEPTH)
ereport(ERROR,
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
errmsg("statement too complex")));
stack[lenstack] = val;
lenstack++;
}
break;
case OPEN:
if (makepol(state) == ERR)
return ERR;
while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
}
break;
case CLOSE:
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
};
return END;
break;
case ERR:
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
return ERR;
}
}
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
};
return END;
}
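/*
 * Example of the output above (illustration): "1 & (2 | !3)" is pushed
 * onto state->str as the postfix sequence 1 2 3 ! | &; since each node
 * is prepended, walking the list afterwards yields it in reverse, which
 * is exactly the order bqarr_in() stores into the ITEM array.
 */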
typedef struct
{
int32 *arrb;
int32 *arre;
} CHKVAL;
/*
 * is the value 'item->val' present in the (sorted) array or not?
*/
static bool
checkcondition_arr(void *checkval, ITEM *item)
{
int32 *StopLow = ((CHKVAL *) checkval)->arrb;
int32 *StopHigh = ((CHKVAL *) checkval)->arre;
int32 *StopMiddle;
/* Loop invariant: StopLow <= val < StopHigh */
while (StopLow < StopHigh)
{
StopMiddle = StopLow + (StopHigh - StopLow) / 2;
if (*StopMiddle == item->val)
return (true);
else if (*StopMiddle < item->val)
StopLow = StopMiddle + 1;
else
StopHigh = StopMiddle;
}
return false;
}
static bool
checkcondition_bit(void *checkval, ITEM *item)
{
return GETBIT(checkval, HASHVAL(item->val));
}
/*
* evaluate boolean expression, using chkcond() to test the primitive cases
*/
static bool
execute(ITEM *curitem, void *checkval, bool calcnot,
bool (*chkcond) (void *checkval, ITEM *item))
{
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
if (curitem->type == VAL)
return (*chkcond) (checkval, curitem);
else if (curitem->val == (int32) '!')
{
return (calcnot) ?
((execute(curitem - 1, checkval, calcnot, chkcond)) ? false : true)
: true;
}
else if (curitem->val == (int32) '&')
{
if (execute(curitem + curitem->left, checkval, calcnot, chkcond))
return execute(curitem - 1, checkval, calcnot, chkcond);
else
return false;
}
else
{ /* |-operator */
if (execute(curitem + curitem->left, checkval, calcnot, chkcond))
return true;
else
return execute(curitem - 1, checkval, calcnot, chkcond);
}
}
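/*
 * Evaluation sketch: the items are stored in postfix order, so
 * execute() starts at the last ITEM (the root operator) and recurses
 * towards the operands -- "curitem - 1" is the right operand and
 * "curitem + curitem->left" the left one, with the offsets precomputed
 * by findoprnd() below.
 */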
/*
* signconsistent & execconsistent called by *_consistent
*/
bool
signconsistent(QUERYTYPE *query, BITVEC sign, bool calcnot)
{
return execute(GETQUERY(query) + query->size - 1,
(void *) sign, calcnot,
checkcondition_bit);
}
/* Array must be sorted! */
bool
execconsistent(QUERYTYPE *query, ArrayType *array, bool calcnot)
{
CHKVAL chkval;
CHECKARRVALID(array);
chkval.arrb = ARRPTR(array);
chkval.arre = chkval.arrb + ARRNELEMS(array);
return execute(GETQUERY(query) + query->size - 1,
(void *) &chkval, calcnot,
checkcondition_arr);
}
typedef struct
{
ITEM *first;
bool *mapped_check;
} GinChkVal;
static bool
checkcondition_gin(void *checkval, ITEM *item)
{
GinChkVal *gcv = (GinChkVal *) checkval;
return gcv->mapped_check[item - gcv->first];
}
bool
gin_bool_consistent(QUERYTYPE *query, bool *check)
{
GinChkVal gcv;
ITEM *items = GETQUERY(query);
int i,
j = 0;
if (query->size <= 0)
return FALSE;
/*
* Set up data for checkcondition_gin. This must agree with the query
* extraction code in ginint4_queryextract.
*/
gcv.first = items;
gcv.mapped_check = (bool *) palloc(sizeof(bool) * query->size);
for (i = 0; i < query->size; i++)
{
if (items[i].type == VAL)
gcv.mapped_check[i] = check[j++];
}
return execute(GETQUERY(query) + query->size - 1,
(void *) &gcv, true,
checkcondition_gin);
}
static bool
contains_required_value(ITEM *curitem)
{
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
if (curitem->type == VAL)
return true;
else if (curitem->val == (int32) '!')
{
/*
* Assume anything under a NOT is non-required. For some cases with
* nested NOTs, we could prove there's a required value, but it seems
* unlikely to be worth the trouble.
*/
return false;
}
else if (curitem->val == (int32) '&')
{
/* If either side has a required value, we're good */
if (contains_required_value(curitem + curitem->left))
return true;
else
return contains_required_value(curitem - 1);
}
else
{ /* |-operator */
/* Both sides must have required values */
if (contains_required_value(curitem + curitem->left))
return contains_required_value(curitem - 1);
else
return false;
}
}
bool
query_has_required_values(QUERYTYPE *query)
{
if (query->size <= 0)
return false;
return contains_required_value(GETQUERY(query) + query->size - 1);
}
/*
* boolean operations
*/
Datum
rboolop(PG_FUNCTION_ARGS)
{
/* just reverse the operands */
return DirectFunctionCall2(boolop,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0));
}
Datum
boolop(PG_FUNCTION_ARGS)
{
ArrayType *val = PG_GETARG_ARRAYTYPE_P_COPY(0);
QUERYTYPE *query = PG_GETARG_QUERYTYPE_P(1);
CHKVAL chkval;
bool result;
CHECKARRVALID(val);
PREPAREARR(val);
chkval.arrb = ARRPTR(val);
chkval.arre = chkval.arrb + ARRNELEMS(val);
result = execute(GETQUERY(query) + query->size - 1,
&chkval, true,
checkcondition_arr);
pfree(val);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(result);
}
static void
findoprnd(ITEM *ptr, int32 *pos)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
#ifdef BS_DEBUG
elog(DEBUG3, (ptr[*pos].type == OPR) ?
"%d %c" : "%d %d", *pos, ptr[*pos].val);
#endif
if (ptr[*pos].type == VAL)
{
ptr[*pos].left = 0;
(*pos)--;
}
else if (ptr[*pos].val == (int32) '!')
{
ptr[*pos].left = -1;
(*pos)--;
findoprnd(ptr, pos);
}
else
{
ITEM *curitem = &ptr[*pos];
int32 tmp = *pos;
(*pos)--;
findoprnd(ptr, pos);
curitem->left = *pos - tmp;
findoprnd(ptr, pos);
}
}
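/*
 * Offset example (illustration): for the postfix array 1 2 3 ! | & the
 * root '&' at index 5 gets left = -5 (index 0, VAL 1), the '|' at
 * index 4 gets left = -3 (index 1, VAL 2), '!' has left = -1, and VAL
 * nodes have left = 0.
 */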
/*
* input
*/
Datum
bqarr_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
WORKSTATE state;
int32 i;
QUERYTYPE *query;
int32 commonlen;
ITEM *ptr;
NODE *tmp;
int32 pos = 0;
#ifdef BS_DEBUG
StringInfoData pbuf;
#endif
state.buf = buf;
state.state = WAITOPERAND;
state.count = 0;
state.num = 0;
state.str = NULL;
/* make polish notation (postfix, but in reverse order) */
makepol(&state);
if (!state.num)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("empty query")));
if (state.num > QUERYTYPEMAXITEMS)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of query items (%d) exceeds the maximum allowed (%d)",
state.num, (int) QUERYTYPEMAXITEMS)));
commonlen = COMPUTESIZE(state.num);
query = (QUERYTYPE *) palloc(commonlen);
SET_VARSIZE(query, commonlen);
query->size = state.num;
ptr = GETQUERY(query);
for (i = state.num - 1; i >= 0; i--)
{
ptr[i].type = state.str->type;
ptr[i].val = state.str->val;
tmp = state.str->next;
pfree(state.str);
state.str = tmp;
}
pos = query->size - 1;
findoprnd(ptr, &pos);
#ifdef BS_DEBUG
initStringInfo(&pbuf);
for (i = 0; i < query->size; i++)
{
if (ptr[i].type == OPR)
appendStringInfo(&pbuf, "%c(%d) ", ptr[i].val, ptr[i].left);
else
appendStringInfo(&pbuf, "%d ", ptr[i].val);
}
elog(DEBUG3, "POR: %s", pbuf.data);
pfree(pbuf.data);
#endif
PG_RETURN_POINTER(query);
}
/*
* out function
*/
typedef struct
{
ITEM *curpol;
char *buf;
char *cur;
int32 buflen;
} INFIX;
#define RESIZEBUF(inf,addsize) while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) { \
int32 len = inf->cur - inf->buf; \
inf->buflen *= 2; \
inf->buf = (char*) repalloc( (void*)inf->buf, inf->buflen ); \
inf->cur = inf->buf + len; \
}
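/*
 * Growth sketch for RESIZEBUF (illustration): starting from buflen = 16
 * the buffer doubles to 32, 64, ... until the pending write of
 * "addsize" bytes plus a terminating NUL fits, with "cur" re-anchored
 * after every repalloc since the buffer may move.
 */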
static void
infix(INFIX *in, bool first)
{
if (in->curpol->type == VAL)
{
RESIZEBUF(in, 11);
sprintf(in->cur, "%d", in->curpol->val);
in->cur = strchr(in->cur, '\0');
in->curpol--;
}
else if (in->curpol->val == (int32) '!')
{
bool isopr = false;
RESIZEBUF(in, 1);
*(in->cur) = '!';
in->cur++;
*(in->cur) = '\0';
in->curpol--;
if (in->curpol->type == OPR)
{
isopr = true;
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
in->cur = strchr(in->cur, '\0');
}
infix(in, isopr);
if (isopr)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
in->cur = strchr(in->cur, '\0');
}
}
else
{
int32 op = in->curpol->val;
INFIX nrm;
in->curpol--;
if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
in->cur = strchr(in->cur, '\0');
}
nrm.curpol = in->curpol;
nrm.buflen = 16;
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
/* get right operand */
infix(&nrm, false);
/* get & print left operand */
in->curpol = nrm.curpol;
infix(in, false);
/* print operator & right operand */
RESIZEBUF(in, 3 + (nrm.cur - nrm.buf));
sprintf(in->cur, " %c %s", op, nrm.buf);
in->cur = strchr(in->cur, '\0');
pfree(nrm.buf);
if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
in->cur = strchr(in->cur, '\0');
}
}
}
Datum
bqarr_out(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = PG_GETARG_QUERYTYPE_P(0);
INFIX nrm;
if (query->size == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("empty query")));
nrm.curpol = GETQUERY(query) + query->size - 1;
nrm.buflen = 32;
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
*(nrm.cur) = '\0';
infix(&nrm, true);
PG_FREE_IF_COPY(query, 0);
PG_RETURN_POINTER(nrm.buf);
}
/* Useless old "debugging" function for a fundamentally wrong algorithm */
Datum
querytree(PG_FUNCTION_ARGS)
{
elog(ERROR, "querytree is no longer implemented");
PG_RETURN_NULL();
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2020_4 |
crossvul-cpp_data_bad_5755_2 | /*
* BRIEF MODULE DESCRIPTION
* Au1200 LCD Driver.
*
* Copyright 2004-2005 AMD
* Author: AMD
*
* Based on:
* linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
* Created 28 Dec 1997 by Geert Uytterhoeven
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1200fb.h> /* platform_data */
#include "au1200fb.h"
#define DRIVER_NAME "au1200fb"
#define DRIVER_DESC "LCD controller driver for AU1200 processors"
#define DEBUG 0
#define print_err(f, arg...) printk(KERN_ERR DRIVER_NAME ": " f "\n", ## arg)
#define print_warn(f, arg...) printk(KERN_WARNING DRIVER_NAME ": " f "\n", ## arg)
#define print_info(f, arg...) printk(KERN_INFO DRIVER_NAME ": " f "\n", ## arg)
#if DEBUG
#define print_dbg(f, arg...) printk(KERN_DEBUG __FILE__ ": " f "\n", ## arg)
#else
#define print_dbg(f, arg...) do {} while (0)
#endif
#define AU1200_LCD_FB_IOCTL 0x46FF
#define AU1200_LCD_SET_SCREEN 1
#define AU1200_LCD_GET_SCREEN 2
#define AU1200_LCD_SET_WINDOW 3
#define AU1200_LCD_GET_WINDOW 4
#define AU1200_LCD_SET_PANEL 5
#define AU1200_LCD_GET_PANEL 6
#define SCREEN_SIZE (1<< 1)
#define SCREEN_BACKCOLOR (1<< 2)
#define SCREEN_BRIGHTNESS (1<< 3)
#define SCREEN_COLORKEY (1<< 4)
#define SCREEN_MASK (1<< 5)
struct au1200_lcd_global_regs_t {
unsigned int flags;
unsigned int xsize;
unsigned int ysize;
unsigned int backcolor;
unsigned int brightness;
unsigned int colorkey;
unsigned int mask;
unsigned int panel_choice;
char panel_desc[80];
};
#define WIN_POSITION (1<< 0)
#define WIN_ALPHA_COLOR (1<< 1)
#define WIN_ALPHA_MODE (1<< 2)
#define WIN_PRIORITY (1<< 3)
#define WIN_CHANNEL (1<< 4)
#define WIN_BUFFER_FORMAT (1<< 5)
#define WIN_COLOR_ORDER (1<< 6)
#define WIN_PIXEL_ORDER (1<< 7)
#define WIN_SIZE (1<< 8)
#define WIN_COLORKEY_MODE (1<< 9)
#define WIN_DOUBLE_BUFFER_MODE (1<< 10)
#define WIN_RAM_ARRAY_MODE (1<< 11)
#define WIN_BUFFER_SCALE (1<< 12)
#define WIN_ENABLE (1<< 13)
struct au1200_lcd_window_regs_t {
unsigned int flags;
unsigned int xpos;
unsigned int ypos;
unsigned int alpha_color;
unsigned int alpha_mode;
unsigned int priority;
unsigned int channel;
unsigned int buffer_format;
unsigned int color_order;
unsigned int pixel_order;
unsigned int xsize;
unsigned int ysize;
unsigned int colorkey_mode;
unsigned int double_buffer_mode;
unsigned int ram_array_mode;
unsigned int xscale;
unsigned int yscale;
unsigned int enable;
};
struct au1200_lcd_iodata_t {
unsigned int subcmd;
struct au1200_lcd_global_regs_t global;
struct au1200_lcd_window_regs_t window;
};
#if defined(__BIG_ENDIAN)
#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_11
#else
#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_00
#endif
#define LCD_CONTROL_DEFAULT_SBPPF LCD_CONTROL_SBPPF_565
/* Private, per-framebuffer management information (independent of the panel itself) */
struct au1200fb_device {
struct fb_info *fb_info; /* FB driver info record */
struct au1200fb_platdata *pd;
int plane;
unsigned char* fb_mem; /* FrameBuffer memory map */
unsigned int fb_len;
dma_addr_t fb_phys;
};
/********************************************************************/
/* LCD controller restrictions */
#define AU1200_LCD_MAX_XRES 1280
#define AU1200_LCD_MAX_YRES 1024
#define AU1200_LCD_MAX_BPP 32
#define AU1200_LCD_MAX_CLK 96000000 /* fixme: this needs to go away ? */
#define AU1200_LCD_NBR_PALETTE_ENTRIES 256
/* Default number of visible screen buffer to allocate */
#define AU1200FB_NBR_VIDEO_BUFFERS 1
/* Default maximum number of fb devices to create */
#define MAX_DEVICE_COUNT 4
/* Default window configuration entry to use (see windows[]) */
#define DEFAULT_WINDOW_INDEX 2
/********************************************************************/
static struct fb_info *_au1200fb_infos[MAX_DEVICE_COUNT];
static struct au1200_lcd *lcd = (struct au1200_lcd *) AU1200_LCD_ADDR;
static int device_count = MAX_DEVICE_COUNT;
static int window_index = DEFAULT_WINDOW_INDEX; /* default is zero */
static int panel_index = 2; /* default is zero */
static struct window_settings *win;
static struct panel_settings *panel;
static int noblanking = 1;
static int nohwcursor = 0;
struct window_settings {
unsigned char name[64];
uint32 mode_backcolor;
uint32 mode_colorkey;
uint32 mode_colorkeymsk;
struct {
int xres;
int yres;
int xpos;
int ypos;
uint32 mode_winctrl1; /* winctrl1[FRM,CCO,PO,PIPE] */
uint32 mode_winenable;
} w[4];
};
#if defined(__BIG_ENDIAN)
#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_00
#else
#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01
#endif
/*
* Default window configurations
*/
static struct window_settings windows[] = {
{ /* Index 0 */
"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
/* mode_backcolor */ 0x006600ff,
/* mode_colorkey,msk*/ 0, 0,
{
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ LCD_WINENABLE_WEN0,
},
{
/* xres, yres, xpos, ypos */ 100, 100, 100, 100,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ LCD_WINENABLE_WEN1,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ 0,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0,
},
},
},
{ /* Index 1 */
"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
/* mode_backcolor */ 0x006600ff,
/* mode_colorkey,msk*/ 0, 0,
{
{
/* xres, yres, xpos, ypos */ 320, 240, 5, 5,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_24BPP |
LCD_WINCTRL1_PO_00,
/* mode_winenable*/ LCD_WINENABLE_WEN0,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565
| LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ 0,
},
{
/* xres, yres, xpos, ypos */ 100, 100, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
},
{
/* xres, yres, xpos, ypos */ 200, 25, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0,
},
},
},
{ /* Index 2 */
"0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
/* mode_backcolor */ 0x006600ff,
/* mode_colorkey,msk*/ 0, 0,
{
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ LCD_WINENABLE_WEN0,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP,
/* mode_winenable*/ 0,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_32BPP |
LCD_WINCTRL1_PO_00|LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
},
{
/* xres, yres, xpos, ypos */ 0, 0, 0, 0,
/* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
LCD_WINCTRL1_PO_16BPP |
LCD_WINCTRL1_PIPE,
/* mode_winenable*/ 0,
},
},
},
/* Need VGA 640 @ 24bpp, @ 32bpp */
/* Need VGA 800 @ 24bpp, @ 32bpp */
/* Need VGA 1024 @ 24bpp, @ 32bpp */
};
/*
* Controller configurations for various panels.
*/
struct panel_settings
{
const char name[25]; /* Full name <vendor>_<model> */
struct fb_monspecs monspecs; /* FB monitor specs */
/* panel timings */
uint32 mode_screen;
uint32 mode_horztiming;
uint32 mode_verttiming;
uint32 mode_clkcontrol;
uint32 mode_pwmdiv;
uint32 mode_pwmhi;
uint32 mode_outmask;
uint32 mode_fifoctrl;
uint32 mode_toyclksrc;
uint32 mode_backlight;
uint32 mode_auxpll;
#define Xres min_xres
#define Yres min_yres
u32 min_xres; /* Minimum horizontal resolution */
u32 max_xres; /* Maximum horizontal resolution */
u32 min_yres; /* Minimum vertical resolution */
u32 max_yres; /* Maximum vertical resolution */
};
/********************************************************************/
/* fixme: Maybe a modedb for the CRT ? otherwise panels should be as-is */
/* List of panels known to work with the AU1200 LCD controller.
* To add a new panel, enter the same specifications as the
* Generic_TFT one, and MAKE SURE that it doesn't conflicts
* with the controller restrictions. Restrictions are:
*
* STN color panels: max_bpp <= 12
* STN mono panels: max_bpp <= 4
* TFT panels: max_bpp <= 16
* max_xres <= 800
* max_yres <= 600
*/
static struct panel_settings known_lcd_panels[] =
{
[0] = { /* QVGA 320x240 H:33.3kHz V:110Hz */
.name = "QVGA_320x240",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(320) |
LCD_SCREEN_SY_N(240),
.mode_horztiming = 0x00c4623b,
.mode_verttiming = 0x00502814,
.mode_clkcontrol = 0x00020002, /* /4=24Mhz */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
320, 320,
240, 240,
},
[1] = { /* VGA 640x480 H:30.3kHz V:58Hz */
.name = "VGA_640x480",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x13f9df80,
.mode_horztiming = 0x003c5859,
.mode_verttiming = 0x00741201,
.mode_clkcontrol = 0x00020001, /* /4=24Mhz */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
640, 480,
640, 480,
},
[2] = { /* SVGA 800x600 H:46.1kHz V:69Hz */
.name = "SVGA_800x600",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x18fa5780,
.mode_horztiming = 0x00dc7e77,
.mode_verttiming = 0x00584805,
.mode_clkcontrol = 0x00020000, /* /2=48Mhz */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
800, 800,
600, 600,
},
[3] = { /* XVGA 1024x768 H:56.2kHz V:70Hz */
.name = "XVGA_1024x768",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x1ffaff80,
.mode_horztiming = 0x007d0e57,
.mode_verttiming = 0x00740a01,
.mode_clkcontrol = 0x000A0000, /* /1 */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 6, /* 72MHz AUXPLL */
1024, 1024,
768, 768,
},
	[4] = { /* XVGA 1280x1024 H:68.5kHz V:65Hz */
.name = "XVGA_1280x1024",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x27fbff80,
.mode_horztiming = 0x00cdb2c7,
.mode_verttiming = 0x00600002,
.mode_clkcontrol = 0x000A0000, /* /1 */
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 10, /* 120MHz AUXPLL */
1280, 1280,
1024, 1024,
},
[5] = { /* Samsung 1024x768 TFT */
.name = "Samsung_1024x768_TFT",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = 0x1ffaff80,
.mode_horztiming = 0x018cc677,
.mode_verttiming = 0x00241217,
.mode_clkcontrol = 0x00000000, /* SCB 0x1 /4=24Mhz */
.mode_pwmdiv = 0x8000063f, /* SCB 0x0 */
.mode_pwmhi = 0x03400000, /* SCB 0x0 */
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
1024, 1024,
768, 768,
},
[6] = { /* Toshiba 640x480 TFT */
.name = "Toshiba_640x480_TFT",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(640) |
LCD_SCREEN_SY_N(480),
.mode_horztiming = LCD_HORZTIMING_HPW_N(96) |
LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(51),
.mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
LCD_VERTTIMING_VND1_N(11) | LCD_VERTTIMING_VND2_N(32),
.mode_clkcontrol = 0x00000000, /* /4=24Mhz */
.mode_pwmdiv = 0x8000063f,
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
640, 480,
640, 480,
},
[7] = { /* Sharp 320x240 TFT */
.name = "Sharp_320x240_TFT",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 12500,
.hfmax = 20000,
.vfmin = 38,
.vfmax = 81,
.dclkmin = 4500000,
.dclkmax = 6800000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(320) |
LCD_SCREEN_SY_N(240),
.mode_horztiming = LCD_HORZTIMING_HPW_N(60) |
LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(2),
.mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
LCD_VERTTIMING_VND1_N(2) | LCD_VERTTIMING_VND2_N(5),
.mode_clkcontrol = LCD_CLKCONTROL_PCD_N(7), /*16=6Mhz*/
.mode_pwmdiv = 0x8000063f,
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
320, 320,
240, 240,
},
[8] = { /* Toppoly TD070WGCB2 7" 856x480 TFT */
.name = "Toppoly_TD070WGCB2",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(856) |
LCD_SCREEN_SY_N(480),
.mode_horztiming = LCD_HORZTIMING_HND2_N(43) |
LCD_HORZTIMING_HND1_N(43) | LCD_HORZTIMING_HPW_N(114),
.mode_verttiming = LCD_VERTTIMING_VND2_N(20) |
LCD_VERTTIMING_VND1_N(21) | LCD_VERTTIMING_VPW_N(4),
.mode_clkcontrol = 0x00020001, /* /4=24Mhz */
.mode_pwmdiv = 0x8000063f,
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
856, 856,
480, 480,
},
[9] = {
.name = "DB1300_800x480",
.monspecs = {
.modedb = NULL,
.modedb_len = 0,
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 60,
.vfmax = 60,
.dclkmin = 6000000,
.dclkmax = 28000000,
.input = FB_DISP_RGB,
},
.mode_screen = LCD_SCREEN_SX_N(800) |
LCD_SCREEN_SY_N(480),
.mode_horztiming = LCD_HORZTIMING_HPW_N(5) |
LCD_HORZTIMING_HND1_N(16) |
LCD_HORZTIMING_HND2_N(8),
.mode_verttiming = LCD_VERTTIMING_VPW_N(4) |
LCD_VERTTIMING_VND1_N(8) |
LCD_VERTTIMING_VND2_N(5),
.mode_clkcontrol = LCD_CLKCONTROL_PCD_N(1) |
LCD_CLKCONTROL_IV |
LCD_CLKCONTROL_IH,
.mode_pwmdiv = 0x00000000,
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = (48/12) * 2,
800, 800,
480, 480,
},
};
#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels))
/********************************************************************/
static int winbpp (unsigned int winctrl1)
{
int bits = 0;
/* how many bits are needed for each pixel format */
switch (winctrl1 & LCD_WINCTRL1_FRM) {
case LCD_WINCTRL1_FRM_1BPP:
bits = 1;
break;
case LCD_WINCTRL1_FRM_2BPP:
bits = 2;
break;
case LCD_WINCTRL1_FRM_4BPP:
bits = 4;
break;
case LCD_WINCTRL1_FRM_8BPP:
bits = 8;
break;
case LCD_WINCTRL1_FRM_12BPP:
case LCD_WINCTRL1_FRM_16BPP655:
case LCD_WINCTRL1_FRM_16BPP565:
case LCD_WINCTRL1_FRM_16BPP556:
case LCD_WINCTRL1_FRM_16BPPI1555:
case LCD_WINCTRL1_FRM_16BPPI5551:
case LCD_WINCTRL1_FRM_16BPPA1555:
case LCD_WINCTRL1_FRM_16BPPA5551:
bits = 16;
break;
case LCD_WINCTRL1_FRM_24BPP:
case LCD_WINCTRL1_FRM_32BPP:
bits = 32;
break;
}
return bits;
}
static int fbinfo2index (struct fb_info *fb_info)
{
int i;
for (i = 0; i < device_count; ++i) {
if (fb_info == _au1200fb_infos[i])
return i;
}
printk("au1200fb: ERROR: fbinfo2index failed!\n");
return -1;
}
static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
int xpos, int ypos)
{
uint32 winctrl0, winctrl1, winenable, fb_offset = 0;
int xsz, ysz;
/* FIX!!! NOT CHECKING FOR COMPLETE OFFSCREEN YET */
winctrl0 = lcd->window[plane].winctrl0;
winctrl1 = lcd->window[plane].winctrl1;
winctrl0 &= (LCD_WINCTRL0_A | LCD_WINCTRL0_AEN);
winctrl1 &= ~(LCD_WINCTRL1_SZX | LCD_WINCTRL1_SZY);
/* Check for off-screen adjustments */
xsz = win->w[plane].xres;
ysz = win->w[plane].yres;
if ((xpos + win->w[plane].xres) > panel->Xres) {
/* Off-screen to the right */
xsz = panel->Xres - xpos; /* off by 1 ??? */
/*printk("off screen right\n");*/
}
if ((ypos + win->w[plane].yres) > panel->Yres) {
/* Off-screen to the bottom */
ysz = panel->Yres - ypos; /* off by 1 ??? */
/*printk("off screen bottom\n");*/
}
if (xpos < 0) {
/* Off-screen to the left */
xsz = win->w[plane].xres + xpos;
fb_offset += (((0 - xpos) * winbpp(lcd->window[plane].winctrl1))/8);
xpos = 0;
/*printk("off screen left\n");*/
}
if (ypos < 0) {
/* Off-screen to the top */
ysz = win->w[plane].yres + ypos;
/* fixme: fb_offset += ((0-ypos)*fb_pars[plane].line_length); */
ypos = 0;
/*printk("off screen top\n");*/
}
/* record settings */
win->w[plane].xpos = xpos;
win->w[plane].ypos = ypos;
xsz -= 1;
ysz -= 1;
winctrl0 |= (xpos << 21);
winctrl0 |= (ypos << 10);
winctrl1 |= (xsz << 11);
winctrl1 |= (ysz << 0);
/* Disable the window while making changes, then restore WINEN */
winenable = lcd->winenable & (1 << plane);
au_sync();
lcd->winenable &= ~(1 << plane);
lcd->window[plane].winctrl0 = winctrl0;
lcd->window[plane].winctrl1 = winctrl1;
lcd->window[plane].winbuf0 =
lcd->window[plane].winbuf1 = fbdev->fb_phys;
lcd->window[plane].winbufctrl = 0; /* select winbuf0 */
lcd->winenable |= winenable;
au_sync();
return 0;
}
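/*
 * Clipping arithmetic example (illustration): with a 320-wide window on
 * an 800-pixel panel, xpos = -100 gives xsz = 220 visible pixels, a
 * frame-buffer byte offset of 100 * bpp / 8, and xpos clamped to 0;
 * xpos = 700 instead clips on the right to xsz = 100.
 */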
static void au1200_setpanel(struct panel_settings *newpanel,
struct au1200fb_platdata *pd)
{
/*
* Perform global setup/init of LCD controller
*/
uint32 winenable;
/* Make sure all windows disabled */
winenable = lcd->winenable;
lcd->winenable = 0;
au_sync();
/*
* Ensure everything is disabled before reconfiguring
*/
if (lcd->screen & LCD_SCREEN_SEN) {
/* Wait for vertical sync period */
lcd->intstatus = LCD_INT_SS;
while ((lcd->intstatus & LCD_INT_SS) == 0) {
au_sync();
}
lcd->screen &= ~LCD_SCREEN_SEN; /*disable the controller*/
do {
lcd->intstatus = lcd->intstatus; /*clear interrupts*/
au_sync();
/*wait for controller to shut down*/
} while ((lcd->intstatus & LCD_INT_SD) == 0);
/* Call shutdown of current panel (if up) */
/* this must occur last, because if an external clock is driving
the controller, the clock cannot be turned off before first
shutting down the controller.
*/
if (pd->panel_shutdown)
pd->panel_shutdown();
}
/* Newpanel == NULL indicates a shutdown operation only */
if (newpanel == NULL)
return;
panel = newpanel;
printk("Panel(%s), %dx%d\n", panel->name, panel->Xres, panel->Yres);
/*
* Setup clocking if internal LCD clock source (assumes sys_auxpll valid)
*/
if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT))
{
uint32 sys_clksrc;
au_writel(panel->mode_auxpll, SYS_AUXPLL);
sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f;
sys_clksrc |= panel->mode_toyclksrc;
au_writel(sys_clksrc, SYS_CLKSRC);
}
/*
* Configure panel timings
*/
lcd->screen = panel->mode_screen;
lcd->horztiming = panel->mode_horztiming;
lcd->verttiming = panel->mode_verttiming;
lcd->clkcontrol = panel->mode_clkcontrol;
lcd->pwmdiv = panel->mode_pwmdiv;
lcd->pwmhi = panel->mode_pwmhi;
lcd->outmask = panel->mode_outmask;
lcd->fifoctrl = panel->mode_fifoctrl;
au_sync();
/* fixme: Check window settings to make sure still valid
* for new geometry */
#if 0
au1200_setlocation(fbdev, 0, win->w[0].xpos, win->w[0].ypos);
au1200_setlocation(fbdev, 1, win->w[1].xpos, win->w[1].ypos);
au1200_setlocation(fbdev, 2, win->w[2].xpos, win->w[2].ypos);
au1200_setlocation(fbdev, 3, win->w[3].xpos, win->w[3].ypos);
#endif
lcd->winenable = winenable;
/*
* Re-enable screen now that it is configured
*/
lcd->screen |= LCD_SCREEN_SEN;
au_sync();
/* Call init of panel */
if (pd->panel_init)
pd->panel_init();
/* FIX!!!! not appropriate on panel change!!! Global setup/init */
lcd->intenable = 0;
lcd->intstatus = ~0;
lcd->backcolor = win->mode_backcolor;
/* Setup Color Key - FIX!!! */
lcd->colorkey = win->mode_colorkey;
lcd->colorkeymsk = win->mode_colorkeymsk;
/* Setup HWCursor - FIX!!! Need to support this eventually */
lcd->hwc.cursorctrl = 0;
lcd->hwc.cursorpos = 0;
lcd->hwc.cursorcolor0 = 0;
lcd->hwc.cursorcolor1 = 0;
lcd->hwc.cursorcolor2 = 0;
lcd->hwc.cursorcolor3 = 0;
#if 0
#define D(X) printk("%25s: %08X\n", #X, X)
D(lcd->screen);
D(lcd->horztiming);
D(lcd->verttiming);
D(lcd->clkcontrol);
D(lcd->pwmdiv);
D(lcd->pwmhi);
D(lcd->outmask);
D(lcd->fifoctrl);
D(lcd->window[0].winctrl0);
D(lcd->window[0].winctrl1);
D(lcd->window[0].winctrl2);
D(lcd->window[0].winbuf0);
D(lcd->window[0].winbuf1);
D(lcd->window[0].winbufctrl);
D(lcd->window[1].winctrl0);
D(lcd->window[1].winctrl1);
D(lcd->window[1].winctrl2);
D(lcd->window[1].winbuf0);
D(lcd->window[1].winbuf1);
D(lcd->window[1].winbufctrl);
D(lcd->window[2].winctrl0);
D(lcd->window[2].winctrl1);
D(lcd->window[2].winctrl2);
D(lcd->window[2].winbuf0);
D(lcd->window[2].winbuf1);
D(lcd->window[2].winbufctrl);
D(lcd->window[3].winctrl0);
D(lcd->window[3].winctrl1);
D(lcd->window[3].winctrl2);
D(lcd->window[3].winbuf0);
D(lcd->window[3].winbuf1);
D(lcd->window[3].winbufctrl);
D(lcd->winenable);
D(lcd->intenable);
D(lcd->intstatus);
D(lcd->backcolor);
D(lcd->winenable);
D(lcd->colorkey);
D(lcd->colorkeymsk);
D(lcd->hwc.cursorctrl);
D(lcd->hwc.cursorpos);
D(lcd->hwc.cursorcolor0);
D(lcd->hwc.cursorcolor1);
D(lcd->hwc.cursorcolor2);
D(lcd->hwc.cursorcolor3);
#endif
}
static void au1200_setmode(struct au1200fb_device *fbdev)
{
int plane = fbdev->plane;
/* Window/plane setup */
lcd->window[plane].winctrl1 = ( 0
| LCD_WINCTRL1_PRI_N(plane)
| win->w[plane].mode_winctrl1 /* FRM,CCO,PO,PIPE */
) ;
au1200_setlocation(fbdev, plane, win->w[plane].xpos, win->w[plane].ypos);
lcd->window[plane].winctrl2 = ( 0
| LCD_WINCTRL2_CKMODE_00
| LCD_WINCTRL2_DBM
| LCD_WINCTRL2_BX_N(fbdev->fb_info->fix.line_length)
| LCD_WINCTRL2_SCX_1
| LCD_WINCTRL2_SCY_1
) ;
lcd->winenable |= win->w[plane].mode_winenable;
au_sync();
}
/* Inline helpers */
/*#define panel_is_dual(panel) ((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/
/*#define panel_is_active(panel)((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/
#define panel_is_color(panel) ((panel->mode_screen & LCD_SCREEN_PT) <= LCD_SCREEN_PT_CDSTN)
/* Bitfields format supported by the controller. */
static struct fb_bitfield rgb_bitfields[][4] = {
/* Red, Green, Blue, Transp */
[LCD_WINCTRL1_FRM_16BPP655 >> 25] =
{ { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPP565 >> 25] =
{ { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPP556 >> 25] =
{ { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPPI1555 >> 25] =
{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPPI5551 >> 25] =
{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_16BPPA1555 >> 25] =
{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } },
[LCD_WINCTRL1_FRM_16BPPA5551 >> 25] =
{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } },
[LCD_WINCTRL1_FRM_24BPP >> 25] =
{ { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 0, 0, 0 } },
[LCD_WINCTRL1_FRM_32BPP >> 25] =
{ { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 0, 0 } },
};
/*-------------------------------------------------------------------------*/
/* Helpers */
static void au1200fb_update_fbinfo(struct fb_info *fbi)
{
/* FIX!!!! This also needs to take the window pixel format into account!!! */
/* Update var-dependent FB info */
if (panel_is_color(panel)) {
if (fbi->var.bits_per_pixel <= 8) {
/* palettized */
fbi->fix.visual = FB_VISUAL_PSEUDOCOLOR;
fbi->fix.line_length = fbi->var.xres_virtual /
(8/fbi->var.bits_per_pixel);
} else {
/* non-palettized */
fbi->fix.visual = FB_VISUAL_TRUECOLOR;
fbi->fix.line_length = fbi->var.xres_virtual * (fbi->var.bits_per_pixel / 8);
}
} else {
/* mono FIX!!! mono 8 and 4 bits */
fbi->fix.visual = FB_VISUAL_MONO10;
fbi->fix.line_length = fbi->var.xres_virtual / 8;
}
fbi->screen_size = fbi->fix.line_length * fbi->var.yres_virtual;
print_dbg("line length: %d\n", fbi->fix.line_length);
print_dbg("bits_per_pixel: %d\n", fbi->var.bits_per_pixel);
}
/*-------------------------------------------------------------------------*/
/* AU1200 framebuffer driver */
/* fb_check_var
* Validate var settings with hardware restrictions and modify it if necessary
*/
static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
struct au1200fb_device *fbdev = fbi->par;
u32 pixclock;
int screen_size, plane;
plane = fbdev->plane;
	/* Make sure that the mode respects all LCD controller and
	 * panel restrictions. */
var->xres = win->w[plane].xres;
var->yres = win->w[plane].yres;
/* No need for virtual resolution support */
var->xres_virtual = var->xres;
var->yres_virtual = var->yres;
var->bits_per_pixel = winbpp(win->w[plane].mode_winctrl1);
screen_size = var->xres_virtual * var->yres_virtual;
if (var->bits_per_pixel > 8) screen_size *= (var->bits_per_pixel / 8);
else screen_size /= (8/var->bits_per_pixel);
if (fbdev->fb_len < screen_size)
		return -EINVAL; /* Virtual screen is too big, abort */
	/* FIX!!!! what are the implications of ignoring this for windows ??? */
/* The max LCD clock is fixed to 48MHz (value of AUX_CLK). The pixel
* clock can only be obtain by dividing this value by an even integer.
* Fallback to a slower pixel clock if necessary. */
pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin);
pixclock = min3(pixclock, fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2);
if (AU1200_LCD_MAX_CLK % pixclock) {
int diff = AU1200_LCD_MAX_CLK % pixclock;
pixclock -= diff;
}
var->pixclock = KHZ2PICOS(pixclock/1000);
#if 0
if (!panel_is_active(panel)) {
int pcd = AU1200_LCD_MAX_CLK / (pixclock * 2) - 1;
if (!panel_is_color(panel)
&& (panel->control_base & LCD_CONTROL_MPI) && (pcd < 3)) {
/* STN 8bit mono panel support is up to 6MHz pixclock */
var->pixclock = KHZ2PICOS(6000);
} else if (!pcd) {
/* Other STN panel support is up to 12MHz */
var->pixclock = KHZ2PICOS(12000);
}
}
#endif
/* Set bitfield accordingly */
switch (var->bits_per_pixel) {
case 16:
{
/* 16bpp True color.
* These must be set to MATCH WINCTRL[FORM] */
int idx;
idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
var->red = rgb_bitfields[idx][0];
var->green = rgb_bitfields[idx][1];
var->blue = rgb_bitfields[idx][2];
var->transp = rgb_bitfields[idx][3];
break;
}
case 32:
{
/* 32bpp True color.
* These must be set to MATCH WINCTRL[FORM] */
int idx;
idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
var->red = rgb_bitfields[idx][0];
var->green = rgb_bitfields[idx][1];
var->blue = rgb_bitfields[idx][2];
var->transp = rgb_bitfields[idx][3];
break;
}
default:
print_dbg("Unsupported depth %dbpp", var->bits_per_pixel);
return -EINVAL;
}
return 0;
}
/* fb_set_par
* Set hardware with var settings. This will enable the controller with a
* specific mode, normally validated with the fb_check_var method
*/
static int au1200fb_fb_set_par(struct fb_info *fbi)
{
struct au1200fb_device *fbdev = fbi->par;
au1200fb_update_fbinfo(fbi);
au1200_setmode(fbdev);
return 0;
}
/* fb_setcolreg
* Set color in LCD palette.
*/
static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *fbi)
{
volatile u32 *palette = lcd->palette;
u32 value;
if (regno > (AU1200_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;
if (fbi->var.grayscale) {
/* Convert color to grayscale */
red = green = blue =
(19595 * red + 38470 * green + 7471 * blue) >> 16;
}
if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
/* Place color in the pseudopalette */
if (regno >= 16) /* pseudo_palette has only 16 entries */
return -EINVAL;
palette = (u32*) fbi->pseudo_palette;
red >>= (16 - fbi->var.red.length);
green >>= (16 - fbi->var.green.length);
blue >>= (16 - fbi->var.blue.length);
value = (red << fbi->var.red.offset) |
(green << fbi->var.green.offset)|
(blue << fbi->var.blue.offset);
value &= 0xFFFF;
} else if (1 /*FIX!!! panel_is_active(fbdev->panel)*/) {
/* COLOR TFT PALETTIZED (use RGB 565) */
value = (red & 0xF800)|((green >> 5) &
0x07E0)|((blue >> 11) & 0x001F);
value &= 0xFFFF;
} else if (0 /*panel_is_color(fbdev->panel)*/) {
/* COLOR STN MODE */
value = 0x1234;
value &= 0xFFF;
} else {
/* MONOCHROME MODE */
value = (green >> 12) & 0x000F;
value &= 0xF;
}
palette[regno] = value;
return 0;
}
/* fb_blank
* Blank the screen. Depending on the mode, the screen will be
* activated with the backlight color, or deactivated
*/
static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
{
struct au1200fb_device *fbdev = fbi->par;
/* Short-circuit screen blanking */
if (noblanking)
return 0;
switch (blank_mode) {
case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
/* printk("turn on panel\n"); */
au1200_setpanel(panel, fbdev->pd);
break;
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
/* printk("turn off panel\n"); */
au1200_setpanel(NULL, fbdev->pd);
break;
default:
break;
}
/* FB_BLANK_NORMAL is a soft blank */
return (blank_mode == FB_BLANK_NORMAL) ? -EINVAL : 0;
}
/* fb_mmap
* Map video memory in user space. We don't use the generic fb_mmap
* method mainly to allow the use of the TLB streaming flag (CCA=6)
*/
static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned int len;
unsigned long start=0, off;
struct au1200fb_device *fbdev = info->par;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
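/*
 * ioctl helper: apply user-requested global LCD settings. Only the fields
 * flagged in pdata->flags are written (backcolor, brightness PWM duty,
 * colorkey and colorkey mask).
 */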
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
{
unsigned int hi1, divider;
/* SCREEN_SIZE: user cannot reset size, must switch panel choice */
if (pdata->flags & SCREEN_BACKCOLOR)
lcd->backcolor = pdata->backcolor;
if (pdata->flags & SCREEN_BRIGHTNESS) {
// limit brightness pwm duty to >= 30/1600
if (pdata->brightness < 30) {
pdata->brightness = 30;
}
divider = (lcd->pwmdiv & 0x3FFFF) + 1;
hi1 = (((pdata->brightness & 0xFF) + 1) * divider) >> 8;
lcd->pwmhi &= 0xFFFF;
lcd->pwmhi |= (hi1 << 16);
}
if (pdata->flags & SCREEN_COLORKEY)
lcd->colorkey = pdata->colorkey;
if (pdata->flags & SCREEN_MASK)
lcd->colorkeymsk = pdata->mask;
au_sync();
}
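/*
 * ioctl helper: read the current global LCD settings (screen size,
 * backcolor, colorkey/mask and brightness) back into *pdata.
 */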
static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
{
unsigned int hi1, divider;
pdata->xsize = ((lcd->screen & LCD_SCREEN_SX) >> 19) + 1;
pdata->ysize = ((lcd->screen & LCD_SCREEN_SY) >> 8) + 1;
pdata->backcolor = lcd->backcolor;
pdata->colorkey = lcd->colorkey;
pdata->mask = lcd->colorkeymsk;
// brightness
hi1 = (lcd->pwmhi >> 16) + 1;
divider = (lcd->pwmdiv & 0x3FFFF) + 1;
pdata->brightness = ((hi1 << 8) / divider) - 1;
au_sync();
}
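/*
 * ioctl helper: program one plane's winctrl0/1/2 registers and its enable
 * bit from whichever fields of *pdata are flagged in pdata->flags.
 */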
static void set_window(unsigned int plane,
struct au1200_lcd_window_regs_t *pdata)
{
unsigned int val, bpp;
/* Window control register 0 */
if (pdata->flags & WIN_POSITION) {
val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_OX |
LCD_WINCTRL0_OY);
val |= ((pdata->xpos << 21) & LCD_WINCTRL0_OX);
val |= ((pdata->ypos << 10) & LCD_WINCTRL0_OY);
lcd->window[plane].winctrl0 = val;
}
if (pdata->flags & WIN_ALPHA_COLOR) {
val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_A);
val |= ((pdata->alpha_color << 2) & LCD_WINCTRL0_A);
lcd->window[plane].winctrl0 = val;
}
if (pdata->flags & WIN_ALPHA_MODE) {
val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_AEN);
val |= ((pdata->alpha_mode << 1) & LCD_WINCTRL0_AEN);
lcd->window[plane].winctrl0 = val;
}
/* Window control register 1 */
if (pdata->flags & WIN_PRIORITY) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PRI);
val |= ((pdata->priority << 30) & LCD_WINCTRL1_PRI);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_CHANNEL) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PIPE);
val |= ((pdata->channel << 29) & LCD_WINCTRL1_PIPE);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_BUFFER_FORMAT) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_FRM);
val |= ((pdata->buffer_format << 25) & LCD_WINCTRL1_FRM);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_COLOR_ORDER) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_CCO);
val |= ((pdata->color_order << 24) & LCD_WINCTRL1_CCO);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_PIXEL_ORDER) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PO);
val |= ((pdata->pixel_order << 22) & LCD_WINCTRL1_PO);
lcd->window[plane].winctrl1 = val;
}
if (pdata->flags & WIN_SIZE) {
val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_SZX |
LCD_WINCTRL1_SZY);
val |= (((pdata->xsize << 11) - 1) & LCD_WINCTRL1_SZX);
val |= (((pdata->ysize) - 1) & LCD_WINCTRL1_SZY);
lcd->window[plane].winctrl1 = val;
/* program buffer line width */
bpp = winbpp(val) / 8;
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_BX);
val |= (((pdata->xsize * bpp) << 8) & LCD_WINCTRL2_BX);
lcd->window[plane].winctrl2 = val;
}
/* Window control register 2 */
if (pdata->flags & WIN_COLORKEY_MODE) {
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_CKMODE);
val |= ((pdata->colorkey_mode << 24) & LCD_WINCTRL2_CKMODE);
lcd->window[plane].winctrl2 = val;
}
if (pdata->flags & WIN_DOUBLE_BUFFER_MODE) {
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_DBM);
val |= ((pdata->double_buffer_mode << 23) & LCD_WINCTRL2_DBM);
lcd->window[plane].winctrl2 = val;
}
if (pdata->flags & WIN_RAM_ARRAY_MODE) {
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_RAM);
val |= ((pdata->ram_array_mode << 21) & LCD_WINCTRL2_RAM);
lcd->window[plane].winctrl2 = val;
}
/* Buffer line width programmed with WIN_SIZE */
if (pdata->flags & WIN_BUFFER_SCALE) {
val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_SCX |
LCD_WINCTRL2_SCY);
val |= ((pdata->xsize << 11) & LCD_WINCTRL2_SCX);
val |= ((pdata->ysize) & LCD_WINCTRL2_SCY);
lcd->window[plane].winctrl2 = val;
}
if (pdata->flags & WIN_ENABLE) {
val = lcd->winenable;
val &= ~(1<<plane);
val |= (pdata->enable & 1) << plane;
lcd->winenable = val;
}
au_sync();
}
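/*
 * ioctl helper: decode one plane's winctrl0/1/2 registers and enable bit
 * into *pdata.
 */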
static void get_window(unsigned int plane,
struct au1200_lcd_window_regs_t *pdata)
{
/* Window control register 0 */
pdata->xpos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OX) >> 21;
pdata->ypos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OY) >> 10;
pdata->alpha_color = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_A) >> 2;
pdata->alpha_mode = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_AEN) >> 1;
/* Window control register 1 */
pdata->priority = (lcd->window[plane].winctrl1& LCD_WINCTRL1_PRI) >> 30;
pdata->channel = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PIPE) >> 29;
pdata->buffer_format = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_FRM) >> 25;
pdata->color_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_CCO) >> 24;
pdata->pixel_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PO) >> 22;
pdata->xsize = ((lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZX) >> 11) + 1;
pdata->ysize = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZY) + 1;
/* Window control register 2 */
pdata->colorkey_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_CKMODE) >> 24;
pdata->double_buffer_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_DBM) >> 23;
pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21;
pdata->enable = (lcd->winenable >> plane) & 1;
au_sync();
}
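/*
 * Handle AU1200_LCD_FB_IOCTL: copy the iodata payload in from user space,
 * dispatch the subcommand (get/set screen, window or panel) and copy the
 * possibly-updated payload back out.
 */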
static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct au1200fb_device *fbdev = info->par;
int plane;
int val;
plane = fbinfo2index(info);
print_dbg("au1200fb: ioctl %d on plane %d\n", cmd, plane);
if (cmd == AU1200_LCD_FB_IOCTL) {
struct au1200_lcd_iodata_t iodata;
if (copy_from_user(&iodata, (void __user *) arg, sizeof(iodata)))
return -EFAULT;
print_dbg("FB IOCTL called\n");
switch (iodata.subcmd) {
case AU1200_LCD_SET_SCREEN:
print_dbg("AU1200_LCD_SET_SCREEN\n");
set_global(cmd, &iodata.global);
break;
case AU1200_LCD_GET_SCREEN:
print_dbg("AU1200_LCD_GET_SCREEN\n");
get_global(cmd, &iodata.global);
break;
case AU1200_LCD_SET_WINDOW:
print_dbg("AU1200_LCD_SET_WINDOW\n");
set_window(plane, &iodata.window);
break;
case AU1200_LCD_GET_WINDOW:
print_dbg("AU1200_LCD_GET_WINDOW\n");
get_window(plane, &iodata.window);
break;
case AU1200_LCD_SET_PANEL:
print_dbg("AU1200_LCD_SET_PANEL\n");
if ((iodata.global.panel_choice >= 0) &&
(iodata.global.panel_choice < NUM_PANELS))
{
struct panel_settings *newpanel;
panel_index = iodata.global.panel_choice;
newpanel = &known_lcd_panels[panel_index];
au1200_setpanel(newpanel, fbdev->pd);
}
break;
case AU1200_LCD_GET_PANEL:
print_dbg("AU1200_LCD_GET_PANEL\n");
iodata.global.panel_choice = panel_index;
break;
default:
return -EINVAL;
}
val = copy_to_user((void __user *) arg, &iodata, sizeof(iodata));
if (val) {
print_dbg("error: could not copy %d bytes\n", val);
return -EFAULT;
}
}
return 0;
}
static struct fb_ops au1200fb_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = au1200fb_fb_check_var,
.fb_set_par = au1200fb_fb_set_par,
.fb_setcolreg = au1200fb_fb_setcolreg,
.fb_blank = au1200fb_fb_blank,
.fb_fillrect = sys_fillrect,
.fb_copyarea = sys_copyarea,
.fb_imageblit = sys_imageblit,
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
.fb_sync = NULL,
.fb_ioctl = au1200fb_ioctl,
.fb_mmap = au1200fb_fb_mmap,
};
/*-------------------------------------------------------------------------*/
static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id)
{
/* Nothing to do for now, just clear any pending interrupt */
lcd->intstatus = lcd->intstatus;
au_sync();
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
/* AU1200 LCD device probe helpers */
static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
{
struct fb_info *fbi = fbdev->fb_info;
int bpp;
fbi->fbops = &au1200fb_fb_ops;
bpp = winbpp(win->w[fbdev->plane].mode_winctrl1);
/* Copy monitor specs from panel data */
/* fixme: we're setting up LCD controller windows, so these don't depend
on the monitor specs (the panel itself does, but that isn't handled
here)... so maybe we need a generic catch-all monitor setting??? */
memcpy(&fbi->monspecs, &panel->monspecs, sizeof(struct fb_monspecs));
/* We first try the user mode passed in argument. If that failed,
* or if no one has been specified, we default to the first mode of the
* panel list. Note that after this call, var data will be set */
if (!fb_find_mode(&fbi->var,
fbi,
NULL, /* drv_info.opt_mode, */
fbi->monspecs.modedb,
fbi->monspecs.modedb_len,
fbi->monspecs.modedb,
bpp)) {
print_err("Cannot find valid mode for panel %s", panel->name);
return -EFAULT;
}
fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL);
if (!fbi->pseudo_palette) {
return -ENOMEM;
}
if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
print_err("Fail to allocate colormap (%d entries)",
AU1200_LCD_NBR_PALETTE_ENTRIES);
kfree(fbi->pseudo_palette);
return -EFAULT;
}
strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
fbi->fix.smem_start = fbdev->fb_phys;
fbi->fix.smem_len = fbdev->fb_len;
fbi->fix.type = FB_TYPE_PACKED_PIXELS;
fbi->fix.xpanstep = 0;
fbi->fix.ypanstep = 0;
fbi->fix.mmio_start = 0;
fbi->fix.mmio_len = 0;
fbi->fix.accel = FB_ACCEL_NONE;
fbi->screen_base = (char __iomem *) fbdev->fb_mem;
au1200fb_update_fbinfo(fbi);
return 0;
}
/*-------------------------------------------------------------------------*/
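/*
 * Parse the option string returned by fb_get_options(DRIVER_NAME), a
 * comma-separated list of: "panel:<name|index|bs>", "nohwcursor",
 * "devices:<n>", "wincfg:<n>" and "off". Returns nonzero only for "off",
 * which disables the driver.
 */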
static int au1200fb_setup(struct au1200fb_platdata *pd)
{
char *options = NULL;
char *this_opt, *endptr;
int num_panels = ARRAY_SIZE(known_lcd_panels);
int panel_idx = -1;
fb_get_options(DRIVER_NAME, &options);
if (!options)
goto out;
while ((this_opt = strsep(&options, ",")) != NULL) {
/* Panel option - can be panel name,
* "bs" for board-switch, or number/index */
if (!strncmp(this_opt, "panel:", 6)) {
int i;
long int li;
char *endptr;
this_opt += 6;
/* First check for index, which allows
* to short circuit this mess */
li = simple_strtol(this_opt, &endptr, 0);
if (*endptr == '\0')
panel_idx = (int)li;
else if (strcmp(this_opt, "bs") == 0)
panel_idx = pd->panel_index();
else {
for (i = 0; i < num_panels; i++) {
if (!strcmp(this_opt,
known_lcd_panels[i].name)) {
panel_idx = i;
break;
}
}
}
if ((panel_idx < 0) || (panel_idx >= num_panels))
print_warn("Panel %s not supported!", this_opt);
else
panel_index = panel_idx;
} else if (strncmp(this_opt, "nohwcursor", 10) == 0)
nohwcursor = 1;
else if (strncmp(this_opt, "devices:", 8) == 0) {
this_opt += 8;
device_count = simple_strtol(this_opt, &endptr, 0);
if ((device_count < 0) ||
(device_count > MAX_DEVICE_COUNT))
device_count = MAX_DEVICE_COUNT;
} else if (strncmp(this_opt, "wincfg:", 7) == 0) {
this_opt += 7;
window_index = simple_strtol(this_opt, &endptr, 0);
if ((window_index < 0) ||
(window_index >= ARRAY_SIZE(windows)))
window_index = DEFAULT_WINDOW_INDEX;
} else if (strncmp(this_opt, "off", 3) == 0)
return 1;
else
print_warn("Unsupported option \"%s\"", this_opt);
}
out:
return 0;
}
/* AU1200 LCD controller device driver */
static int au1200fb_drv_probe(struct platform_device *dev)
{
struct au1200fb_device *fbdev;
struct au1200fb_platdata *pd;
struct fb_info *fbi = NULL;
unsigned long page;
int bpp, plane, ret, irq;
print_info("" DRIVER_DESC "");
pd = dev->dev.platform_data;
if (!pd)
return -ENODEV;
/* Setup driver with options */
if (au1200fb_setup(pd))
return -ENODEV;
/* Point to the panel selected */
panel = &known_lcd_panels[panel_index];
win = &windows[window_index];
printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
/* shut gcc up */
ret = 0;
fbdev = NULL;
for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
win->w[plane].xres = panel->Xres;
if (win->w[plane].yres == 0)
win->w[plane].yres = panel->Yres;
fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
&dev->dev);
if (!fbi)
goto failed;
_au1200fb_infos[plane] = fbi;
fbdev = fbi->par;
fbdev->fb_info = fbi;
fbdev->pd = pd;
fbdev->plane = plane;
/* Allocate the framebuffer to the maximum screen size */
fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
return -ENOMEM;
}
/*
* Set page reserved so that mmap will work. This is necessary
* since we'll be remapping normal memory.
*/
for (page = (unsigned long)fbdev->fb_phys;
page < PAGE_ALIGN((unsigned long)fbdev->fb_phys +
fbdev->fb_len);
page += PAGE_SIZE) {
SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */
}
print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Init FB data */
if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
goto failed;
/* Register new framebuffer */
ret = register_framebuffer(fbi);
if (ret < 0) {
print_err("cannot register new framebuffer");
goto failed;
}
au1200fb_fb_set_par(fbi);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
if (plane == 0)
if (fb_prepare_logo(fbi, FB_ROTATE_UR)) {
/* Start display and show logo on boot */
fb_set_cmap(&fbi->cmap, fbi);
fb_show_logo(fbi, FB_ROTATE_UR);
}
#endif
}
/* Now hook interrupt too */
irq = platform_get_irq(dev, 0);
ret = request_irq(irq, au1200fb_handle_irq,
IRQF_SHARED, "lcd", (void *)dev);
if (ret) {
print_err("fail to request interrupt line %d (err: %d)",
irq, ret);
goto failed;
}
platform_set_drvdata(dev, pd);
/* Kickstart the panel */
au1200_setpanel(panel, pd);
return 0;
failed:
/* NOTE: this only cleans up the plane/window that failed; earlier planes stay registered and active */
if (fbi) {
if (fbi->cmap.len != 0)
fb_dealloc_cmap(&fbi->cmap);
kfree(fbi->pseudo_palette);
}
if (plane == 0)
free_irq(AU1200_LCD_INT, (void*)dev);
return ret;
}
static int au1200fb_drv_remove(struct platform_device *dev)
{
struct au1200fb_platdata *pd = platform_get_drvdata(dev);
struct au1200fb_device *fbdev;
struct fb_info *fbi;
int plane;
/* Turn off the panel */
au1200_setpanel(NULL, pd);
for (plane = 0; plane < device_count; ++plane) {
fbi = _au1200fb_infos[plane];
fbdev = fbi->par;
/* Clean up all probe data */
unregister_framebuffer(fbi);
if (fbi->cmap.len != 0)
fb_dealloc_cmap(&fbi->cmap);
kfree(fbi->pseudo_palette);
framebuffer_release(fbi);
_au1200fb_infos[plane] = NULL;
}
free_irq(platform_get_irq(dev, 0), (void *)dev);
return 0;
}
#ifdef CONFIG_PM
static int au1200fb_drv_suspend(struct device *dev)
{
struct au1200fb_platdata *pd = dev_get_drvdata(dev);
au1200_setpanel(NULL, pd);
lcd->outmask = 0;
au_sync();
return 0;
}
static int au1200fb_drv_resume(struct device *dev)
{
struct au1200fb_platdata *pd = dev_get_drvdata(dev);
struct fb_info *fbi;
int i;
/* Kickstart the panel */
au1200_setpanel(panel, pd);
for (i = 0; i < device_count; i++) {
fbi = _au1200fb_infos[i];
au1200fb_fb_set_par(fbi);
}
return 0;
}
static const struct dev_pm_ops au1200fb_pmops = {
.suspend = au1200fb_drv_suspend,
.resume = au1200fb_drv_resume,
.freeze = au1200fb_drv_suspend,
.thaw = au1200fb_drv_resume,
};
#define AU1200FB_PMOPS (&au1200fb_pmops)
#else
#define AU1200FB_PMOPS NULL
#endif /* CONFIG_PM */
static struct platform_driver au1200fb_driver = {
.driver = {
.name = "au1200-lcd",
.owner = THIS_MODULE,
.pm = AU1200FB_PMOPS,
},
.probe = au1200fb_drv_probe,
.remove = au1200fb_drv_remove,
};
/*-------------------------------------------------------------------------*/
static int __init au1200fb_init(void)
{
return platform_driver_register(&au1200fb_driver);
}
static void __exit au1200fb_cleanup(void)
{
platform_driver_unregister(&au1200fb_driver);
}
module_init(au1200fb_init);
module_exit(au1200fb_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5755_2 |
crossvul-cpp_data_good_2020_1 | /*
* contrib/hstore/hstore_io.c
*/
#include "postgres.h"
#include <ctype.h>
#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "lib/stringinfo.h"
#include "libpq/pqformat.h"
#include "utils/builtins.h"
#include "utils/json.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/typcache.h"
#include "hstore.h"
PG_MODULE_MAGIC;
/* old names for C functions */
HSTORE_POLLUTE(hstore_from_text, tconvert);
typedef struct
{
char *begin;
char *ptr;
char *cur;
char *word;
int wordlen;
Pairs *pairs;
int pcur;
int plen;
} HSParser;
#define RESIZEPRSBUF \
do { \
if ( state->cur - state->word + 1 >= state->wordlen ) \
{ \
int32 clen = state->cur - state->word; \
state->wordlen *= 2; \
state->word = (char*)repalloc( (void*)state->word, state->wordlen ); \
state->cur = state->word + clen; \
} \
} while (0)
#define GV_WAITVAL 0
#define GV_INVAL 1
#define GV_INESCVAL 2
#define GV_WAITESCIN 3
#define GV_WAITESCESCIN 4
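/*
 * Scan one key or value token starting at state->ptr, using the GV_*
 * state machine to handle double quoting and backslash escapes. Returns
 * false only when the input ends before a token starts; *escaped reports
 * whether the token was quoted.
 */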
static bool
get_val(HSParser *state, bool ignoreeq, bool *escaped)
{
int st = GV_WAITVAL;
state->wordlen = 32;
state->cur = state->word = palloc(state->wordlen);
*escaped = false;
while (1)
{
if (st == GV_WAITVAL)
{
if (*(state->ptr) == '"')
{
*escaped = true;
st = GV_INESCVAL;
}
else if (*(state->ptr) == '\0')
{
return false;
}
else if (*(state->ptr) == '=' && !ignoreeq)
{
elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin));
}
else if (*(state->ptr) == '\\')
{
st = GV_WAITESCIN;
}
else if (!isspace((unsigned char) *(state->ptr)))
{
*(state->cur) = *(state->ptr);
state->cur++;
st = GV_INVAL;
}
}
else if (st == GV_INVAL)
{
if (*(state->ptr) == '\\')
{
st = GV_WAITESCIN;
}
else if (*(state->ptr) == '=' && !ignoreeq)
{
state->ptr--;
return true;
}
else if (*(state->ptr) == ',' && ignoreeq)
{
state->ptr--;
return true;
}
else if (isspace((unsigned char) *(state->ptr)))
{
return true;
}
else if (*(state->ptr) == '\0')
{
state->ptr--;
return true;
}
else
{
RESIZEPRSBUF;
*(state->cur) = *(state->ptr);
state->cur++;
}
}
else if (st == GV_INESCVAL)
{
if (*(state->ptr) == '\\')
{
st = GV_WAITESCESCIN;
}
else if (*(state->ptr) == '"')
{
return true;
}
else if (*(state->ptr) == '\0')
{
elog(ERROR, "Unexpected end of string");
}
else
{
RESIZEPRSBUF;
*(state->cur) = *(state->ptr);
state->cur++;
}
}
else if (st == GV_WAITESCIN)
{
if (*(state->ptr) == '\0')
elog(ERROR, "Unexpected end of string");
RESIZEPRSBUF;
*(state->cur) = *(state->ptr);
state->cur++;
st = GV_INVAL;
}
else if (st == GV_WAITESCESCIN)
{
if (*(state->ptr) == '\0')
elog(ERROR, "Unexpected end of string");
RESIZEPRSBUF;
*(state->cur) = *(state->ptr);
state->cur++;
st = GV_INESCVAL;
}
else
elog(ERROR, "Unknown state %d at position line %d in file '%s'", st, __LINE__, __FILE__);
state->ptr++;
}
}
#define WKEY 0
#define WVAL 1
#define WEQ 2
#define WGT 3
#define WDEL 4
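/*
 * Parse the textual "key=>value, ..." representation into state->pairs,
 * doubling the array as needed. An unquoted, case-insensitive "null"
 * value is stored as an SQL NULL.
 */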
static void
parse_hstore(HSParser *state)
{
int st = WKEY;
bool escaped = false;
state->plen = 16;
state->pairs = (Pairs *) palloc(sizeof(Pairs) * state->plen);
state->pcur = 0;
state->ptr = state->begin;
state->word = NULL;
while (1)
{
if (st == WKEY)
{
if (!get_val(state, false, &escaped))
return;
if (state->pcur >= state->plen)
{
state->plen *= 2;
state->pairs = (Pairs *) repalloc(state->pairs, sizeof(Pairs) * state->plen);
}
state->pairs[state->pcur].key = state->word;
state->pairs[state->pcur].keylen = hstoreCheckKeyLen(state->cur - state->word);
state->pairs[state->pcur].val = NULL;
state->word = NULL;
st = WEQ;
}
else if (st == WEQ)
{
if (*(state->ptr) == '=')
{
st = WGT;
}
else if (*(state->ptr) == '\0')
{
elog(ERROR, "Unexpected end of string");
}
else if (!isspace((unsigned char) *(state->ptr)))
{
elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin));
}
}
else if (st == WGT)
{
if (*(state->ptr) == '>')
{
st = WVAL;
}
else if (*(state->ptr) == '\0')
{
elog(ERROR, "Unexpected end of string");
}
else
{
elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin));
}
}
else if (st == WVAL)
{
if (!get_val(state, true, &escaped))
elog(ERROR, "Unexpected end of string");
state->pairs[state->pcur].val = state->word;
state->pairs[state->pcur].vallen = hstoreCheckValLen(state->cur - state->word);
state->pairs[state->pcur].isnull = false;
state->pairs[state->pcur].needfree = true;
if (state->cur - state->word == 4 && !escaped)
{
state->word[4] = '\0';
if (0 == pg_strcasecmp(state->word, "null"))
state->pairs[state->pcur].isnull = true;
}
state->word = NULL;
state->pcur++;
st = WDEL;
}
else if (st == WDEL)
{
if (*(state->ptr) == ',')
{
st = WKEY;
}
else if (*(state->ptr) == '\0')
{
return;
}
else if (!isspace((unsigned char) *(state->ptr)))
{
elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin));
}
}
else
elog(ERROR, "Unknown state %d at line %d in file '%s'", st, __LINE__, __FILE__);
state->ptr++;
}
}
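/*
 * qsort comparator: order pairs by key length, then key bytes. Among
 * duplicate keys, entries with needfree set sort later, so they are the
 * ones hstoreUniquePairs will pfree.
 */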
static int
comparePairs(const void *a, const void *b)
{
const Pairs *pa = a;
const Pairs *pb = b;
if (pa->keylen == pb->keylen)
{
int res = memcmp(pa->key, pb->key, pa->keylen);
if (res)
return res;
/* guarantee that needfree will be later */
if (pb->needfree == pa->needfree)
return 0;
else if (pa->needfree)
return 1;
else
return -1;
}
return (pa->keylen > pb->keylen) ? 1 : -1;
}
/*
* this code still respects pairs.needfree, even though in general
* it should never be called in a context where anything needs freeing.
* we keep it because (a) those calls are in a rare code path anyway,
* and (b) who knows whether they might be needed by some caller.
*/
int
hstoreUniquePairs(Pairs *a, int32 l, int32 *buflen)
{
Pairs *ptr,
*res;
*buflen = 0;
if (l < 2)
{
if (l == 1)
*buflen = a->keylen + ((a->isnull) ? 0 : a->vallen);
return l;
}
qsort((void *) a, l, sizeof(Pairs), comparePairs);
ptr = a + 1;
res = a;
while (ptr - a < l)
{
if (ptr->keylen == res->keylen &&
memcmp(ptr->key, res->key, res->keylen) == 0)
{
if (ptr->needfree)
{
pfree(ptr->key);
pfree(ptr->val);
}
}
else
{
*buflen += res->keylen + ((res->isnull) ? 0 : res->vallen);
res++;
memcpy(res, ptr, sizeof(Pairs));
}
ptr++;
}
*buflen += res->keylen + ((res->isnull) ? 0 : res->vallen);
return res + 1 - a;
}
size_t
hstoreCheckKeyLen(size_t len)
{
if (len > HSTORE_MAX_KEY_LEN)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
errmsg("string too long for hstore key")));
return len;
}
size_t
hstoreCheckValLen(size_t len)
{
if (len > HSTORE_MAX_VALUE_LEN)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
errmsg("string too long for hstore value")));
return len;
}
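/*
 * Pack pcount already-de-duplicated pairs into a freshly palloc'd HStore;
 * buflen is the summed key/value payload length computed by
 * hstoreUniquePairs.
 */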
HStore *
hstorePairs(Pairs *pairs, int32 pcount, int32 buflen)
{
HStore *out;
HEntry *entry;
char *ptr;
char *buf;
int32 len;
int32 i;
len = CALCDATASIZE(pcount, buflen);
out = palloc(len);
SET_VARSIZE(out, len);
HS_SETCOUNT(out, pcount);
if (pcount == 0)
return out;
entry = ARRPTR(out);
buf = ptr = STRPTR(out);
for (i = 0; i < pcount; i++)
HS_ADDITEM(entry, buf, ptr, pairs[i]);
HS_FINALIZE(out, pcount, buf, ptr);
return out;
}
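/* hstore_in: text input function - parse, de-duplicate, then pack. */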
PG_FUNCTION_INFO_V1(hstore_in);
Datum hstore_in(PG_FUNCTION_ARGS);
Datum
hstore_in(PG_FUNCTION_ARGS)
{
HSParser state;
int32 buflen;
HStore *out;
state.begin = PG_GETARG_CSTRING(0);
parse_hstore(&state);
state.pcur = hstoreUniquePairs(state.pairs, state.pcur, &buflen);
out = hstorePairs(state.pairs, state.pcur, buflen);
PG_RETURN_POINTER(out);
}
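/*
 * hstore_recv: binary input function. The wire pair count is range-checked
 * against MaxAllocSize before the Pairs array is allocated, so a hostile
 * count cannot cause an undersized allocation.
 */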
PG_FUNCTION_INFO_V1(hstore_recv);
Datum hstore_recv(PG_FUNCTION_ARGS);
Datum
hstore_recv(PG_FUNCTION_ARGS)
{
int32 buflen;
HStore *out;
Pairs *pairs;
int32 i;
int32 pcount;
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
pcount = pq_getmsgint(buf, 4);
if (pcount == 0)
{
out = hstorePairs(NULL, 0, 0);
PG_RETURN_POINTER(out);
}
if (pcount < 0 || pcount > MaxAllocSize / sizeof(Pairs))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
pcount, (int) (MaxAllocSize / sizeof(Pairs)))));
pairs = palloc(pcount * sizeof(Pairs));
for (i = 0; i < pcount; ++i)
{
int rawlen = pq_getmsgint(buf, 4);
int len;
if (rawlen < 0)
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null value not allowed for hstore key")));
pairs[i].key = pq_getmsgtext(buf, rawlen, &len);
pairs[i].keylen = hstoreCheckKeyLen(len);
pairs[i].needfree = true;
rawlen = pq_getmsgint(buf, 4);
if (rawlen < 0)
{
pairs[i].val = NULL;
pairs[i].vallen = 0;
pairs[i].isnull = true;
}
else
{
pairs[i].val = pq_getmsgtext(buf, rawlen, &len);
pairs[i].vallen = hstoreCheckValLen(len);
pairs[i].isnull = false;
}
}
pcount = hstoreUniquePairs(pairs, pcount, &buflen);
out = hstorePairs(pairs, pcount, buflen);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_from_text);
Datum hstore_from_text(PG_FUNCTION_ARGS);
Datum
hstore_from_text(PG_FUNCTION_ARGS)
{
text *key;
text *val = NULL;
Pairs p;
HStore *out;
if (PG_ARGISNULL(0))
PG_RETURN_NULL();
p.needfree = false;
key = PG_GETARG_TEXT_PP(0);
p.key = VARDATA_ANY(key);
p.keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(key));
if (PG_ARGISNULL(1))
{
p.vallen = 0;
p.isnull = true;
}
else
{
val = PG_GETARG_TEXT_PP(1);
p.val = VARDATA_ANY(val);
p.vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(val));
p.isnull = false;
}
out = hstorePairs(&p, 1, p.keylen + p.vallen);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_from_arrays);
Datum hstore_from_arrays(PG_FUNCTION_ARGS);
Datum
hstore_from_arrays(PG_FUNCTION_ARGS)
{
int32 buflen;
HStore *out;
Pairs *pairs;
Datum *key_datums;
bool *key_nulls;
int key_count;
Datum *value_datums;
bool *value_nulls;
int value_count;
ArrayType *key_array;
ArrayType *value_array;
int i;
if (PG_ARGISNULL(0))
PG_RETURN_NULL();
key_array = PG_GETARG_ARRAYTYPE_P(0);
Assert(ARR_ELEMTYPE(key_array) == TEXTOID);
/*
* must check >1 rather than != 1 because empty arrays have 0 dimensions,
* not 1
*/
if (ARR_NDIM(key_array) > 1)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("wrong number of array subscripts")));
deconstruct_array(key_array,
TEXTOID, -1, false, 'i',
&key_datums, &key_nulls, &key_count);
/* see discussion in hstoreArrayToPairs() */
if (key_count > MaxAllocSize / sizeof(Pairs))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
key_count, (int) (MaxAllocSize / sizeof(Pairs)))));
/* value_array might be NULL */
if (PG_ARGISNULL(1))
{
value_array = NULL;
value_count = key_count;
value_datums = NULL;
value_nulls = NULL;
}
else
{
value_array = PG_GETARG_ARRAYTYPE_P(1);
Assert(ARR_ELEMTYPE(value_array) == TEXTOID);
if (ARR_NDIM(value_array) > 1)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("wrong number of array subscripts")));
if ((ARR_NDIM(key_array) > 0 || ARR_NDIM(value_array) > 0) &&
(ARR_NDIM(key_array) != ARR_NDIM(value_array) ||
ARR_DIMS(key_array)[0] != ARR_DIMS(value_array)[0] ||
ARR_LBOUND(key_array)[0] != ARR_LBOUND(value_array)[0]))
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("arrays must have same bounds")));
deconstruct_array(value_array,
TEXTOID, -1, false, 'i',
&value_datums, &value_nulls, &value_count);
Assert(key_count == value_count);
}
pairs = palloc(key_count * sizeof(Pairs));
for (i = 0; i < key_count; ++i)
{
if (key_nulls[i])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null value not allowed for hstore key")));
if (!value_nulls || value_nulls[i])
{
pairs[i].key = VARDATA_ANY(key_datums[i]);
pairs[i].val = NULL;
pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(key_datums[i]));
pairs[i].vallen = 4;
pairs[i].isnull = true;
pairs[i].needfree = false;
}
else
{
pairs[i].key = VARDATA_ANY(key_datums[i]);
pairs[i].val = VARDATA_ANY(value_datums[i]);
pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(key_datums[i]));
pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(value_datums[i]));
pairs[i].isnull = false;
pairs[i].needfree = false;
}
}
key_count = hstoreUniquePairs(pairs, key_count, &buflen);
out = hstorePairs(pairs, key_count, buflen);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_from_array);
Datum hstore_from_array(PG_FUNCTION_ARGS);
Datum
hstore_from_array(PG_FUNCTION_ARGS)
{
ArrayType *in_array = PG_GETARG_ARRAYTYPE_P(0);
int ndims = ARR_NDIM(in_array);
int count;
int32 buflen;
HStore *out;
Pairs *pairs;
Datum *in_datums;
bool *in_nulls;
int in_count;
int i;
Assert(ARR_ELEMTYPE(in_array) == TEXTOID);
switch (ndims)
{
case 0:
out = hstorePairs(NULL, 0, 0);
PG_RETURN_POINTER(out);
case 1:
if ((ARR_DIMS(in_array)[0]) % 2)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array must have even number of elements")));
break;
case 2:
if ((ARR_DIMS(in_array)[1]) != 2)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array must have two columns")));
break;
default:
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("wrong number of array subscripts")));
}
deconstruct_array(in_array,
TEXTOID, -1, false, 'i',
&in_datums, &in_nulls, &in_count);
count = in_count / 2;
/* see discussion in hstoreArrayToPairs() */
if (count > MaxAllocSize / sizeof(Pairs))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
count, (int) (MaxAllocSize / sizeof(Pairs)))));
pairs = palloc(count * sizeof(Pairs));
for (i = 0; i < count; ++i)
{
if (in_nulls[i * 2])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null value not allowed for hstore key")));
if (in_nulls[i * 2 + 1])
{
pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
pairs[i].val = NULL;
pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
pairs[i].vallen = 4;
pairs[i].isnull = true;
pairs[i].needfree = false;
}
else
{
pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
pairs[i].val = VARDATA_ANY(in_datums[i * 2 + 1]);
pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(in_datums[i * 2 + 1]));
pairs[i].isnull = false;
pairs[i].needfree = false;
}
}
count = hstoreUniquePairs(pairs, count, &buflen);
out = hstorePairs(pairs, count, buflen);
PG_RETURN_POINTER(out);
}
/* most of hstore_from_record is shamelessly swiped from record_out */
/*
* structure to cache metadata needed for record I/O
*/
typedef struct ColumnIOData
{
Oid column_type;
Oid typiofunc;
Oid typioparam;
FmgrInfo proc;
} ColumnIOData;
typedef struct RecordIOData
{
Oid record_type;
int32 record_typmod;
int ncolumns;
ColumnIOData columns[1]; /* VARIABLE LENGTH ARRAY */
} RecordIOData;
PG_FUNCTION_INFO_V1(hstore_from_record);
Datum hstore_from_record(PG_FUNCTION_ARGS);
Datum
hstore_from_record(PG_FUNCTION_ARGS)
{
HeapTupleHeader rec;
int32 buflen;
HStore *out;
Pairs *pairs;
Oid tupType;
int32 tupTypmod;
TupleDesc tupdesc;
HeapTupleData tuple;
RecordIOData *my_extra;
int ncolumns;
int i,
j;
Datum *values;
bool *nulls;
if (PG_ARGISNULL(0))
{
Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
/*
* have no tuple to look at, so the only source of type info is the
* argtype. The lookup_rowtype_tupdesc call below will error out if we
* don't have a known composite type oid here.
*/
tupType = argtype;
tupTypmod = -1;
rec = NULL;
}
else
{
rec = PG_GETARG_HEAPTUPLEHEADER(0);
/* Extract type info from the tuple itself */
tupType = HeapTupleHeaderGetTypeId(rec);
tupTypmod = HeapTupleHeaderGetTypMod(rec);
}
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
/*
* We arrange to look up the needed I/O info just once per series of
* calls, assuming the record type doesn't change underneath us.
*/
my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL ||
my_extra->ncolumns != ncolumns)
{
fcinfo->flinfo->fn_extra =
MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
sizeof(RecordIOData) - sizeof(ColumnIOData)
+ ncolumns * sizeof(ColumnIOData));
my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
my_extra->record_type = InvalidOid;
my_extra->record_typmod = 0;
}
if (my_extra->record_type != tupType ||
my_extra->record_typmod != tupTypmod)
{
MemSet(my_extra, 0,
sizeof(RecordIOData) - sizeof(ColumnIOData)
+ ncolumns * sizeof(ColumnIOData));
my_extra->record_type = tupType;
my_extra->record_typmod = tupTypmod;
my_extra->ncolumns = ncolumns;
}
Assert(ncolumns <= MaxTupleAttributeNumber); /* thus, no overflow */
pairs = palloc(ncolumns * sizeof(Pairs));
if (rec)
{
/* Build a temporary HeapTuple control structure */
tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
ItemPointerSetInvalid(&(tuple.t_self));
tuple.t_tableOid = InvalidOid;
tuple.t_data = rec;
values = (Datum *) palloc(ncolumns * sizeof(Datum));
nulls = (bool *) palloc(ncolumns * sizeof(bool));
/* Break down the tuple into fields */
heap_deform_tuple(&tuple, tupdesc, values, nulls);
}
else
{
values = NULL;
nulls = NULL;
}
for (i = 0, j = 0; i < ncolumns; ++i)
{
ColumnIOData *column_info = &my_extra->columns[i];
Oid column_type = tupdesc->attrs[i]->atttypid;
char *value;
/* Ignore dropped columns in datatype */
if (tupdesc->attrs[i]->attisdropped)
continue;
pairs[j].key = NameStr(tupdesc->attrs[i]->attname);
pairs[j].keylen = hstoreCheckKeyLen(strlen(NameStr(tupdesc->attrs[i]->attname)));
if (!nulls || nulls[i])
{
pairs[j].val = NULL;
pairs[j].vallen = 4;
pairs[j].isnull = true;
pairs[j].needfree = false;
++j;
continue;
}
/*
* Convert the column value to text
*/
if (column_info->column_type != column_type)
{
bool typIsVarlena;
getTypeOutputInfo(column_type,
&column_info->typiofunc,
&typIsVarlena);
fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
fcinfo->flinfo->fn_mcxt);
column_info->column_type = column_type;
}
value = OutputFunctionCall(&column_info->proc, values[i]);
pairs[j].val = value;
pairs[j].vallen = hstoreCheckValLen(strlen(value));
pairs[j].isnull = false;
pairs[j].needfree = false;
++j;
}
ncolumns = hstoreUniquePairs(pairs, j, &buflen);
out = hstorePairs(pairs, ncolumns, buflen);
ReleaseTupleDesc(tupdesc);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_populate_record);
Datum hstore_populate_record(PG_FUNCTION_ARGS);
Datum
hstore_populate_record(PG_FUNCTION_ARGS)
{
Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
HStore *hs;
HEntry *entries;
char *ptr;
HeapTupleHeader rec;
Oid tupType;
int32 tupTypmod;
TupleDesc tupdesc;
HeapTupleData tuple;
HeapTuple rettuple;
RecordIOData *my_extra;
int ncolumns;
int i;
Datum *values;
bool *nulls;
if (!type_is_rowtype(argtype))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("first argument must be a rowtype")));
if (PG_ARGISNULL(0))
{
if (PG_ARGISNULL(1))
PG_RETURN_NULL();
rec = NULL;
/*
* have no tuple to look at, so the only source of type info is the
* argtype. The lookup_rowtype_tupdesc call below will error out if we
* don't have a known composite type oid here.
*/
tupType = argtype;
tupTypmod = -1;
}
else
{
rec = PG_GETARG_HEAPTUPLEHEADER(0);
if (PG_ARGISNULL(1))
PG_RETURN_POINTER(rec);
/* Extract type info from the tuple itself */
tupType = HeapTupleHeaderGetTypeId(rec);
tupTypmod = HeapTupleHeaderGetTypMod(rec);
}
hs = PG_GETARG_HS(1);
entries = ARRPTR(hs);
ptr = STRPTR(hs);
/*
* if the input hstore is empty, we can only skip the rest if we were
* passed in a non-null record, since otherwise there may be issues with
* domain nulls.
*/
if (HS_COUNT(hs) == 0 && rec)
PG_RETURN_POINTER(rec);
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
if (rec)
{
/* Build a temporary HeapTuple control structure */
tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
ItemPointerSetInvalid(&(tuple.t_self));
tuple.t_tableOid = InvalidOid;
tuple.t_data = rec;
}
/*
* We arrange to look up the needed I/O info just once per series of
* calls, assuming the record type doesn't change underneath us.
*/
my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL ||
my_extra->ncolumns != ncolumns)
{
fcinfo->flinfo->fn_extra =
MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
sizeof(RecordIOData) - sizeof(ColumnIOData)
+ ncolumns * sizeof(ColumnIOData));
my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra;
my_extra->record_type = InvalidOid;
my_extra->record_typmod = 0;
}
if (my_extra->record_type != tupType ||
my_extra->record_typmod != tupTypmod)
{
MemSet(my_extra, 0,
sizeof(RecordIOData) - sizeof(ColumnIOData)
+ ncolumns * sizeof(ColumnIOData));
my_extra->record_type = tupType;
my_extra->record_typmod = tupTypmod;
my_extra->ncolumns = ncolumns;
}
values = (Datum *) palloc(ncolumns * sizeof(Datum));
nulls = (bool *) palloc(ncolumns * sizeof(bool));
if (rec)
{
/* Break down the tuple into fields */
heap_deform_tuple(&tuple, tupdesc, values, nulls);
}
else
{
for (i = 0; i < ncolumns; ++i)
{
values[i] = (Datum) 0;
nulls[i] = true;
}
}
for (i = 0; i < ncolumns; ++i)
{
ColumnIOData *column_info = &my_extra->columns[i];
Oid column_type = tupdesc->attrs[i]->atttypid;
char *value;
int idx;
int vallen;
/* Ignore dropped columns in datatype */
if (tupdesc->attrs[i]->attisdropped)
{
nulls[i] = true;
continue;
}
idx = hstoreFindKey(hs, 0,
NameStr(tupdesc->attrs[i]->attname),
strlen(NameStr(tupdesc->attrs[i]->attname)));
/*
* we can't just skip here if the key wasn't found since we might have
* a domain to deal with. If we were passed in a non-null record
* datum, we assume that the existing values are valid (if they're
* not, then it's not our fault), but if we were passed in a null,
* then every field which we don't populate needs to be run through
* the input function just in case it's a domain type.
*/
if (idx < 0 && rec)
continue;
/*
* Prepare to convert the column value from text
*/
if (column_info->column_type != column_type)
{
getTypeInputInfo(column_type,
&column_info->typiofunc,
&column_info->typioparam);
fmgr_info_cxt(column_info->typiofunc, &column_info->proc,
fcinfo->flinfo->fn_mcxt);
column_info->column_type = column_type;
}
if (idx < 0 || HS_VALISNULL(entries, idx))
{
/*
* need InputFunctionCall to happen even for nulls, so that domain
* checks are done
*/
values[i] = InputFunctionCall(&column_info->proc, NULL,
column_info->typioparam,
tupdesc->attrs[i]->atttypmod);
nulls[i] = true;
}
else
{
vallen = HS_VALLEN(entries, idx);
value = palloc(1 + vallen);
memcpy(value, HS_VAL(entries, ptr, idx), vallen);
value[vallen] = 0;
values[i] = InputFunctionCall(&column_info->proc, value,
column_info->typioparam,
tupdesc->attrs[i]->atttypmod);
nulls[i] = false;
}
}
rettuple = heap_form_tuple(tupdesc, values, nulls);
ReleaseTupleDesc(tupdesc);
PG_RETURN_DATUM(HeapTupleGetDatum(rettuple));
}
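/*
 * Copy len bytes from src to dst, prefixing '"' and '\' with a backslash;
 * returns the advanced destination pointer.
 */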
static char *
cpw(char *dst, char *src, int len)
{
char *ptr = src;
while (ptr - src < len)
{
if (*ptr == '"' || *ptr == '\\')
*dst++ = '\\';
*dst++ = *ptr++;
}
return dst;
}
PG_FUNCTION_INFO_V1(hstore_out);
Datum hstore_out(PG_FUNCTION_ARGS);
Datum
hstore_out(PG_FUNCTION_ARGS)
{
HStore *in = PG_GETARG_HS(0);
int buflen,
i;
int count = HS_COUNT(in);
char *out,
*ptr;
char *base = STRPTR(in);
HEntry *entries = ARRPTR(in);
if (count == 0)
PG_RETURN_CSTRING(pstrdup(""));
buflen = 0;
/*
* this loop overestimates due to pessimistic assumptions about escaping,
* so very large hstore values can't be output. this could be fixed, but
* many other data types probably have the same issue. This replaced code
* that used the original varlena size for calculations, which was wrong
* in some subtle ways.
*/
for (i = 0; i < count; i++)
{
/* include "" and => and comma-space */
buflen += 6 + 2 * HS_KEYLEN(entries, i);
/* include "" only if nonnull */
buflen += 2 + (HS_VALISNULL(entries, i)
? 2
: 2 * HS_VALLEN(entries, i));
}
out = ptr = palloc(buflen);
for (i = 0; i < count; i++)
{
*ptr++ = '"';
ptr = cpw(ptr, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
*ptr++ = '"';
*ptr++ = '=';
*ptr++ = '>';
if (HS_VALISNULL(entries, i))
{
*ptr++ = 'N';
*ptr++ = 'U';
*ptr++ = 'L';
*ptr++ = 'L';
}
else
{
*ptr++ = '"';
ptr = cpw(ptr, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
*ptr++ = '"';
}
if (i + 1 != count)
{
*ptr++ = ',';
*ptr++ = ' ';
}
}
*ptr = '\0';
PG_RETURN_CSTRING(out);
}
PG_FUNCTION_INFO_V1(hstore_send);
Datum hstore_send(PG_FUNCTION_ARGS);
Datum
hstore_send(PG_FUNCTION_ARGS)
{
HStore *in = PG_GETARG_HS(0);
int i;
int count = HS_COUNT(in);
char *base = STRPTR(in);
HEntry *entries = ARRPTR(in);
StringInfoData buf;
pq_begintypsend(&buf);
pq_sendint(&buf, count, 4);
for (i = 0; i < count; i++)
{
int32 keylen = HS_KEYLEN(entries, i);
pq_sendint(&buf, keylen, 4);
pq_sendtext(&buf, HS_KEY(entries, base, i), keylen);
if (HS_VALISNULL(entries, i))
{
pq_sendint(&buf, -1, 4);
}
else
{
int32 vallen = HS_VALLEN(entries, i);
pq_sendint(&buf, vallen, 4);
pq_sendtext(&buf, HS_VAL(entries, base, i), vallen);
}
}
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
/*
* hstore_to_json_loose
*
* This is a heuristic conversion to json which treats
* 't' and 'f' as booleans and strings that look like numbers as numbers,
* as long as they don't start with a leading zero followed by another digit
* (think zip codes or phone numbers starting with 0).
*/
PG_FUNCTION_INFO_V1(hstore_to_json_loose);
Datum hstore_to_json_loose(PG_FUNCTION_ARGS);
Datum
hstore_to_json_loose(PG_FUNCTION_ARGS)
{
HStore *in = PG_GETARG_HS(0);
int buflen,
i;
int count = HS_COUNT(in);
char *out,
*ptr;
char *base = STRPTR(in);
HEntry *entries = ARRPTR(in);
bool is_number;
StringInfo src,
dst;
if (count == 0)
PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));
buflen = 3;
/*
* Formula adjusted slightly from the logic in hstore_out. We have to take
* account of our treatment of booleans to be a bit more pessimistic about
* the length of values.
*/
for (i = 0; i < count; i++)
{
/* include "" and colon-space and comma-space */
buflen += 6 + 2 * HS_KEYLEN(entries, i);
/* include "" only if nonnull */
buflen += 3 + (HS_VALISNULL(entries, i)
? 1
: 2 * HS_VALLEN(entries, i));
}
out = ptr = palloc(buflen);
src = makeStringInfo();
dst = makeStringInfo();
*ptr++ = '{';
for (i = 0; i < count; i++)
{
resetStringInfo(src);
resetStringInfo(dst);
appendBinaryStringInfo(src, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
escape_json(dst, src->data);
strncpy(ptr, dst->data, dst->len);
ptr += dst->len;
*ptr++ = ':';
*ptr++ = ' ';
resetStringInfo(dst);
if (HS_VALISNULL(entries, i))
appendStringInfoString(dst, "null");
/* guess that values of 't' or 'f' are booleans */
else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 't')
appendStringInfoString(dst, "true");
else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 'f')
appendStringInfoString(dst, "false");
else
{
is_number = false;
resetStringInfo(src);
appendBinaryStringInfo(src, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
/*
* don't treat something with a leading zero followed by another
* digit as numeric - could be a zip code or similar
*/
if (src->len > 0 &&
!(src->data[0] == '0' &&
isdigit((unsigned char) src->data[1])) &&
strspn(src->data, "+-0123456789Ee.") == src->len)
{
/*
* might be a number. See if we can input it as a numeric
* value. Ignore any actual parsed value.
*/
char *endptr = "junk";
long lval;
lval = strtol(src->data, &endptr, 10);
(void) lval;
if (*endptr == '\0')
{
/*
* strtol man page says this means the whole string is
* valid
*/
is_number = true;
}
else
{
/* not an int - try a double */
double dval;
dval = strtod(src->data, &endptr);
(void) dval;
if (*endptr == '\0')
is_number = true;
}
}
if (is_number)
appendBinaryStringInfo(dst, src->data, src->len);
else
escape_json(dst, src->data);
}
strncpy(ptr, dst->data, dst->len);
ptr += dst->len;
if (i + 1 != count)
{
*ptr++ = ',';
*ptr++ = ' ';
}
}
*ptr++ = '}';
*ptr = '\0';
PG_RETURN_TEXT_P(cstring_to_text(out));
}
PG_FUNCTION_INFO_V1(hstore_to_json);
Datum hstore_to_json(PG_FUNCTION_ARGS);
Datum
hstore_to_json(PG_FUNCTION_ARGS)
{
HStore *in = PG_GETARG_HS(0);
int buflen,
i;
int count = HS_COUNT(in);
char *out,
*ptr;
char *base = STRPTR(in);
HEntry *entries = ARRPTR(in);
StringInfo src,
dst;
if (count == 0)
PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));
buflen = 3;
/*
* Formula copied from hstore_to_json_loose; it is slightly pessimistic
* here since plain hstore_to_json does not give booleans any special
* treatment, but overestimating the length is harmless.
*/
for (i = 0; i < count; i++)
{
/* include "" and colon-space and comma-space */
buflen += 6 + 2 * HS_KEYLEN(entries, i);
/* include "" only if nonnull */
buflen += 3 + (HS_VALISNULL(entries, i)
? 1
: 2 * HS_VALLEN(entries, i));
}
out = ptr = palloc(buflen);
src = makeStringInfo();
dst = makeStringInfo();
*ptr++ = '{';
for (i = 0; i < count; i++)
{
resetStringInfo(src);
resetStringInfo(dst);
appendBinaryStringInfo(src, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
escape_json(dst, src->data);
strncpy(ptr, dst->data, dst->len);
ptr += dst->len;
*ptr++ = ':';
*ptr++ = ' ';
resetStringInfo(dst);
if (HS_VALISNULL(entries, i))
appendStringInfoString(dst, "null");
else
{
resetStringInfo(src);
appendBinaryStringInfo(src, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
escape_json(dst, src->data);
}
strncpy(ptr, dst->data, dst->len);
ptr += dst->len;
if (i + 1 != count)
{
*ptr++ = ',';
*ptr++ = ' ';
}
}
*ptr++ = '}';
*ptr = '\0';
PG_RETURN_TEXT_P(cstring_to_text(out));
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2020_1 |
crossvul-cpp_data_good_5793_0 |
/*
* Local APIC virtualization
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright (C) 2007 Novell
* Copyright (C) 2007 Intel
* Copyright 2009 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Dor Laor <dor.laor@qumranet.com>
* Gregory Haskins <ghaskins@novell.com>
* Yaozu (Eddie) Dong <eddie.dong@intel.com>
*
* Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"
#define APIC_BUS_CYCLE_NS 1
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
#define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH (1 << 12)
/* followed define is not in apicdef.h */
#define APIC_SHORT_MASK 0xc0000
#define APIC_DEST_NOSHORT 0x0
#define APIC_DEST_MASK 0x800
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
static unsigned int min_timer_period_us = 500;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
*((u32 *) (apic->regs + reg_off)) = val;
}
static inline int apic_test_vector(int vec, void *bitmap)
{
return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
struct kvm_lapic *apic = vcpu->arch.apic;
return apic_test_vector(vector, apic->regs + APIC_ISR) ||
apic_test_vector(vector, apic->regs + APIC_IRR);
}
static inline void apic_set_vector(int vec, void *bitmap)
{
set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline void apic_clear_vector(int vec, void *bitmap)
{
clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
if ((kvm_apic_get_reg(apic, APIC_SPIV) ^ val) & APIC_SPIV_APIC_ENABLED) {
if (val & APIC_SPIV_APIC_ENABLED)
static_key_slow_dec_deferred(&apic_sw_disabled);
else
static_key_slow_inc(&apic_sw_disabled.key);
}
apic_set_reg(apic, APIC_SPIV, val);
}
static inline int apic_enabled(struct kvm_lapic *apic)
{
return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}
#define LVT_MASK \
(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
#define LINT_MASK \
(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
static inline int kvm_apic_id(struct kvm_lapic *apic)
{
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
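/*
 * Rebuild the physical and logical APIC id lookup tables under
 * apic_map_lock and publish them with rcu_assign_pointer(); the old map
 * is reclaimed via kfree_rcu() once readers are done.
 */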
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
mutex_lock(&kvm->arch.apic_map_lock);
if (!new)
goto out;
new->ldr_bits = 8;
/* flat mode is default */
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
u32 ldr;
if (!kvm_apic_present(vcpu))
continue;
/*
* All APICs have to be configured in the same mode by an OS.
* We take advantage of this while building the logical id lookup
* table. After reset, APICs are in xapic/flat mode, so if we
* find an apic with a different setting we assume this is the mode the
* OS wants all apics to be in; build the lookup table accordingly.
*/
if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
new->cid_mask = new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
new->cid_shift = 4;
new->cid_mask = 0xf;
new->lid_mask = 0xf;
}
new->phys_map[kvm_apic_id(apic)] = apic;
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
lid = apic_logical_id(new, ldr);
if (lid)
new->logical_map[cid][ffs(lid) - 1] = apic;
}
out:
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
kfree_rcu(old, rcu);
kvm_vcpu_request_scan_ioapic(kvm);
}
static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
{
apic_set_reg(apic, APIC_ID, id << 24);
recalculate_apic_map(apic->vcpu->kvm);
}
static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
apic_set_reg(apic, APIC_LDR, id);
recalculate_apic_map(apic->vcpu->kvm);
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}
static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}
static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
}
static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
}
static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) ==
APIC_LVT_TIMER_TSCDEADLINE);
}
static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}
void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *feat;
u32 v = APIC_VERSION;
if (!kvm_vcpu_has_lapic(vcpu))
return;
feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
v |= APIC_LVR_DIRECTED_EOI;
apic_set_reg(apic, APIC_LVR, v);
}
static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
LVT_MASK, /* part LVTT mask, timer mode mask added at runtime */
LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
LVT_MASK | APIC_MODE_MASK, /* LVTPC */
LINT_MASK, LINT_MASK, /* LVT0-1 */
LVT_MASK /* LVTERR */
};
static int find_highest_vector(void *bitmap)
{
int vec;
u32 *reg;
for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
vec >= 0; vec -= APIC_VECTORS_PER_REG) {
reg = bitmap + REG_POS(vec);
if (*reg)
return fls(*reg) - 1 + vec;
}
return -1;
}
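/*
 * Layout note for find_highest_vector() and count_vectors(): IRR, ISR
 * and TMR each hold 256 vector bits in eight 32-bit registers spaced
 * 0x10 bytes apart. For example, vector 0x31 (49) lives in the second
 * register (offset 0x10) at bit 49 - 32 = 17, and find_highest_vector()
 * recovers it as fls(reg) - 1 + 32 = 49.
 */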
static u8 count_vectors(void *bitmap)
{
int vec;
u32 *reg;
u8 count = 0;
for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
reg = bitmap + REG_POS(vec);
count += hweight32(*reg);
}
return count;
}
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
u32 i, pir_val;
struct kvm_lapic *apic = vcpu->arch.apic;
for (i = 0; i <= 7; i++) {
pir_val = xchg(&pir[i], 0);
if (pir_val)
*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
}
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = true;
apic_set_vector(vec, apic->regs + APIC_IRR);
}
static inline int apic_search_irr(struct kvm_lapic *apic)
{
return find_highest_vector(apic->regs + APIC_IRR);
}
static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
int result;
/*
 * Note that irr_pending is just a hint. It will always be
 * true with virtual interrupt delivery enabled.
*/
if (!apic->irr_pending)
return -1;
kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
result = apic_search_irr(apic);
ASSERT(result == -1 || result >= 16);
return result;
}
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = false;
apic_clear_vector(vec, apic->regs + APIC_IRR);
if (apic_search_irr(apic) != -1)
apic->irr_pending = true;
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
/*
* ISR (in service register) bit is set when injecting an interrupt.
* The highest vector is injected. Thus the latest bit set matches
* the highest bit in ISR.
*/
apic->highest_isr_cache = vec;
}
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
--apic->isr_count;
BUG_ON(apic->isr_count < 0);
apic->highest_isr_cache = -1;
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
int highest_irr;
/* This may race with setting of irr in __apic_accept_irq() and
 * the value returned may be wrong, but kvm_vcpu_kick() in
 * __apic_accept_irq will cause a vmexit immediately and the value
 * will be recalculated on the next vmentry.
 */
if (!kvm_vcpu_has_lapic(vcpu))
return 0;
highest_irr = apic_find_highest_irr(vcpu->arch.apic);
return highest_irr;
}
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
unsigned long *dest_map);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
unsigned long *dest_map)
{
struct kvm_lapic *apic = vcpu->arch.apic;
return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
irq->level, irq->trig_mode, dest_map);
}
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
sizeof(val));
}
static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
sizeof(*val));
}
static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
u8 val;
if (pv_eoi_get_user(vcpu, &val) < 0) {
apic_debug("Can't read EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return false;
}
return val & 0x1;
}
static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
apic_debug("Can't set EOI MSR value: 0x%llx\n",
(unsigned long long)vcpi->arch.pv_eoi.msr_val);
return;
}
__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
apic_debug("Can't clear EOI MSR value: 0x%llx\n",
(unsigned long long)vcpi->arch.pv_eoi.msr_val);
return;
}
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
int result;
/* Note that isr_count is always 1 with vid enabled */
if (!apic->isr_count)
return -1;
if (likely(apic->highest_isr_cache != -1))
return apic->highest_isr_cache;
result = find_highest_vector(apic->regs + APIC_ISR);
ASSERT(result == -1 || result >= 16);
return result;
}
void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int i;
for (i = 0; i < 8; i++)
apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
}
static void apic_update_ppr(struct kvm_lapic *apic)
{
u32 tpr, isrv, ppr, old_ppr;
int isr;
old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
isr = apic_find_highest_isr(apic);
isrv = (isr != -1) ? isr : 0;
if ((tpr & 0xf0) >= (isrv & 0xf0))
ppr = tpr & 0xff;
else
ppr = isrv & 0xf0;
apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
apic, ppr, isr, isrv);
if (old_ppr != ppr) {
apic_set_reg(apic, APIC_PROCPRI, ppr);
if (ppr < old_ppr)
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
}
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
apic_set_reg(apic, APIC_TASKPRI, tpr);
apic_update_ppr(apic);
}
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
return dest == 0xff || kvm_apic_id(apic) == dest;
}
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
int result = 0;
u32 logical_id;
if (apic_x2apic_mode(apic)) {
logical_id = kvm_apic_get_reg(apic, APIC_LDR);
return logical_id & mda;
}
logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR));
switch (kvm_apic_get_reg(apic, APIC_DFR)) {
case APIC_DFR_FLAT:
if (logical_id & mda)
result = 1;
break;
case APIC_DFR_CLUSTER:
if (((logical_id >> 4) == (mda >> 0x4))
&& (logical_id & mda & 0xf))
result = 1;
break;
default:
apic_debug("Bad DFR vcpu %d: %08x\n",
apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
break;
}
return result;
}
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, int dest, int dest_mode)
{
int result = 0;
struct kvm_lapic *target = vcpu->arch.apic;
apic_debug("target %p, source %p, dest 0x%x, "
"dest_mode 0x%x, short_hand 0x%x\n",
target, source, dest, dest_mode, short_hand);
ASSERT(target);
switch (short_hand) {
case APIC_DEST_NOSHORT:
if (dest_mode == 0)
/* Physical mode. */
result = kvm_apic_match_physical_addr(target, dest);
else
/* Logical mode. */
result = kvm_apic_match_logical_addr(target, dest);
break;
case APIC_DEST_SELF:
result = (target == source);
break;
case APIC_DEST_ALLINC:
result = 1;
break;
case APIC_DEST_ALLBUT:
result = (target != source);
break;
default:
apic_debug("kvm: apic: Bad dest shorthand value %x\n",
short_hand);
break;
}
return result;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
{
struct kvm_apic_map *map;
unsigned long bitmap = 1;
struct kvm_lapic **dst;
int i;
bool ret = false;
*r = -1;
if (irq->shorthand == APIC_DEST_SELF) {
*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
return true;
}
if (irq->shorthand)
return false;
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
if (!map)
goto out;
if (irq->dest_mode == 0) { /* physical mode */
if (irq->delivery_mode == APIC_DM_LOWEST ||
irq->dest_id == 0xff)
goto out;
dst = &map->phys_map[irq->dest_id & 0xff];
} else {
u32 mda = irq->dest_id << (32 - map->ldr_bits);
dst = map->logical_map[apic_cluster_id(map, mda)];
bitmap = apic_logical_id(map, mda);
if (irq->delivery_mode == APIC_DM_LOWEST) {
int l = -1;
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
if (l < 0)
l = i;
else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
l = i;
}
bitmap = (l >= 0) ? 1 << l : 0;
}
}
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
if (*r < 0)
*r = 0;
*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
}
ret = true;
out:
rcu_read_unlock();
return ret;
}
/*
* Add a pending IRQ into lapic.
* Return 1 if successfully added and 0 if discarded.
*/
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
unsigned long *dest_map)
{
int result = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
switch (delivery_mode) {
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
/* fall through */
case APIC_DM_FIXED:
/* FIXME add logic for vcpu on reset */
if (unlikely(!apic_enabled(apic)))
break;
result = 1;
if (dest_map)
__set_bit(vcpu->vcpu_id, dest_map);
if (kvm_x86_ops->deliver_posted_interrupt)
kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
else {
apic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}
trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
trig_mode, vector, false);
break;
case APIC_DM_REMRD:
result = 1;
vcpu->arch.pv.pv_unhalted = 1;
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_SMI:
apic_debug("Ignoring guest SMI\n");
break;
case APIC_DM_NMI:
result = 1;
kvm_inject_nmi(vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_INIT:
if (!trig_mode || level) {
result = 1;
/* assumes that there are only KVM_APIC_INIT/SIPI */
apic->pending_events = (1UL << KVM_APIC_INIT);
/* make sure pending_events is visible before sending
* the request */
smp_wmb();
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
} else {
apic_debug("Ignoring de-assert INIT to vcpu %d\n",
vcpu->vcpu_id);
}
break;
case APIC_DM_STARTUP:
apic_debug("SIPI to vcpu %d vector 0x%02x\n",
vcpu->vcpu_id, vector);
result = 1;
apic->sipi_vector = vector;
/* make sure sipi_vector is visible for the receiver */
smp_wmb();
set_bit(KVM_APIC_SIPI, &apic->pending_events);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_EXTINT:
/*
* Should only be called by kvm_apic_local_deliver() with LVT0,
* before NMI watchdog was enabled. Already handled by
* kvm_apic_accept_pic_intr().
*/
break;
default:
printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
delivery_mode);
break;
}
return result;
}
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
int trigger_mode;
if (apic_test_vector(vector, apic->regs + APIC_TMR))
trigger_mode = IOAPIC_LEVEL_TRIG;
else
trigger_mode = IOAPIC_EDGE_TRIG;
kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}
}
static int apic_set_eoi(struct kvm_lapic *apic)
{
int vector = apic_find_highest_isr(apic);
trace_kvm_eoi(apic, vector);
/*
 * Not every EOI write has a corresponding bit set in the ISR;
 * one example is when the kernel checks the timer during
 * setup_IO_APIC().
 */
if (vector == -1)
return vector;
apic_clear_isr(vector, apic);
apic_update_ppr(apic);
kvm_ioapic_send_eoi(apic, vector);
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
return vector;
}
/*
 * This interface assumes a trap-like exit, in which the desired side
 * effects, including the vISR and vPPR updates, have already completed.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
struct kvm_lapic *apic = vcpu->arch.apic;
trace_kvm_eoi(apic, vector);
kvm_ioapic_send_eoi(apic, vector);
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
static void apic_send_ipi(struct kvm_lapic *apic)
{
u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
struct kvm_lapic_irq irq;
irq.vector = icr_low & APIC_VECTOR_MASK;
irq.delivery_mode = icr_low & APIC_MODE_MASK;
irq.dest_mode = icr_low & APIC_DEST_MASK;
irq.level = icr_low & APIC_INT_ASSERT;
irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
irq.shorthand = icr_low & APIC_SHORT_MASK;
if (apic_x2apic_mode(apic))
irq.dest_id = icr_high;
else
irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
trace_kvm_apic_ipi(icr_low, irq.dest_id);
apic_debug("icr_high 0x%x, icr_low 0x%x, "
"short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
"dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
icr_high, icr_low, irq.shorthand, irq.dest_id,
irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
irq.vector);
kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
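/*
 * apic_get_tmcct() below converts the hrtimer's remaining time back to
 * bus cycles. Illustratively, assuming APIC_BUS_CYCLE_NS == 1 as in
 * KVM's lapic.h, a divide count of 4 and 2000 ns remaining (less than
 * one period) read back as a TMCCT of 2000 / (1 * 4) = 500.
 */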
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
ktime_t remaining;
s64 ns;
u32 tmcct;
ASSERT(apic != NULL);
/* if initial count is 0, current count should also be 0 */
if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
apic->lapic_timer.period == 0)
return 0;
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
if (ktime_to_ns(remaining) < 0)
remaining = ktime_set(0, 0);
ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
tmcct = div64_u64(ns,
(APIC_BUS_CYCLE_NS * apic->divide_count));
return tmcct;
}
static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
struct kvm_vcpu *vcpu = apic->vcpu;
struct kvm_run *run = vcpu->run;
kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
run->tpr_access.rip = kvm_rip_read(vcpu);
run->tpr_access.is_write = write;
}
static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
if (apic->vcpu->arch.tpr_access_reporting)
__report_tpr_access(apic, write);
}
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
u32 val = 0;
if (offset >= LAPIC_MMIO_LENGTH)
return 0;
switch (offset) {
case APIC_ID:
if (apic_x2apic_mode(apic))
val = kvm_apic_id(apic);
else
val = kvm_apic_id(apic) << 24;
break;
case APIC_ARBPRI:
apic_debug("Access APIC ARBPRI register which is for P6\n");
break;
case APIC_TMCCT: /* Timer CCR */
if (apic_lvtt_tscdeadline(apic))
return 0;
val = apic_get_tmcct(apic);
break;
case APIC_PROCPRI:
apic_update_ppr(apic);
val = kvm_apic_get_reg(apic, offset);
break;
case APIC_TASKPRI:
report_tpr_access(apic, false);
/* fall thru */
default:
val = kvm_apic_get_reg(apic, offset);
break;
}
return val;
}
static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
return container_of(dev, struct kvm_lapic, dev);
}
static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
void *data)
{
unsigned char alignment = offset & 0xf;
u32 result;
/* this bitmask has a bit cleared for each reserved register */
static const u64 rmask = 0x43ff01ffffffe70cULL;
if ((alignment + len) > 4) {
apic_debug("KVM_APIC_READ: alignment error %x %d\n",
offset, len);
return 1;
}
if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
apic_debug("KVM_APIC_READ: read reserved register %x\n",
offset);
return 1;
}
result = __apic_read(apic, offset & ~0xf);
trace_kvm_apic_read(offset, result);
switch (len) {
case 1:
case 2:
case 4:
memcpy(data, (char *)&result + alignment, len);
break;
default:
printk(KERN_ERR "Local APIC read with len = %x, "
"should be 1,2, or 4 instead\n", len);
break;
}
return 0;
}
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
return kvm_apic_hw_enabled(apic) &&
addr >= apic->base_address &&
addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
static int apic_mmio_read(struct kvm_io_device *this,
gpa_t address, int len, void *data)
{
struct kvm_lapic *apic = to_lapic(this);
u32 offset = address - apic->base_address;
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
apic_reg_read(apic, offset, len, data);
return 0;
}
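/*
 * The divide configuration lives in TDCR bits 0, 1 and 3; the shuffle
 * below folds bit 3 down next to bits 1:0 and adds one. For example,
 * TDCR = 0b0000 yields tmp2 = 1 and a divide count of 2, while
 * TDCR = 0b1011 yields tmp2 = 8, (8 & 7) = 0, i.e. divide by 1.
 */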
static void update_divide_count(struct kvm_lapic *apic)
{
u32 tmp1, tmp2, tdcr;
tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
tmp1 = tdcr & 0xf;
tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
apic->divide_count = 0x1 << (tmp2 & 0x7);
apic_debug("timer divide count is 0x%x\n",
apic->divide_count);
}
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
atomic_set(&apic->lapic_timer.pending, 0);
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
/* lapic timer in oneshot or periodic mode */
now = apic->lapic_timer.timer.base->get_time();
apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
* APIC_BUS_CYCLE_NS * apic->divide_count;
if (!apic->lapic_timer.period)
return;
/*
 * Do not allow the guest to program periodic timers with a
 * small interval, since the hrtimers are not throttled by
 * the host scheduler.
*/
if (apic_lvtt_period(apic)) {
s64 min_period = min_timer_period_us * 1000LL;
if (apic->lapic_timer.period < min_period) {
pr_info_ratelimited(
"kvm: vcpu %i: requested %lld ns "
"lapic timer period limited to %lld ns\n",
apic->vcpu->vcpu_id,
apic->lapic_timer.period, min_period);
apic->lapic_timer.period = min_period;
}
}
hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, apic->lapic_timer.period),
HRTIMER_MODE_ABS);
apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
PRIx64 ", "
"timer initial count 0x%x, period %lldns, "
"expire @ 0x%016" PRIx64 ".\n", __func__,
APIC_BUS_CYCLE_NS, ktime_to_ns(now),
kvm_apic_get_reg(apic, APIC_TMICT),
apic->lapic_timer.period,
ktime_to_ns(ktime_add_ns(now,
apic->lapic_timer.period)));
} else if (apic_lvtt_tscdeadline(apic)) {
/* lapic timer in tsc deadline mode */
u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
u64 ns = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
unsigned long flags;
if (unlikely(!tscdeadline || !this_tsc_khz))
return;
local_irq_save(flags);
now = apic->lapic_timer.timer.base->get_time();
guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
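/*
 * Convert the outstanding TSC ticks to nanoseconds:
 * ns = ticks * 1000000 / tsc_khz. Illustratively, with a 2 GHz guest
 * TSC (this_tsc_khz == 2000000), a delta of 2000000 ticks programs a
 * 1000000 ns (1 ms) hrtimer.
 */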
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
}
hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
local_irq_restore(flags);
}
}
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));
if (apic_lvt_nmi_mode(lvt0_val)) {
if (!nmi_wd_enabled) {
apic_debug("Receive NMI setting on APIC_LVT0 "
"for cpu %d\n", apic->vcpu->vcpu_id);
apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
}
} else if (nmi_wd_enabled)
apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
}
static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
int ret = 0;
trace_kvm_apic_write(reg, val);
switch (reg) {
case APIC_ID: /* Local APIC ID */
if (!apic_x2apic_mode(apic))
kvm_apic_set_id(apic, val >> 24);
else
ret = 1;
break;
case APIC_TASKPRI:
report_tpr_access(apic, true);
apic_set_tpr(apic, val & 0xff);
break;
case APIC_EOI:
apic_set_eoi(apic);
break;
case APIC_LDR:
if (!apic_x2apic_mode(apic))
kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
else
ret = 1;
break;
case APIC_DFR:
if (!apic_x2apic_mode(apic)) {
apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
recalculate_apic_map(apic->vcpu->kvm);
} else
ret = 1;
break;
case APIC_SPIV: {
u32 mask = 0x3ff;
if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
mask |= APIC_SPIV_DIRECTED_EOI;
apic_set_spiv(apic, val & mask);
if (!(val & APIC_SPIV_APIC_ENABLED)) {
int i;
u32 lvt_val;
for (i = 0; i < APIC_LVT_NUM; i++) {
lvt_val = kvm_apic_get_reg(apic,
APIC_LVTT + 0x10 * i);
apic_set_reg(apic, APIC_LVTT + 0x10 * i,
lvt_val | APIC_LVT_MASKED);
}
atomic_set(&apic->lapic_timer.pending, 0);
}
break;
}
case APIC_ICR:
/* No delay here, so we always clear the pending bit */
apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
apic_send_ipi(apic);
break;
case APIC_ICR2:
if (!apic_x2apic_mode(apic))
val &= 0xff000000;
apic_set_reg(apic, APIC_ICR2, val);
break;
case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val);
/* fall through */
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
case APIC_LVTERR:
/* TODO: Check vector */
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
apic_set_reg(apic, reg, val);
break;
case APIC_LVTT:
if ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) !=
(val & apic->lapic_timer.timer_mode_mask))
hrtimer_cancel(&apic->lapic_timer.timer);
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
apic_set_reg(apic, APIC_LVTT, val);
break;
case APIC_TMICT:
if (apic_lvtt_tscdeadline(apic))
break;
hrtimer_cancel(&apic->lapic_timer.timer);
apic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
break;
case APIC_TDCR:
if (val & 4)
apic_debug("KVM_WRITE:TDCR %x\n", val);
apic_set_reg(apic, APIC_TDCR, val);
update_divide_count(apic);
break;
case APIC_ESR:
if (apic_x2apic_mode(apic) && val != 0) {
apic_debug("KVM_WRITE:ESR not zero %x\n", val);
ret = 1;
}
break;
case APIC_SELF_IPI:
if (apic_x2apic_mode(apic)) {
apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
} else
ret = 1;
break;
default:
ret = 1;
break;
}
if (ret)
apic_debug("Local APIC Write to read-only register %x\n", reg);
return ret;
}
static int apic_mmio_write(struct kvm_io_device *this,
gpa_t address, int len, const void *data)
{
struct kvm_lapic *apic = to_lapic(this);
unsigned int offset = address - apic->base_address;
u32 val;
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
/*
 * APIC registers must be aligned on a 128-bit boundary.
 * 32/64/128-bit registers must be accessed through 32-bit
 * reads and writes; see SDM 8.4.1.
 */
if (len != 4 || (offset & 0xf)) {
/* Don't shout loud, $infamous_os would cause only noise. */
apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
return 0;
}
val = *(u32*)data;
/* too common printing */
if (offset != APIC_EOI)
apic_debug("%s: offset 0x%x with length 0x%x, and value is "
"0x%x\n", __func__, offset, len, val);
apic_reg_write(apic, offset & 0xff0, val);
return 0;
}
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_has_lapic(vcpu))
apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
u32 val = 0;
/* hw has done the conditional check and inst decode */
offset &= 0xff0;
apic_reg_read(vcpu->arch.apic, offset, 4, &val);
/* TODO: optimize to just emulate side effect w/o one more write */
apic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!vcpu->arch.apic)
return;
hrtimer_cancel(&apic->lapic_timer.timer);
if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
static_key_slow_dec_deferred(&apic_hw_disabled);
if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
static_key_slow_dec_deferred(&apic_sw_disabled);
if (apic->regs)
free_page((unsigned long)apic->regs);
kfree(apic);
}
/*
*----------------------------------------------------------------------
* LAPIC interface
*----------------------------------------------------------------------
*/
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return 0;
return apic->lapic_timer.tscdeadline;
}
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;
hrtimer_cancel(&apic->lapic_timer.timer);
apic->lapic_timer.tscdeadline = data;
start_apic_timer(apic);
}
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return;
apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
| (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
}
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
u64 tpr;
if (!kvm_vcpu_has_lapic(vcpu))
return 0;
tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
return (tpr & 0xf0) >> 4;
}
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
u64 old_value = vcpu->arch.apic_base;
struct kvm_lapic *apic = vcpu->arch.apic;
if (!apic) {
value |= MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
return;
}
/* update jump label if enable bit changes */
if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
static_key_slow_dec_deferred(&apic_hw_disabled);
else
static_key_slow_inc(&apic_hw_disabled.key);
recalculate_apic_map(vcpu->kvm);
}
if (!kvm_vcpu_is_bsp(apic->vcpu))
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
if ((old_value ^ value) & X2APIC_ENABLE) {
if (value & X2APIC_ENABLE) {
u32 id = kvm_apic_id(apic);
u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
kvm_apic_set_ldr(apic, ldr);
kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
} else
kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
}
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
/* With FSB-delivered interrupts, APIC functionality can be restarted. */
apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
"0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
}
void kvm_lapic_reset(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
int i;
apic_debug("%s\n", __func__);
ASSERT(vcpu);
apic = vcpu->arch.apic;
ASSERT(apic != NULL);
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
kvm_apic_set_id(apic, vcpu->vcpu_id);
kvm_apic_set_version(apic->vcpu);
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_set_reg(apic, APIC_DFR, 0xffffffffU);
apic_set_spiv(apic, 0xff);
apic_set_reg(apic, APIC_TASKPRI, 0);
kvm_apic_set_ldr(apic, 0);
apic_set_reg(apic, APIC_ESR, 0);
apic_set_reg(apic, APIC_ICR, 0);
apic_set_reg(apic, APIC_ICR2, 0);
apic_set_reg(apic, APIC_TDCR, 0);
apic_set_reg(apic, APIC_TMICT, 0);
for (i = 0; i < 8; i++) {
apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
}
apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
apic->highest_isr_cache = -1;
update_divide_count(apic);
atomic_set(&apic->lapic_timer.pending, 0);
if (kvm_vcpu_is_bsp(vcpu))
kvm_lapic_set_base(vcpu,
vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
"0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
vcpu, kvm_apic_id(apic),
vcpu->arch.apic_base, apic->base_address);
}
/*
*----------------------------------------------------------------------
* timer interface
*----------------------------------------------------------------------
*/
static bool lapic_is_periodic(struct kvm_lapic *apic)
{
return apic_lvtt_period(apic);
}
int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
apic_lvt_enabled(apic, APIC_LVTT))
return atomic_read(&apic->lapic_timer.pending);
return 0;
}
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = kvm_apic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
vector = reg & APIC_VECTOR_MASK;
mode = reg & APIC_MODE_MASK;
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
NULL);
}
return 0;
}
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (apic)
kvm_apic_local_deliver(apic, APIC_LVT0);
}
static const struct kvm_io_device_ops apic_mmio_ops = {
.read = apic_mmio_read,
.write = apic_mmio_write,
};
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
struct kvm_vcpu *vcpu = apic->vcpu;
wait_queue_head_t *q = &vcpu->wq;
/*
* There is a race window between reading and incrementing, but we do
* not care about potentially losing timer events in the !reinject
* case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
* in vcpu_enter_guest.
*/
if (!atomic_read(&ktimer->pending)) {
atomic_inc(&ktimer->pending);
/* FIXME: this code should not know anything about vcpus */
kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
}
if (waitqueue_active(q))
wake_up_interruptible(q);
if (lapic_is_periodic(apic)) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
return HRTIMER_RESTART;
} else
return HRTIMER_NORESTART;
}
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
ASSERT(vcpu != NULL);
apic_debug("apic_init %d\n", vcpu->vcpu_id);
apic = kzalloc(sizeof(*apic), GFP_KERNEL);
if (!apic)
goto nomem;
vcpu->arch.apic = apic;
apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
if (!apic->regs) {
printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
vcpu->vcpu_id);
goto nomem_free_apic;
}
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
apic->lapic_timer.timer.function = apic_timer_fn;
/*
 * APIC is created enabled. This will prevent kvm_lapic_set_base from
 * thinking that the APIC state has changed.
 */
vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
kvm_lapic_set_base(vcpu,
APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);
static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
kvm_lapic_reset(vcpu);
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
return 0;
nomem_free_apic:
kfree(apic);
nomem:
return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int highest_irr;
if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
return -1;
apic_update_ppr(apic);
highest_irr = apic_find_highest_irr(apic);
if ((highest_irr == -1) ||
((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
return -1;
return highest_irr;
}
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
int r = 0;
if (!kvm_apic_hw_enabled(vcpu->arch.apic))
r = 1;
if ((lvt0 & APIC_LVT_MASKED) == 0 &&
GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
r = 1;
return r;
}
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return;
if (atomic_read(&apic->lapic_timer.pending) > 0) {
kvm_apic_local_deliver(apic, APIC_LVTT);
atomic_set(&apic->lapic_timer.pending, 0);
}
}
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
int vector = kvm_apic_has_interrupt(vcpu);
struct kvm_lapic *apic = vcpu->arch.apic;
if (vector == -1)
return -1;
apic_set_isr(vector, apic);
apic_update_ppr(apic);
apic_clear_irr(vector, apic);
return vector;
}
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
struct kvm_lapic *apic = vcpu->arch.apic;
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
/* call kvm_apic_set_id() to put apic into apic_map */
kvm_apic_set_id(apic, kvm_apic_id(apic));
kvm_apic_set_version(vcpu);
apic_update_ppr(apic);
hrtimer_cancel(&apic->lapic_timer.timer);
update_divide_count(apic);
start_apic_timer(apic);
apic->irr_pending = true;
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
1 : count_vectors(apic->regs + APIC_ISR);
apic->highest_isr_cache = -1;
kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_rtc_eoi_tracking_restore_one(vcpu);
}
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
struct hrtimer *timer;
if (!kvm_vcpu_has_lapic(vcpu))
return;
timer = &vcpu->arch.apic->lapic_timer.timer;
if (hrtimer_cancel(timer))
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/*
* apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
*
* Detect whether guest triggered PV EOI since the
* last entry. If yes, set EOI on guests's behalf.
* Clear PV EOI in guest memory in any case.
*/
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
struct kvm_lapic *apic)
{
bool pending;
int vector;
/*
* PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
* and KVM_PV_EOI_ENABLED in guest memory as follows:
*
* KVM_APIC_PV_EOI_PENDING is unset:
* -> host disabled PV EOI.
* KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
* -> host enabled PV EOI, guest did not execute EOI yet.
* KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
* -> host enabled PV EOI, guest executed EOI.
*/
BUG_ON(!pv_eoi_enabled(vcpu));
pending = pv_eoi_get_pending(vcpu);
/*
* Clear pending bit in any case: it will be set again on vmentry.
* While this might not be ideal from performance point of view,
* this makes sure pv eoi is only enabled when we know it's safe.
*/
pv_eoi_clr_pending(vcpu);
if (pending)
return;
vector = apic_set_eoi(apic);
trace_kvm_pv_eoi(apic, vector);
}
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
void *vapic;
if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
kunmap_atomic(vapic);
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
/*
* apic_sync_pv_eoi_to_guest - called before vmentry
*
* Detect whether it's safe to enable PV EOI and
* if yes do so.
*/
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
struct kvm_lapic *apic)
{
if (!pv_eoi_enabled(vcpu) ||
/* IRR set or many bits in ISR: could be nested. */
apic->irr_pending ||
/* Cache not set: could be safe but we don't bother. */
apic->highest_isr_cache == -1 ||
/* Need EOI to update ioapic. */
kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
/*
* PV EOI was disabled by apic_sync_pv_eoi_from_guest
* so we need not do anything here.
*/
return;
}
pv_eoi_set_pending(apic->vcpu);
}
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
u32 data, tpr;
int max_irr, max_isr;
struct kvm_lapic *apic = vcpu->arch.apic;
void *vapic;
apic_sync_pv_eoi_to_guest(vcpu, apic);
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
max_irr = apic_find_highest_irr(apic);
if (max_irr < 0)
max_irr = 0;
max_isr = apic_find_highest_isr(apic);
if (max_isr < 0)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
kunmap_atomic(vapic);
}
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
vcpu->arch.apic->vapic_addr = vapic_addr;
if (vapic_addr)
__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
else
__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 reg = (msr - APIC_BASE_MSR) << 4;
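/*
 * x2APIC MSRs map linearly onto the MMIO register layout: e.g. the ICR
 * MSR 0x830 yields (0x830 - 0x800) << 4 == 0x300, which is the APIC_ICR
 * offset (assuming the usual APIC_BASE_MSR of 0x800).
 */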
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
/* if this is ICR write vector before command */
if (msr == 0x830)
apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
return apic_reg_write(apic, reg, (u32)data);
}
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
if (apic_reg_read(apic, reg, 4, &low))
return 1;
if (msr == 0x830)
apic_reg_read(apic, APIC_ICR2, 4, &high);
*data = (((u64)high) << 32) | low;
return 0;
}
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return 1;
/* if this is ICR write vector before command */
if (reg == APIC_ICR)
apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
return apic_reg_write(apic, reg, (u32)data);
}
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 low, high = 0;
if (!kvm_vcpu_has_lapic(vcpu))
return 1;
if (apic_reg_read(apic, reg, 4, &low))
return 1;
if (reg == APIC_ICR)
apic_reg_read(apic, APIC_ICR2, 4, &high);
*data = (((u64)high) << 32) | low;
return 0;
}
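/*
 * The PV EOI MSR packs a 4-byte-aligned guest address together with the
 * enable flag KVM_MSR_ENABLED (bit 0). Illustratively, data 0x1001
 * enables PV EOI backed by guest address 0x1000, while data 0x1002 is
 * rejected below because the address is not 4-byte aligned.
 */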
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
u64 addr = data & ~KVM_MSR_ENABLED;
if (!IS_ALIGNED(addr, 4))
return 1;
vcpu->arch.pv_eoi.msr_val = data;
if (!pv_eoi_enabled(vcpu))
return 0;
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
addr, sizeof(u8));
}
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
unsigned int sipi_vector;
unsigned long pe;
if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
return;
pe = xchg(&apic->pending_events, 0);
if (test_bit(KVM_APIC_INIT, &pe)) {
kvm_lapic_reset(vcpu);
kvm_vcpu_reset(vcpu);
if (kvm_vcpu_is_bsp(apic->vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
}
if (test_bit(KVM_APIC_SIPI, &pe) &&
vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
/* evaluate pending_events before reading the vector */
smp_rmb();
sipi_vector = apic->sipi_vector;
pr_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, sipi_vector);
kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
}
void kvm_lapic_init(void)
{
/* do not patch jump label more than once per second */
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5793_0 |
crossvul-cpp_data_good_3521_0 | /*
* linux/mm/oom_kill.c
*
* Copyright (C) 1998,2000 Rik van Riel
* Thanks go out to Claus Fischer for some serious inspiration and
* for goading me into coding this file...
* Copyright (C) 2010 Google, Inc.
* Rewritten by David Rientjes
*
* The routines in this file are used to kill a process when
* we're seriously out of memory. This gets called from __alloc_pages()
* in mm/page_alloc.c when we really run out of memory.
*
* Since we won't call these routines often (on a well-configured
* machine) this file will double as a 'coding guide' and a signpost
* for newbie kernel hackers. It features several pointers to major
* kernel subsystems and hints as to where to find out what things do.
*/
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);
/**
* test_set_oom_score_adj() - set current's oom_score_adj and return old value
* @new_val: new oom_score_adj value
*
* Sets the oom_score_adj value for current to @new_val with proper
* synchronization and returns the old value. Usually used to temporarily
* set a value, save the old value in the caller, and then reinstate it later.
*/
int test_set_oom_score_adj(int new_val)
{
struct sighand_struct *sighand = current->sighand;
int old_val;
spin_lock_irq(&sighand->siglock);
old_val = current->signal->oom_score_adj;
if (new_val != old_val) {
if (new_val == OOM_SCORE_ADJ_MIN)
atomic_inc(&current->mm->oom_disable_count);
else if (old_val == OOM_SCORE_ADJ_MIN)
atomic_dec(&current->mm->oom_disable_count);
current->signal->oom_score_adj = new_val;
}
spin_unlock_irq(&sighand->siglock);
return old_val;
}
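/*
 * A typical (illustrative) caller pattern, saving and restoring the
 * value around work that should be preferred by the oom killer:
 *
 *	int old_val = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
 *	...allocate or scan on behalf of another task...
 *	test_set_oom_score_adj(old_val);
 */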
#ifdef CONFIG_NUMA
/**
* has_intersects_mems_allowed() - check task eligibility for kill
* @tsk: task struct of which task to consider
* @mask: nodemask passed to page allocator for mempolicy ooms
*
* Task eligibility is determined by whether or not a candidate task, @tsk,
* shares the same mempolicy nodes as current if it is bound by such a policy
* and whether or not it has the same set of allowed cpuset nodes.
*/
static bool has_intersects_mems_allowed(struct task_struct *tsk,
const nodemask_t *mask)
{
struct task_struct *start = tsk;
do {
if (mask) {
/*
* If this is a mempolicy constrained oom, tsk's
* cpuset is irrelevant. Only return true if its
* mempolicy intersects current, otherwise it may be
* needlessly killed.
*/
if (mempolicy_nodemask_intersects(tsk, mask))
return true;
} else {
/*
* This is not a mempolicy constrained oom, so only
* check the mems of tsk's cpuset.
*/
if (cpuset_mems_allowed_intersects(current, tsk))
return true;
}
} while_each_thread(start, tsk);
return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
const nodemask_t *mask)
{
return true;
}
#endif /* CONFIG_NUMA */
/*
* The process p may have detached its own ->mm while exiting or through
* use_mm(), but one or more of its subthreads may still have a valid
* pointer. Return p, or any of its subthreads with a valid ->mm, with
* task_lock() held.
*/
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
struct task_struct *t = p;
do {
task_lock(t);
if (likely(t->mm))
return t;
task_unlock(t);
} while_each_thread(p, t);
return NULL;
}
/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
if (is_global_init(p))
return true;
if (p->flags & PF_KTHREAD)
return true;
/* When mem_cgroup_out_of_memory() and p is not member of the group */
if (mem && !task_in_mem_cgroup(p, mem))
return true;
/* p may not have freeable memory in nodemask */
if (!has_intersects_mems_allowed(p, nodemask))
return true;
return false;
}
/**
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of which task we should calculate
* @totalpages: total present RAM allowed for page allocation
*
* The heuristic for determining which task to kill is made to be as simple and
* predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures.
*/
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
const nodemask_t *nodemask, unsigned long totalpages)
{
long points;
if (oom_unkillable_task(p, mem, nodemask))
return 0;
p = find_lock_task_mm(p);
if (!p)
return 0;
/*
* Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
* so the entire heuristic doesn't need to be executed for something
* that cannot be killed.
*/
if (atomic_read(&p->mm->oom_disable_count)) {
task_unlock(p);
return 0;
}
/*
* The memory controller may have a limit of 0 bytes, so avoid a divide
* by zero, if necessary.
*/
if (!totalpages)
totalpages = 1;
/*
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
points = get_mm_rss(p->mm) + p->mm->nr_ptes;
points += get_mm_counter(p->mm, MM_SWAPENTS);
points *= 1000;
points /= totalpages;
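/*
 * Illustrative numbers: a task whose rss, page tables and swap entries
 * total 262144 pages on a machine with 2097152 total pages scores
 * 262144 * 1000 / 2097152 = 125, i.e. 12.5% of the 0..1000 range.
 */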
task_unlock(p);
/*
* Root processes get 3% bonus, just like the __vm_enough_memory()
* implementation used by LSMs.
*/
if (has_capability_noaudit(p, CAP_SYS_ADMIN))
points -= 30;
/*
* /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
* either completely disable oom killing or always prefer a certain
* task.
*/
points += p->signal->oom_score_adj;
/*
* Never return 0 for an eligible task that may be killed since it's
* possible that no single user task uses more than 0.1% of memory and
* no single admin tasks uses more than 3.0%.
*/
if (points <= 0)
return 1;
return (points < 1000) ? points : 1000;
}
/*
* Determine the type of allocation constraint.
*/
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
gfp_t gfp_mask, nodemask_t *nodemask,
unsigned long *totalpages)
{
struct zone *zone;
struct zoneref *z;
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
bool cpuset_limited = false;
int nid;
/* Default to all available memory */
*totalpages = totalram_pages + total_swap_pages;
if (!zonelist)
return CONSTRAINT_NONE;
/*
 * We reach here only when __GFP_NOFAIL is used, so we should avoid
 * killing current; a random task kill is required in this case.
 * CONSTRAINT_THISNODE would be preferable, but there is no way to
 * handle it for now.
 */
if (gfp_mask & __GFP_THISNODE)
return CONSTRAINT_NONE;
/*
* This is not a __GFP_THISNODE allocation, so a truncated nodemask in
* the page allocator means a mempolicy is in effect. Cpuset policy
* is enforced in get_page_from_freelist().
*/
if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
*totalpages = total_swap_pages;
for_each_node_mask(nid, *nodemask)
*totalpages += node_spanned_pages(nid);
return CONSTRAINT_MEMORY_POLICY;
}
/* Check this allocation failure is caused by cpuset's wall function */
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask)
if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
cpuset_limited = true;
if (cpuset_limited) {
*totalpages = total_swap_pages;
for_each_node_mask(nid, cpuset_current_mems_allowed)
*totalpages += node_spanned_pages(nid);
return CONSTRAINT_CPUSET;
}
return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
gfp_t gfp_mask, nodemask_t *nodemask,
unsigned long *totalpages)
{
*totalpages = totalram_pages + total_swap_pages;
return CONSTRAINT_NONE;
}
#endif
/*
* Simple selection loop. We chose the process with the highest
* number of 'points'. We expect the caller will lock the tasklist.
*
* (not docbooked, we don't want this one cluttering up the manual)
*/
static struct task_struct *select_bad_process(unsigned int *ppoints,
unsigned long totalpages, struct mem_cgroup *mem,
const nodemask_t *nodemask)
{
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
*ppoints = 0;
do_each_thread(g, p) {
unsigned int points;
if (p->exit_state)
continue;
if (oom_unkillable_task(p, mem, nodemask))
continue;
/*
* This task already has access to memory reserves and is
* being killed. Don't allow any other task access to the
* memory reserve.
*
* Note: this may have a chance of deadlock if it gets
* blocked waiting for another task which itself is waiting
* for memory. Is there a better alternative?
*/
if (test_tsk_thread_flag(p, TIF_MEMDIE))
return ERR_PTR(-1UL);
if (!p->mm)
continue;
if (p->flags & PF_EXITING) {
/*
* If p is the current task and is in the process of
* releasing memory, we allow the "kill" to set
* TIF_MEMDIE, which will allow it to gain access to
* memory reserves. Otherwise, it may stall forever.
*
* The loop isn't broken here, however, in case other
* threads are found to have already been oom killed.
*/
if (p == current) {
chosen = p;
*ppoints = 1000;
} else {
/*
* If this task is not being ptraced on exit,
* then wait for it to finish before killing
* some other task unnecessarily.
*/
if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
return ERR_PTR(-1UL);
}
}
points = oom_badness(p, mem, nodemask, totalpages);
if (points > *ppoints) {
chosen = p;
*ppoints = points;
}
} while_each_thread(g, p);
return chosen;
}
/**
* dump_tasks - dump current memory state of all system tasks
* @mem: current's memory controller, if constrained
* @nodemask: nodemask passed to page allocator for mempolicy ooms
*
* Dumps the current memory state of all eligible tasks. Tasks not in the same
* memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
* are not shown.
* State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
* value, oom_score_adj value, and name.
*
* Call with tasklist_lock read-locked.
*/
static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
for_each_process(p) {
if (oom_unkillable_task(p, mem, nodemask))
continue;
task = find_lock_task_mm(p);
if (!task) {
/*
* This is a kthread or all of p's threads have already
* detached their mm's. There's no need to report
* them; they can't be oom killed anyway.
*/
continue;
}
pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
task->pid, task_uid(task), task->tgid,
task->mm->total_vm, get_mm_rss(task->mm),
task_cpu(task), task->signal->oom_adj,
task->signal->oom_score_adj, task->comm);
task_unlock(task);
}
}
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
struct mem_cgroup *mem, const nodemask_t *nodemask)
{
task_lock(current);
pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
"oom_adj=%d, oom_score_adj=%d\n",
current->comm, gfp_mask, order, current->signal->oom_adj,
current->signal->oom_score_adj);
cpuset_print_task_mems_allowed(current);
task_unlock(current);
dump_stack();
mem_cgroup_print_oom_info(mem, p);
show_mem(SHOW_MEM_FILTER_NODES);
if (sysctl_oom_dump_tasks)
dump_tasks(mem, nodemask);
}
#define K(x) ((x) << (PAGE_SHIFT-10))
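/* K(x) converts pages to kB; with 4 kB pages (PAGE_SHIFT == 12), K(x) == x * 4. */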
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
struct task_struct *q;
struct mm_struct *mm;
p = find_lock_task_mm(p);
if (!p)
return 1;
/* mm cannot be safely dereferenced after task_unlock(p) */
mm = p->mm;
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
task_pid_nr(p), p->comm, K(p->mm->total_vm),
K(get_mm_counter(p->mm, MM_ANONPAGES)),
K(get_mm_counter(p->mm, MM_FILEPAGES)));
task_unlock(p);
/*
* Kill all processes sharing p->mm in other thread groups, if any.
* They don't get access to memory reserves or a higher scheduler
* priority, though, to avoid depletion of all memory or task
* starvation. This prevents mm->mmap_sem livelock when an oom killed
* task cannot exit because it requires the semaphore and it's contended
* by another thread trying to allocate memory itself. That thread will
* now get access to memory reserves since it has a pending fatal
* signal.
*/
for_each_process(q)
if (q->mm == mm && !same_thread_group(q, p)) {
task_lock(q); /* Protect ->comm from prctl() */
pr_err("Kill process %d (%s) sharing same memory\n",
task_pid_nr(q), q->comm);
task_unlock(q);
force_sig(SIGKILL, q);
}
set_tsk_thread_flag(p, TIF_MEMDIE);
force_sig(SIGKILL, p);
return 0;
}
#undef K
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
unsigned int points, unsigned long totalpages,
struct mem_cgroup *mem, nodemask_t *nodemask,
const char *message)
{
struct task_struct *victim = p;
struct task_struct *child;
struct task_struct *t = p;
unsigned int victim_points = 0;
if (printk_ratelimit())
dump_header(p, gfp_mask, order, mem, nodemask);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
* its children or threads, just set TIF_MEMDIE so it can die quickly
*/
if (p->flags & PF_EXITING) {
set_tsk_thread_flag(p, TIF_MEMDIE);
return 0;
}
task_lock(p);
pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
message, task_pid_nr(p), p->comm, points);
task_unlock(p);
/*
* If any of p's children has a different mm and is eligible for kill,
* the one with the highest oom_badness() score is sacrificed for its
* parent. This attempts to lose the minimal amount of work done while
* still freeing memory.
*/
do {
list_for_each_entry(child, &t->children, sibling) {
unsigned int child_points;
if (child->mm == p->mm)
continue;
/*
* oom_badness() returns 0 if the thread is unkillable
*/
child_points = oom_badness(child, mem, nodemask,
totalpages);
if (child_points > victim_points) {
victim = child;
victim_points = child_points;
}
}
} while_each_thread(p, t);
return oom_kill_task(victim, mem);
}
/*
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
int order, const nodemask_t *nodemask)
{
if (likely(!sysctl_panic_on_oom))
return;
if (sysctl_panic_on_oom != 2) {
/*
* panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
* does not panic for cpuset, mempolicy, or memcg allocation
* failures.
*/
if (constraint != CONSTRAINT_NONE)
return;
}
read_lock(&tasklist_lock);
dump_header(NULL, gfp_mask, order, NULL, nodemask);
read_unlock(&tasklist_lock);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
unsigned long limit;
unsigned int points = 0;
struct task_struct *p;
/*
* If current has a pending SIGKILL, then automatically select it. The
* goal is to allow it to allocate so that it may quickly exit and free
* its memory.
*/
if (fatal_signal_pending(current)) {
set_thread_flag(TIF_MEMDIE);
return;
}
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
read_lock(&tasklist_lock);
retry:
p = select_bad_process(&points, limit, mem, NULL);
if (!p || PTR_ERR(p) == -1UL)
goto out;
if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
"Memory cgroup out of memory"))
goto retry;
out:
read_unlock(&tasklist_lock);
}
#endif
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
int register_oom_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);
int unregister_oom_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
/*
* Try to acquire the OOM killer lock for the zones in zonelist. Returns zero
* if a parallel OOM killing is already taking place that includes a zone in
* the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
*/
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
struct zoneref *z;
struct zone *zone;
int ret = 1;
spin_lock(&zone_scan_lock);
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
if (zone_is_oom_locked(zone)) {
ret = 0;
goto out;
}
}
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
/*
* Lock each zone in the zonelist under zone_scan_lock so a
* parallel invocation of try_set_zonelist_oom() doesn't succeed
* when it shouldn't.
*/
zone_set_flag(zone, ZONE_OOM_LOCKED);
}
out:
spin_unlock(&zone_scan_lock);
return ret;
}
/*
* Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
* allocation attempts with zonelists containing them may now recall the OOM
* killer, if necessary.
*/
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
struct zoneref *z;
struct zone *zone;
spin_lock(&zone_scan_lock);
for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
zone_clear_flag(zone, ZONE_OOM_LOCKED);
}
spin_unlock(&zone_scan_lock);
}
/*
* Try to acquire the oom killer lock for all system zones. Returns zero if a
* parallel oom killing is taking place, otherwise locks all zones and returns
* non-zero.
*/
static int try_set_system_oom(void)
{
struct zone *zone;
int ret = 1;
spin_lock(&zone_scan_lock);
for_each_populated_zone(zone)
if (zone_is_oom_locked(zone)) {
ret = 0;
goto out;
}
for_each_populated_zone(zone)
zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
spin_unlock(&zone_scan_lock);
return ret;
}
/*
* Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
* attempts or page faults may now recall the oom killer, if necessary.
*/
static void clear_system_oom(void)
{
struct zone *zone;
spin_lock(&zone_scan_lock);
for_each_populated_zone(zone)
zone_clear_flag(zone, ZONE_OOM_LOCKED);
spin_unlock(&zone_scan_lock);
}
/**
* out_of_memory - kill the "best" process when we run out of memory
* @zonelist: zonelist pointer
* @gfp_mask: memory allocation flags
* @order: amount of memory being requested as a power of 2
* @nodemask: nodemask passed to page allocator
*
* If we run out of memory, we have the choice between either
* killing a random task (bad), letting the system crash (worse)
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *nodemask)
{
const nodemask_t *mpol_mask;
struct task_struct *p;
unsigned long totalpages;
unsigned long freed = 0;
unsigned int points;
enum oom_constraint constraint = CONSTRAINT_NONE;
int killed = 0;
blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
if (freed > 0)
/* Got some memory back in the last second. */
return;
/*
* If current has a pending SIGKILL, then automatically select it. The
* goal is to allow it to allocate so that it may quickly exit and free
* its memory.
*/
if (fatal_signal_pending(current)) {
set_thread_flag(TIF_MEMDIE);
return;
}
/*
* Check if there were limitations on the allocation (only relevant for
* NUMA) that may require different handling.
*/
constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
&totalpages);
mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
!oom_unkillable_task(current, NULL, nodemask) &&
current->mm && !atomic_read(&current->mm->oom_disable_count)) {
/*
* oom_kill_process() needs tasklist_lock held. If it returns
* non-zero, current could not be killed so we must fallback to
* the tasklist scan.
*/
if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
NULL, nodemask,
"Out of memory (oom_kill_allocating_task)"))
goto out;
}
retry:
p = select_bad_process(&points, totalpages, NULL, mpol_mask);
if (PTR_ERR(p) == -1UL)
goto out;
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
read_unlock(&tasklist_lock);
panic("Out of memory and no killable processes...\n");
}
if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
nodemask, "Out of memory"))
goto retry;
killed = 1;
out:
read_unlock(&tasklist_lock);
/*
* Give "p" a good chance of killing itself before we
* retry to allocate memory unless "p" is current
*/
if (killed && !test_thread_flag(TIF_MEMDIE))
schedule_timeout_uninterruptible(1);
}
/*
* The pagefault handler calls here because it is out of memory, so kill a
* memory-hogging task. If a populated zone has ZONE_OOM_LOCKED set, a parallel
* oom killing is already in progress so do nothing. If a task is found with
* TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
*/
void pagefault_out_of_memory(void)
{
if (try_set_system_oom()) {
out_of_memory(NULL, 0, 0, NULL);
clear_system_oom();
}
if (!test_thread_flag(TIF_MEMDIE))
schedule_timeout_uninterruptible(1);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3521_0 |
crossvul-cpp_data_bad_3663_0 | /*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "private/gc_priv.h"
#include <stdio.h>
#include <string.h>
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
struct hblk ** result = (struct hblk **)
GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
if (result == 0) return(FALSE);
BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
kind -> ok_reclaim_list = result;
return(TRUE);
}
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
GC_bool ignore_off_page,
GC_bool retry); /* from alloc.c */
/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
struct hblk * h;
word n_blocks;
ptr_t result;
GC_bool retry = FALSE;
/* Round up to a multiple of a granule. */
lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
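/* Worked example (illustrative; assumes GRANULE_BYTES is a power of */
/* two, e.g. 16): lb == 20 becomes (20 + 15) & ~15 == 32, i.e. the */
/* next multiple of the granule size. */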
n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
/* Do our share of marking work */
if (GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
h = GC_allochblk(lb, k, flags);
# ifdef USE_MUNMAP
if (0 == h) {
GC_merge_unmapped();
h = GC_allochblk(lb, k, flags);
}
# endif
while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
h = GC_allochblk(lb, k, flags);
retry = TRUE;
}
if (h == 0) {
result = 0;
} else {
size_t total_bytes = n_blocks * HBLKSIZE;
if (n_blocks > 1) {
GC_large_allocd_bytes += total_bytes;
if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
GC_max_large_allocd_bytes = GC_large_allocd_bytes;
}
result = h -> hb_body;
}
return result;
}
/* Allocate a large block of size lb bytes. Clear if appropriate. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
ptr_t result = GC_alloc_large(lb, k, flags);
word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (0 == result) return 0;
if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
/* Clear the whole block, in case of GC_realloc call. */
BZERO(result, n_blocks * HBLKSIZE);
}
return result;
}
/* Allocate lb bytes for an object of kind k. */
/* Should not be used directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold lock: */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
void *op;
if(SMALL_OBJ(lb)) {
struct obj_kind * kind = GC_obj_kinds + k;
size_t lg = GC_size_map[lb];
void ** opp = &(kind -> ok_freelist[lg]);
op = *opp;
if (EXPECT(0 == op, FALSE)) {
if (GC_size_map[lb] == 0) {
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
return(GC_generic_malloc_inner(lb, k));
}
if (kind -> ok_reclaim_list == 0) {
if (!GC_alloc_reclaim_list(kind)) goto out;
}
op = GC_allocobj(lg, k);
if (op == 0) goto out;
}
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
} else {
op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
GC_bytes_allocd += lb;
}
out:
return op;
}
/* Allocate a composite object of size lb bytes. The caller guarantees */
/* that pointers past the first page are not relevant. Caller holds */
/* allocation lock. */
GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
word lb_adjusted;
void * op;
if (lb <= HBLKSIZE)
return(GC_generic_malloc_inner(lb, k));
lb_adjusted = ADD_SLOP(lb);
op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
GC_bytes_allocd += lb_adjusted;
return op;
}
GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
void * result;
DCL_LOCK_STATE;
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
if (SMALL_OBJ(lb)) {
LOCK();
result = GC_generic_malloc_inner((word)lb, k);
UNLOCK();
} else {
size_t lg;
size_t lb_rounded;
word n_blocks;
GC_bool init;
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
if (lb_rounded < lb)
return((*GC_get_oom_fn())(lb));
n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
LOCK();
result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
} else {
# ifdef THREADS
/* Clear any memory that might be used for GC descriptors */
/* before we release the lock. */
((word *)result)[0] = 0;
((word *)result)[1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
# endif
}
}
GC_bytes_allocd += lb_rounded;
UNLOCK();
if (init && !GC_debugging_started && 0 != result) {
BZERO(result, n_blocks * HBLKSIZE);
}
}
if (0 == result) {
return((*GC_get_oom_fn())(lb));
} else {
return(result);
}
}
/* Allocate lb bytes of atomic (pointerfree) data */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc_atomic(size_t lb)
#else
GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
#endif
{
void *op;
void ** opp;
size_t lg;
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = &(GC_aobjfreelist[lg]);
LOCK();
if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
*opp = obj_link(op);
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
return((void *) op);
} else {
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
}
/* Allocate lb bytes of composite (pointerful) data */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc(size_t lb)
#else
GC_API void * GC_CALL GC_malloc(size_t lb)
#endif
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = (void **)&(GC_objfreelist[lg]);
LOCK();
if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return (GENERAL_MALLOC((word)lb, NORMAL));
}
GC_ASSERT(0 == obj_link(op)
|| ((word)obj_link(op)
<= (word)GC_greatest_plausible_heap_addr
&& (word)obj_link(op)
>= (word)GC_least_plausible_heap_addr));
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
return op;
} else {
return(GENERAL_MALLOC(lb, NORMAL));
}
}
/* Allocate lb bytes of pointerful, traced, but not collectable data */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
lg = GC_size_map[lb];
opp = &(GC_uobjfreelist[lg]);
LOCK();
op = *opp;
if (EXPECT(0 != op, TRUE)) {
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
/* Mark bit was already set on free list. It will be */
/* cleared only temporarily during a collection, as a */
/* result of the normal free list mark bit clearing. */
GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
UNLOCK();
} else {
UNLOCK();
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
/* For small objects, the free lists are completely marked. */
}
GC_ASSERT(0 == op || GC_is_marked(op));
return((void *) op);
} else {
hdr * hhdr;
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
if (0 == op) return(0);
GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
hhdr = HDR(op);
/* We don't need the lock here, since we have an undisguised */
/* pointer. We do need to hold the lock while we adjust */
/* mark bits. */
LOCK();
set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
GC_ASSERT(hhdr -> hb_n_marks == 0);
hhdr -> hb_n_marks = 1;
UNLOCK();
return((void *) op);
}
}
#ifdef REDIRECT_MALLOC
# ifndef MSWINCE
# include <errno.h>
# endif
/* Avoid unnecessary nested procedure calls here, by #defining some */
/* malloc replacements. Otherwise we end up saving a */
/* meaningless return address in the object. It also speeds things up, */
/* but it is admittedly quite ugly. */
# define GC_debug_malloc_replacement(lb) \
GC_debug_malloc(lb, GC_DBG_RA "unknown", 0)
void * malloc(size_t lb)
{
/* It might help to manually inline the GC_malloc call here. */
/* But any decent compiler should reduce the extra procedure call */
/* to at most a jump instruction in this case. */
# if defined(I386) && defined(GC_SOLARIS_THREADS)
/*
* Thread initialisation can call malloc before
* we're ready for it.
* It's not clear that this is enough to help matters.
* The thread implementation may well call malloc at other
* inopportune times.
*/
if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
# endif /* I386 && GC_SOLARIS_THREADS */
return((void *)REDIRECT_MALLOC(lb));
}
#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
STATIC ptr_t GC_libpthread_start = 0;
STATIC ptr_t GC_libpthread_end = 0;
STATIC ptr_t GC_libld_start = 0;
STATIC ptr_t GC_libld_end = 0;
STATIC void GC_init_lib_bounds(void)
{
if (GC_libpthread_start != 0) return;
GC_init(); /* if not called yet */
if (!GC_text_mapping("libpthread-",
&GC_libpthread_start, &GC_libpthread_end)) {
WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
/* This might still work with some versions of libpthread, */
/* so we don't abort. Perhaps we should. */
/* Generate message only once: */
GC_libpthread_start = (ptr_t)1;
}
if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
}
}
#endif /* GC_LINUX_THREADS */
#ifndef SIZE_MAX
#define SIZE_MAX (~(size_t)0)
#endif
void * calloc(size_t n, size_t lb)
{
if (lb && n > SIZE_MAX / lb)
return NULL;
# if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
/* libpthread allocated some memory that is only pointed to by */
/* mmapped thread stacks. Make sure it's not collectable. */
{
static GC_bool lib_bounds_set = FALSE;
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
if (!EXPECT(lib_bounds_set, TRUE)) {
GC_init_lib_bounds();
lib_bounds_set = TRUE;
}
if (((word)caller >= (word)GC_libpthread_start
&& (word)caller < (word)GC_libpthread_end)
|| ((word)caller >= (word)GC_libld_start
&& (word)caller < (word)GC_libld_end))
return GC_malloc_uncollectable(n*lb);
/* The two ranges are actually usually adjacent, so there may */
/* be a way to speed this up. */
}
# endif
return((void *)REDIRECT_MALLOC(n*lb));
}
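/*
* Why the n > SIZE_MAX / lb guard above matters -- a minimal sketch
* (mult_overflows() is a hypothetical helper, not part of this file):
*
*   static int mult_overflows(size_t n, size_t lb)
*   {
*       return lb != 0 && n > SIZE_MAX / lb;
*   }
*
* With a 32-bit size_t, n == 0x10000 and lb == 0x10001 make n * lb
* wrap to 0x10000, so an unchecked calloc would hand back a buffer
* far smaller than the caller computed (a classic CWE-189 overflow).
*/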
#ifndef strdup
char *strdup(const char *s)
{
size_t lb = strlen(s) + 1;
char *result = (char *)REDIRECT_MALLOC(lb);
if (result == 0) {
errno = ENOMEM;
return 0;
}
BCOPY(s, result, lb);
return result;
}
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it. */
/* This seems to be true on most Linux systems. */
#ifndef strndup
/* This is similar to strdup(). */
char *strndup(const char *str, size_t size)
{
char *copy;
size_t len = strlen(str);
if (len > size)
len = size;
copy = (char *)REDIRECT_MALLOC(len + 1);
if (copy == NULL) {
errno = ENOMEM;
return NULL;
}
BCOPY(str, copy, len);
copy[len] = '\0';
return copy;
}
#endif /* !strndup */
#undef GC_debug_malloc_replacement
#endif /* REDIRECT_MALLOC */
/* Explicitly deallocate an object p. */
GC_API void GC_CALL GC_free(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* In bytes */
size_t ngranules; /* sz in granules */
void **flh;
int knd;
struct obj_kind * ok;
DCL_LOCK_STATE;
if (p == 0) return;
/* Required by ANSI. It's not my fault ... */
# ifdef LOG_ALLOCS
GC_err_printf("GC_free(%p), GC: %lu\n", p, (unsigned long)GC_gc_no);
# endif
h = HBLKPTR(p);
hhdr = HDR(h);
# if defined(REDIRECT_MALLOC) && \
(defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
|| defined(MSWIN32))
/* For Solaris, we have to redirect malloc calls during */
/* initialization. For the others, this seems to happen */
/* implicitly. */
/* Don't try to deallocate that memory. */
if (0 == hhdr) return;
# endif
GC_ASSERT(GC_base(p) == p);
sz = hhdr -> hb_sz;
ngranules = BYTES_TO_GRANULES(sz);
knd = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[knd];
if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
/* It's unnecessary to clear the mark bit. If the */
/* object is reallocated, it doesn't matter. Otherwise the */
/* collector will do it, since it's on a free list. */
if (ok -> ok_init) {
BZERO((word *)p + 1, sz-sizeof(word));
}
flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
UNLOCK();
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (nblocks > 1) {
GC_large_allocd_bytes -= nblocks * HBLKSIZE;
}
GC_freehblk(h);
UNLOCK();
}
}
/* Explicitly deallocate an object p when we already hold lock. */
/* Only used for internally allocated objects, so we can take some */
/* shortcuts. */
#ifdef THREADS
GC_INNER void GC_free_inner(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* bytes */
size_t ngranules; /* sz in granules */
void ** flh;
int knd;
struct obj_kind * ok;
h = HBLKPTR(p);
hhdr = HDR(h);
knd = hhdr -> hb_obj_kind;
sz = hhdr -> hb_sz;
ngranules = BYTES_TO_GRANULES(sz);
ok = &GC_obj_kinds[knd];
if (ngranules <= MAXOBJGRANULES) {
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (ok -> ok_init) {
BZERO((word *)p + 1, sz-sizeof(word));
}
flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (nblocks > 1) {
GC_large_allocd_bytes -= nblocks * HBLKSIZE;
}
GC_freehblk(h);
}
}
#endif /* THREADS */
#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif
#ifdef REDIRECT_FREE
void free(void * p)
{
# if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
{
/* Don't bother with initialization checks. If nothing */
/* has been initialized, the check fails, and that's safe, */
/* since we haven't allocated uncollectable objects either. */
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
if (((word)caller >= (word)GC_libpthread_start
&& (word)caller < (word)GC_libpthread_end)
|| ((word)caller >= (word)GC_libld_start
&& (word)caller < (word)GC_libld_end)) {
GC_free(p);
return;
}
}
# endif
# ifndef IGNORE_FREE
REDIRECT_FREE(p);
# endif
}
#endif /* REDIRECT_FREE */
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3663_0 |
crossvul-cpp_data_good_3689_0 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi.h"
#include "workarounds.h"
/**************************************************************************
*
* Type name strings
*
**************************************************************************
*/
/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *efx_loopback_mode_names[] = {
[LOOPBACK_NONE] = "NONE",
[LOOPBACK_DATA] = "DATAPATH",
[LOOPBACK_GMAC] = "GMAC",
[LOOPBACK_XGMII] = "XGMII",
[LOOPBACK_XGXS] = "XGXS",
[LOOPBACK_XAUI] = "XAUI",
[LOOPBACK_GMII] = "GMII",
[LOOPBACK_SGMII] = "SGMII",
[LOOPBACK_XGBR] = "XGBR",
[LOOPBACK_XFI] = "XFI",
[LOOPBACK_XAUI_FAR] = "XAUI_FAR",
[LOOPBACK_GMII_FAR] = "GMII_FAR",
[LOOPBACK_SGMII_FAR] = "SGMII_FAR",
[LOOPBACK_XFI_FAR] = "XFI_FAR",
[LOOPBACK_GPHY] = "GPHY",
[LOOPBACK_PHYXS] = "PHYXS",
[LOOPBACK_PCS] = "PCS",
[LOOPBACK_PMAPMD] = "PMA/PMD",
[LOOPBACK_XPORT] = "XPORT",
[LOOPBACK_XGMII_WS] = "XGMII_WS",
[LOOPBACK_XAUI_WS] = "XAUI_WS",
[LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
[LOOPBACK_GMII_WS] = "GMII_WS",
[LOOPBACK_XFI_WS] = "XFI_WS",
[LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
[LOOPBACK_PHYXS_WS] = "PHYXS_WS",
};
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
#define EFX_MAX_MTU (9 * 1024)
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
*/
static struct workqueue_struct *reset_workqueue;
/**************************************************************************
*
* Configurable values
*
*************************************************************************/
/*
* Use separate channels for TX and RX events
*
* Set this to 1 to use separate channels for TX and RX. It allows us
* to control interrupt affinity separately for TX and RX.
*
* This is only used in MSI-X interrupt mode
*/
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
"Use separate channels for TX and RX");
/* This is the weight assigned to each of the (per-channel) virtual
* NAPI devices.
*/
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
* monitor. On Falcon-based NICs, this will:
* - Check the on-board hardware monitor;
* - Poll the link state and reconfigure the hardware as necessary.
*/
static unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the driver will initialise devices
* with invalid MAC addresses stored in the EEPROM or flash. If true,
* such devices will be initialised with a random locally-generated
* MAC address. This allows for loading the sfc_mtd driver to
* reprogram the flash, even if the flash contents (including the MAC
* address) have previously been erased.
*/
static unsigned int allow_bad_hwaddr;
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
* The default for RX should strike a balance between increasing the
* round-trip latency and reducing overhead.
*/
static unsigned int rx_irq_mod_usec = 60;
/* Initial interrupt moderation settings. They can be modified after
* module load with ethtool.
*
* This default is chosen to ensure that a 10G link does not go idle
* while a TX queue is stopped after it has become full. A queue is
* restarted when it drops below half full. The time this takes (assuming
* worst case 3 descriptors per packet and 1024 descriptors) is
* 512 / 3 * 1.2 = 205 usec.
*/
static unsigned int tx_irq_mod_usec = 150;
/* This is the first interrupt mode to try out of:
* 0 => MSI-X
* 1 => MSI
* 2 => legacy
*/
static unsigned int interrupt_mode;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
* i.e. the number of CPUs among which we may distribute simultaneous
* interrupt handling.
*
* Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
* The default (0) means to assign an interrupt to each package (level II cache)
*/
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
"Threshold score for reducing IRQ moderation");
static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
/**************************************************************************
*
* Utility functions and prototypes
*
*************************************************************************/
static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
if ((efx->state == STATE_RUNNING) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
/**************************************************************************
*
* Event queue processing
*
*************************************************************************/
/* Process channel's event queue
*
* This function is responsible for processing the event queue of a
* single channel. The caller must guarantee that this function will
* never be concurrently called more than once on the same channel,
* though different channels may be being processed concurrently.
*/
static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
int spent;
if (unlikely(efx->reset_pending || !channel->enabled))
return 0;
spent = efx_nic_process_eventq(channel, budget);
if (spent == 0)
return 0;
/* Deliver last RX packet. */
if (channel->rx_pkt) {
__efx_rx_packet(channel, channel->rx_pkt,
channel->rx_pkt_csummed);
channel->rx_pkt = NULL;
}
efx_rx_strategy(channel);
efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
return spent;
}
/* Mark channel as finished processing
*
* Note that since we will not receive further interrupts for this
* channel before we finish processing and call the eventq_read_ack()
* method, there is no need to use the interrupt hold-off timers.
*/
static inline void efx_channel_processed(struct efx_channel *channel)
{
/* The interrupt handler for this channel may set work_pending
* as soon as we acknowledge the events we've seen. Make sure
* it's cleared before then. */
channel->work_pending = false;
smp_wmb();
efx_nic_eventq_read_ack(channel);
}
/* NAPI poll handler
*
* NAPI guarantees serialisation of polls of the same device, which
* provides the guarantee required by efx_process_channel().
*/
static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
struct efx_nic *efx = channel->efx;
int spent;
netif_vdbg(efx, intr, efx->net_dev,
"channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
spent = efx_process_channel(channel, budget);
if (spent < budget) {
if (channel->channel < efx->n_rx_channels &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
if (unlikely(channel->irq_mod_score <
irq_adapt_low_thresh)) {
if (channel->irq_moderation > 1) {
channel->irq_moderation -= 1;
efx->type->push_irq_moderation(channel);
}
} else if (unlikely(channel->irq_mod_score >
irq_adapt_high_thresh)) {
if (channel->irq_moderation <
efx->irq_rx_moderation) {
channel->irq_moderation += 1;
efx->type->push_irq_moderation(channel);
}
}
channel->irq_count = 0;
channel->irq_mod_score = 0;
}
efx_filter_rfs_expire(channel);
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
* since efx_channel_processed() will have no effect if
* interrupts have already been disabled.
*/
napi_complete(napi);
efx_channel_processed(channel);
}
return spent;
}
/* Process the eventq of the specified channel immediately on this CPU
*
* Disable hardware generated interrupts, wait for any existing
* processing to finish, then directly poll (and ack) the eventq.
* Finally reenable NAPI and interrupts.
*
* This is for use only during a loopback self-test. It must not
* deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
efx->legacy_irq_enabled = false;
}
if (channel->irq)
synchronize_irq(channel->irq);
/* Wait for any NAPI processing to complete */
napi_disable(&channel->napi_str);
/* Poll the channel */
efx_process_channel(channel, channel->eventq_mask + 1);
/* Ack the eventq. This may cause an interrupt to be generated
* when they are reenabled */
efx_channel_processed(channel);
napi_enable(&channel->napi_str);
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
}
/* Create event queue
* Event queue memory allocations are done only once. If the channel
* is reset, the memory buffer will be reused; this guards against
* errors during channel reset and also simplifies interrupt handling.
*/
static int efx_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
unsigned long entries;
netif_dbg(channel->efx, probe, channel->efx->net_dev,
"chan %d create event queue\n", channel->channel);
/* Build an event queue with room for one event per tx and rx buffer,
* plus some extra for link state events and MCDI completions. */
entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
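/* Worked example (values assumed for illustration): with 512 RX and
* 512 TX entries, 512 + 512 + 128 == 1152, which rounds up to 2048. */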
EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
return efx_nic_probe_eventq(channel);
}
/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d init event queue\n", channel->channel);
channel->eventq_read_ptr = 0;
efx_nic_init_eventq(channel);
}
static void efx_fini_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
}
static void efx_remove_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d remove event queue\n", channel->channel);
efx_nic_remove_eventq(channel);
}
/**************************************************************************
*
* Channel handling
*
*************************************************************************/
/* Allocate and initialise a channel structure, optionally copying
* parameters (but not resources) from an old channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int j;
if (old_channel) {
channel = kmalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return NULL;
*channel = *old_channel;
channel->napi_dev = NULL;
memset(&channel->eventq, 0, sizeof(channel->eventq));
rx_queue = &channel->rx_queue;
rx_queue->buffer = NULL;
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
for (j = 0; j < EFX_TXQ_TYPES; j++) {
tx_queue = &channel->tx_queue[j];
if (tx_queue->channel)
tx_queue->channel = channel;
tx_queue->buffer = NULL;
memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
}
} else {
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return NULL;
channel->efx = efx;
channel->channel = i;
for (j = 0; j < EFX_TXQ_TYPES; j++) {
tx_queue = &channel->tx_queue[j];
tx_queue->efx = efx;
tx_queue->queue = i * EFX_TXQ_TYPES + j;
tx_queue->channel = channel;
}
}
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
(unsigned long)rx_queue);
return channel;
}
static int efx_probe_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
int rc;
netif_dbg(channel->efx, probe, channel->efx->net_dev,
"creating channel %d\n", channel->channel);
rc = efx_probe_eventq(channel);
if (rc)
goto fail1;
efx_for_each_channel_tx_queue(tx_queue, channel) {
rc = efx_probe_tx_queue(tx_queue);
if (rc)
goto fail2;
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
rc = efx_probe_rx_queue(rx_queue);
if (rc)
goto fail3;
}
channel->n_rx_frm_trunc = 0;
return 0;
fail3:
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
fail2:
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
fail1:
return rc;
}
static void efx_set_channel_names(struct efx_nic *efx)
{
struct efx_channel *channel;
const char *type = "";
int number;
efx_for_each_channel(channel, efx) {
number = channel->channel;
if (efx->n_channels > efx->n_rx_channels) {
if (channel->channel < efx->n_rx_channels) {
type = "-rx";
} else {
type = "-tx";
number -= efx->n_rx_channels;
}
}
snprintf(efx->channel_name[channel->channel],
sizeof(efx->channel_name[0]),
"%s%s-%d", efx->name, type, number);
}
}
static int efx_probe_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
int rc;
/* Restart special buffer allocation */
efx->next_buffer_table = 0;
efx_for_each_channel(channel, efx) {
rc = efx_probe_channel(channel);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create channel %d\n",
channel->channel);
goto fail;
}
}
efx_set_channel_names(efx);
return 0;
fail:
efx_remove_channels(efx);
return rc;
}
/* Channels are shutdown and reinitialised whilst the NIC is running
* to propagate configuration changes (mtu, checksum offload), or
* to clear hardware error conditions
*/
static void efx_init_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
/* Calculate the rx buffer allocation parameters required to
* support the current MTU, including padding for header
* alignment and overruns.
*/
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_hash_size +
efx->type->rx_buffer_padding);
efx->rx_buffer_order = get_order(efx->rx_buffer_len +
sizeof(struct efx_rx_page_state));
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"init chan %d\n", channel->channel);
efx_init_eventq(channel);
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue(tx_queue);
/* The rx buffer allocation strategy is MTU dependent */
efx_rx_strategy(channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_init_rx_queue(rx_queue);
WARN_ON(channel->rx_pkt != NULL);
efx_rx_strategy(channel);
}
}
/* This enables event queue processing and packet transmission.
*
* Note that this function is not allowed to fail, since that would
* introduce too much complexity into the suspend/resume path.
*/
static void efx_start_channel(struct efx_channel *channel)
{
struct efx_rx_queue *rx_queue;
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"starting chan %d\n", channel->channel);
/* The interrupt handler for this channel may set work_pending
* as soon as we enable it. Make sure it's cleared before
* then. Similarly, make sure it sees the enabled flag set. */
channel->work_pending = false;
channel->enabled = true;
smp_wmb();
/* Fill the queues before enabling NAPI */
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fast_push_rx_descriptors(rx_queue);
napi_enable(&channel->napi_str);
}
/* This disables event queue processing and packet transmission.
* This function does not guarantee that all queue processing
* (e.g. RX refill) is complete.
*/
static void efx_stop_channel(struct efx_channel *channel)
{
if (!channel->enabled)
return;
netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
"stop chan %d\n", channel->channel);
channel->enabled = false;
napi_disable(&channel->napi_str);
}
static void efx_fini_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
rc = efx_nic_flush_queues(efx);
if (rc && EFX_WORKAROUND_7803(efx)) {
/* Schedule a reset to recover from the flush failure. The
* descriptor caches reference memory we're about to free,
* but falcon_reconfigure_mac_wrapper() won't reconnect
* the MACs because of the pending reset. */
netif_err(efx, drv, efx->net_dev,
"Resetting to recover from flush failure\n");
efx_schedule_reset(efx, RESET_TYPE_ALL);
} else if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
} else {
netif_dbg(efx, drv, efx->net_dev,
"successfully flushed all queues\n");
}
efx_for_each_channel(channel, efx) {
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"shut down chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
efx_fini_eventq(channel);
}
}
static void efx_remove_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"destroy chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
}
static void efx_remove_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_remove_channel(channel);
}
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i;
int rc;
efx_stop_all(efx);
efx_fini_channels(efx);
/* Clone channels */
memset(other_channel, 0, sizeof(other_channel));
for (i = 0; i < efx->n_channels; i++) {
channel = efx_alloc_channel(efx, i, efx->channel[i]);
if (!channel) {
rc = -ENOMEM;
goto out;
}
other_channel[i] = channel;
}
/* Swap entry counts and channel pointers */
old_rxq_entries = efx->rxq_entries;
old_txq_entries = efx->txq_entries;
efx->rxq_entries = rxq_entries;
efx->txq_entries = txq_entries;
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
efx->channel[i] = other_channel[i];
other_channel[i] = channel;
}
rc = efx_probe_channels(efx);
if (rc)
goto rollback;
efx_init_napi(efx);
/* Destroy old channels */
for (i = 0; i < efx->n_channels; i++) {
efx_fini_napi_channel(other_channel[i]);
efx_remove_channel(other_channel[i]);
}
out:
/* Free unused channel structures */
for (i = 0; i < efx->n_channels; i++)
kfree(other_channel[i]);
efx_init_channels(efx);
efx_start_all(efx);
return rc;
rollback:
/* Swap back */
efx->rxq_entries = old_rxq_entries;
efx->txq_entries = old_txq_entries;
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
efx->channel[i] = other_channel[i];
other_channel[i] = channel;
}
goto out;
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
/**************************************************************************
*
* Port handling
*
**************************************************************************/
/* This ensures that the kernel is kept informed (via
* netif_carrier_on/off) of the link status, which in turn controls
* whether the port's TX queue is stopped.
*/
void efx_link_status_changed(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
* that no events are triggered between unregister_netdev() and the
* driver unloading. A more general condition is that NETDEV_CHANGE
* can only be generated between NETDEV_UP and NETDEV_DOWN */
if (!netif_running(efx->net_dev))
return;
if (link_state->up != netif_carrier_ok(efx->net_dev)) {
efx->n_link_state_changes++;
if (link_state->up)
netif_carrier_on(efx->net_dev);
else
netif_carrier_off(efx->net_dev);
}
/* Status message for kernel log */
if (link_state->up) {
netif_info(efx, link, efx->net_dev,
"link up at %uMbps %s-duplex (MTU %d)%s\n",
link_state->speed, link_state->fd ? "full" : "half",
efx->net_dev->mtu,
(efx->promiscuous ? " [PROMISC]" : ""));
} else {
netif_info(efx, link, efx->net_dev, "link down\n");
}
}
void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
efx->link_advertising = advertising;
if (advertising) {
if (advertising & ADVERTISED_Pause)
efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
else
efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
if (advertising & ADVERTISED_Asym_Pause)
efx->wanted_fc ^= EFX_FC_TX;
}
}
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
if (efx->link_advertising) {
if (wanted_fc & EFX_FC_RX)
efx->link_advertising |= (ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
else
efx->link_advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
if (wanted_fc & EFX_FC_TX)
efx->link_advertising ^= ADVERTISED_Asym_Pause;
}
}
static void efx_fini_port(struct efx_nic *efx);
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
* the MAC appropriately. All other PHY configuration changes are pushed
* through phy_op->set_settings(), and pushed asynchronously to the MAC
* through efx_monitor().
*
* Callers must hold the mac_lock
*/
int __efx_reconfigure_port(struct efx_nic *efx)
{
enum efx_phy_mode phy_mode;
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
/* Serialise the promiscuous flag with efx_set_multicast_list. */
if (efx_dev_registered(efx)) {
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
}
/* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx))
efx->phy_mode |= PHY_MODE_TX_DISABLED;
else
efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
rc = efx->type->reconfigure_port(efx);
if (rc)
efx->phy_mode = phy_mode;
return rc;
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
* disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
mutex_lock(&efx->mac_lock);
rc = __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
return rc;
}
/* Asynchronous work item for changing MAC promiscuity and multicast
* hash. Avoid a drain/rx_ingress enable by reconfiguring the current
* MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
mutex_lock(&efx->mac_lock);
if (efx->port_enabled) {
efx->type->push_multicast_hash(efx);
efx->mac_op->reconfigure(efx);
}
mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
unsigned char *perm_addr;
int rc;
netif_dbg(efx, probe, efx->net_dev, "create port\n");
if (phy_flash_cfg)
efx->phy_mode = PHY_MODE_SPECIAL;
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
return rc;
/* Sanity check MAC address */
perm_addr = efx->net_dev->perm_addr;
if (is_valid_ether_addr(perm_addr)) {
memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
} else {
netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
perm_addr);
if (!allow_bad_hwaddr) {
rc = -EINVAL;
goto err;
}
random_ether_addr(efx->net_dev->dev_addr);
netif_info(efx, probe, efx->net_dev,
"using locally-generated MAC %pM\n",
efx->net_dev->dev_addr);
}
return 0;
err:
efx->type->remove_port(efx);
return rc;
}
static int efx_init_port(struct efx_nic *efx)
{
int rc;
netif_dbg(efx, drv, efx->net_dev, "init port\n");
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->init(efx);
if (rc)
goto fail1;
efx->port_initialized = true;
/* Reconfigure the MAC before creating dma queues (required for
* Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
efx->mac_op->reconfigure(efx);
/* Ensure the PHY advertises the correct flow control settings */
rc = efx->phy_op->reconfigure(efx);
if (rc)
goto fail2;
mutex_unlock(&efx->mac_lock);
return 0;
fail2:
efx->phy_op->fini(efx);
fail1:
mutex_unlock(&efx->mac_lock);
return rc;
}
static void efx_start_port(struct efx_nic *efx)
{
netif_dbg(efx, ifup, efx->net_dev, "start port\n");
BUG_ON(efx->port_enabled);
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
/* efx_mac_work() might have been scheduled after efx_stop_port(),
* and then cancelled by efx_flush_all() */
efx->type->push_multicast_hash(efx);
efx->mac_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
/* Serialise against efx_set_multicast_list() */
if (efx_dev_registered(efx)) {
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
}
}
static void efx_fini_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
if (!efx->port_initialized)
return;
efx->phy_op->fini(efx);
efx->port_initialized = false;
efx->link_state.up = false;
efx_link_status_changed(efx);
}
static void efx_remove_port(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
efx->type->remove_port(efx);
}
/**************************************************************************
*
* NIC handling
*
**************************************************************************/
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
rc = pci_enable_device(pci_dev);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to enable PCI device\n");
goto fail1;
}
pci_set_master(pci_dev);
/* Set the PCI DMA mask. Try all possibilities from our
* genuine mask down to 32 bits, because some architectures
* (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
* masks even though they reject 46 bit masks.
*/
while (dma_mask > 0x7fffffffUL) {
if (pci_dma_supported(pci_dev, dma_mask) &&
((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
break;
dma_mask >>= 1;
}
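/* Illustrative trace (not original text): a 46-bit type-level mask is
* retried at 45, 44, ... bits until one is accepted; the loop bottoms
* out once the mask would shrink to 31 bits, so a plain 32-bit mask is
* the last candidate tried. */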
if (rc) {
netif_err(efx, probe, efx->net_dev,
"could not find a suitable DMA mask\n");
goto fail2;
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
if (rc) {
/* pci_set_consistent_dma_mask() is not *allowed* to
* fail with a mask that pci_set_dma_mask() accepted,
* but just in case...
*/
netif_err(efx, probe, efx->net_dev,
"failed to set consistent DMA mask\n");
goto fail2;
}
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
"request for memory BAR failed\n");
rc = -EIO;
goto fail3;
}
efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n",
(unsigned long long)efx->membase_phys,
efx->type->mem_map_size, efx->membase);
return 0;
fail4:
pci_release_region(efx->pci_dev, EFX_MEM_BAR);
fail3:
efx->membase_phys = 0;
fail2:
pci_disable_device(efx->pci_dev);
fail1:
return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
efx->membase = NULL;
}
if (efx->membase_phys) {
pci_release_region(efx->pci_dev, EFX_MEM_BAR);
efx->membase_phys = 0;
}
pci_disable_device(efx->pci_dev);
}
/* Get number of channels wanted. Each channel will have its own IRQ,
* 1 RX queue and/or 2 TX queues. */
static int efx_wanted_channels(void)
{
cpumask_var_t core_mask;
int count;
int cpu;
if (rss_cpus)
return rss_cpus;
if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
return 1;
}
count = 0;
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, core_mask)) {
++count;
cpumask_or(core_mask, core_mask,
topology_core_cpumask(cpu));
}
}
free_cpumask_var(core_mask);
return count;
}
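/* E.g. (illustrative, with the default rss_cpus == 0) a two-package
* machine yields count == 2: one channel per physical package, as the
* rss_cpus module-parameter comment above describes. */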
static int
efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
{
#ifdef CONFIG_RFS_ACCEL
int i, rc;
efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
if (!efx->net_dev->rx_cpu_rmap)
return -ENOMEM;
for (i = 0; i < efx->n_rx_channels; i++) {
rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
xentries[i].vector);
if (rc) {
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
return rc;
}
}
#endif
return 0;
}
/* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues.
*/
static int efx_probe_interrupts(struct efx_nic *efx)
{
int max_channels =
min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
int rc, i;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
int n_channels;
n_channels = efx_wanted_channels();
if (separate_tx_channels)
n_channels *= 2;
n_channels = min(n_channels, max_channels);
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
" available (%d < %d).\n", rc, n_channels);
netif_err(efx, drv, efx->net_dev,
"WARNING: Performance may be reduced.\n");
EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
n_channels);
}
if (rc == 0) {
efx->n_channels = n_channels;
if (separate_tx_channels) {
efx->n_tx_channels =
max(efx->n_channels / 2, 1U);
efx->n_rx_channels =
max(efx->n_channels -
efx->n_tx_channels, 1U);
} else {
efx->n_tx_channels = efx->n_channels;
efx->n_rx_channels = efx->n_channels;
}
rc = efx_init_rx_cpu_rmap(efx, xentries);
if (rc) {
pci_disable_msix(efx->pci_dev);
return rc;
}
for (i = 0; i < n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
} else {
/* Fall back to single channel MSI */
efx->interrupt_mode = EFX_INT_MODE_MSI;
netif_err(efx, drv, efx->net_dev,
"could not enable MSI-X\n");
}
}
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
} else {
netif_err(efx, drv, efx->net_dev,
"could not enable MSI\n");
efx->interrupt_mode = EFX_INT_MODE_LEGACY;
}
}
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
return 0;
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
/* Remove MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
channel->irq = 0;
pci_disable_msi(efx->pci_dev);
pci_disable_msix(efx->pci_dev);
/* Remove legacy interrupt */
efx->legacy_irq = 0;
}
static void efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
/* We need to adjust the TX queue numbers if we have separate
* RX-only and TX-only channels.
*/
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
tx_queue->queue -= (efx->tx_channel_offset *
EFX_TXQ_TYPES);
}
}
static int efx_probe_nic(struct efx_nic *efx)
{
size_t i;
int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
/* Carry out hardware-type specific initialisation */
rc = efx->type->probe(efx);
if (rc)
return rc;
/* Determine the number of channels and queues by trying to hook
* in MSI-X interrupts. */
rc = efx_probe_interrupts(efx);
if (rc)
goto fail;
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] = i % efx->n_rx_channels;
efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
true);
return 0;
fail:
efx->type->remove(efx);
return rc;
}
static void efx_remove_nic(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
efx_remove_interrupts(efx);
efx->type->remove(efx);
}
/**************************************************************************
*
* NIC startup/shutdown
*
*************************************************************************/
static int efx_probe_all(struct efx_nic *efx)
{
int rc;
rc = efx_probe_nic(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
goto fail1;
}
rc = efx_probe_port(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create port\n");
goto fail2;
}
BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
rc = -EINVAL;
goto fail3;
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
rc = efx_probe_channels(efx);
if (rc)
goto fail3;
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
goto fail4;
}
return 0;
fail4:
efx_remove_channels(efx);
fail3:
efx_remove_port(efx);
fail2:
efx_remove_nic(efx);
fail1:
return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
* port, kernel transmit queue, NAPI processing and hardware interrupts,
* and ensures that the port is scheduled to be reconfigured.
* This function is safe to call multiple times when the NIC is in any
* state. */
static void efx_start_all(struct efx_nic *efx)
{
struct efx_channel *channel;
EFX_ASSERT_RESET_SERIALISED(efx);
/* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */
if (efx->port_enabled)
return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return;
if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
return;
/* Mark the port as enabled so port reconfigurations can start, then
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
netif_tx_wake_all_queues(efx->net_dev);
efx_for_each_channel(channel, efx)
efx_start_channel(channel);
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
/* Switch to event based MCDI completions after enabling interrupts.
* If a reset has been scheduled, then we need to stay in polled mode.
* Rather than serialising efx_mcdi_mode_event() [which sleeps] and
* reset_pending [modified from an atomic context], we instead guarantee
* that efx_mcdi_mode_poll() isn't reverted erroneously */
efx_mcdi_mode_event(efx);
if (efx->reset_pending)
efx_mcdi_mode_poll(efx);
/* Start the hardware monitor if there is one. Otherwise (we're link
* event driven), we have to poll the PHY because after an event queue
* flush, we could have missed a link state change */
if (efx->type->monitor != NULL) {
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
} else {
mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx))
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
}
efx->type->start_stats(efx);
}
/* Flush all delayed work. Should only be called when no more delayed work
* will be scheduled. This doesn't flush pending online resets (efx_reset),
* since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
/* Make sure the hardware monitor is stopped */
cancel_delayed_work_sync(&efx->monitor_work);
/* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->mac_work);
}
/* Quiesce hardware and software without bringing the link down.
* Safe to call multiple times, when the NIC and interface are in any
* state. The caller is guaranteed to subsequently be in a position
* to modify any hardware and software state they see fit without
* taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
struct efx_channel *channel;
EFX_ASSERT_RESET_SERIALISED(efx);
/* port_enabled can be read safely under the rtnl lock */
if (!efx->port_enabled)
return;
efx->type->stop_stats(efx);
/* Switch to MCDI polling on Siena before disabling interrupts */
efx_mcdi_mode_poll(efx);
/* Disable interrupts and wait for ISR to complete */
efx_nic_disable_interrupts(efx);
if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
efx->legacy_irq_enabled = false;
}
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
}
/* Stop all NAPI processing and synchronous rx refills */
efx_for_each_channel(channel, efx)
efx_stop_channel(channel);
/* Stop all asynchronous port reconfigurations. Since all
* event processing has already been stopped, there is no
* window to lose phy events */
efx_stop_port(efx);
/* Flush efx_mac_work(), refill_workqueue, monitor_work */
efx_flush_all(efx);
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
if (efx_dev_registered(efx)) {
netif_tx_stop_all_queues(efx->net_dev);
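/* Taking and releasing the TX lock waits for any ndo_start_xmit
* call already executing under the lock to complete. */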
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
}
static void efx_remove_all(struct efx_nic *efx)
{
efx_remove_filters(efx);
efx_remove_channels(efx);
efx_remove_port(efx);
efx_remove_nic(efx);
}
/**************************************************************************
*
* Interrupt moderation
*
**************************************************************************/
static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution)
{
if (usecs == 0)
return 0;
if (usecs < resolution)
return 1; /* never round down to 0 */
return usecs / resolution;
}
/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx)
{
struct efx_channel *channel;
unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
EFX_ASSERT_RESET_SERIALISED(efx);
if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX)
return -EINVAL;
if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
!rx_may_override_tx) {
netif_err(efx, drv, efx->net_dev, "Channels are shared. "
"RX and TX IRQ moderation must be equal\n");
return -EINVAL;
}
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel))
channel->irq_moderation = rx_ticks;
else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation = tx_ticks;
}
return 0;
}
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive)
{
*rx_adaptive = efx->irq_rx_adaptive;
*rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION;
/* If channels are shared between RX and TX, so is IRQ
* moderation. Otherwise, IRQ moderation is the same for all
* TX channels and is not adaptive.
*/
if (efx->tx_channel_offset == 0)
*tx_usecs = *rx_usecs;
else
*tx_usecs =
efx->channel[efx->tx_channel_offset]->irq_moderation *
EFX_IRQ_MOD_RESOLUTION;
}
/**************************************************************************
*
* Hardware monitor
*
**************************************************************************/
/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic,
monitor_work.work);
netif_vdbg(efx, timer, efx->net_dev,
"hardware monitor executing on CPU %d\n",
raw_smp_processor_id());
BUG_ON(efx->type->monitor == NULL);
/* If the mac_lock is already held then it is likely a port
* reconfiguration is already in progress, which will likely do
* most of the work of monitor() anyway. */
if (mutex_trylock(&efx->mac_lock)) {
if (efx->port_enabled)
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
}
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
}
/**************************************************************************
*
* ioctls
*
*************************************************************************/
/* Net device ioctl
* Context: process, rtnl_lock() held.
*/
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
EFX_ASSERT_RESET_SERIALISED(efx);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400)
data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
*
* NAPI interface
*
**************************************************************************/
static void efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx) {
channel->napi_dev = efx->net_dev;
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
}
}
static void efx_fini_napi_channel(struct efx_channel *channel)
{
if (channel->napi_dev)
netif_napi_del(&channel->napi_str);
channel->napi_dev = NULL;
}
static void efx_fini_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_fini_napi_channel(channel);
}
/**************************************************************************
*
* Kernel netpoll interface
*
*************************************************************************/
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Although in the common case interrupts will be disabled, this is not
* guaranteed. However, all our work happens inside the NAPI callback,
* so no locking is required.
*/
static void efx_netpoll(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
efx_schedule_channel(channel);
}
#endif
/**************************************************************************
*
* Kernel net device interface
*
*************************************************************************/
/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
raw_smp_processor_id());
if (efx->state == STATE_DISABLED)
return -EIO;
if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY;
if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
return -EIO;
/* Notify the kernel of the link state polled during driver load,
* before the monitor starts running */
efx_link_status_changed(efx);
efx_start_all(efx);
return 0;
}
/* Context: process, rtnl_lock() held.
* Note that the kernel will ignore our return code; this method
* should really return void.
*/
static int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
if (efx->state != STATE_DISABLED) {
/* Stop the device and flush all the channels */
efx_stop_all(efx);
efx_fini_channels(efx);
efx_init_channels(efx);
}
return 0;
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx);
spin_unlock_bh(&efx->stats_lock);
stats->rx_packets = mac_stats->rx_packets;
stats->tx_packets = mac_stats->tx_packets;
stats->rx_bytes = mac_stats->rx_bytes;
stats->tx_bytes = mac_stats->tx_bytes;
stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
stats->multicast = mac_stats->rx_multicast;
stats->collisions = mac_stats->tx_collision;
stats->rx_length_errors = (mac_stats->rx_gtjumbo +
mac_stats->rx_length_error);
stats->rx_crc_errors = mac_stats->rx_bad;
stats->rx_frame_errors = mac_stats->rx_align_error;
stats->rx_fifo_errors = mac_stats->rx_overflow;
stats->rx_missed_errors = mac_stats->rx_missed;
stats->tx_window_errors = mac_stats->tx_late_collision;
stats->rx_errors = (stats->rx_length_errors +
stats->rx_crc_errors +
stats->rx_frame_errors +
mac_stats->rx_symbol_error);
stats->tx_errors = (stats->tx_window_errors +
mac_stats->tx_bad);
return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
netif_err(efx, tx_err, efx->net_dev,
"TX stuck with port_enabled=%d: resetting channels\n",
efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc = 0;
EFX_ASSERT_RESET_SERIALISED(efx);
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
efx_stop_all(efx);
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_fini_channels(efx);
mutex_lock(&efx->mac_lock);
/* Reconfigure the MAC before enabling the dma queues so that
* the RX buffers don't overflow */
net_dev->mtu = new_mtu;
efx->mac_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_init_channels(efx);
efx_start_all(efx);
return rc;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
char *new_addr = addr->sa_data;
EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
"invalid ethernet MAC address requested: %pM\n",
new_addr);
return -EINVAL;
}
memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
efx->mac_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
/* Build multicast hash table */
if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
netdev_for_each_mc_addr(ha, net_dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
}
/* Broadcast packets go through the multicast hash filter.
* ether_crc_le() of the broadcast address is 0xbe2612ff
* so we always add bit 0xff to the mask.
*/
set_bit_le(0xff, mc_hash->byte);
}
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
/* Otherwise efx_start_port() will do this */
}
static int efx_set_features(struct net_device *net_dev, u32 data)
{
struct efx_nic *efx = netdev_priv(net_dev);
/* If disabling RX n-tuple filtering, clear existing filters */
if (net_dev->features & ~data & NETIF_F_NTUPLE)
efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_multicast_list,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
.ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
};
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
efx_mtd_rename(efx);
efx_set_channel_names(efx);
}
static int efx_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *net_dev = ptr;
if (net_dev->netdev_ops == &efx_netdev_ops &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
return NOTIFY_DONE;
}
static struct notifier_block efx_netdev_notifier = {
.notifier_call = efx_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); /* read-only: no store method is provided */
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
struct efx_channel *channel;
int rc;
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
/* Clear MAC statistics */
efx->mac_op->update_stats(efx);
memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
rtnl_lock();
rc = dev_alloc_name(net_dev, net_dev->name);
if (rc < 0)
goto fail_locked;
efx_update_name(efx);
rc = register_netdevice(net_dev);
if (rc)
goto fail_locked;
efx_for_each_channel(channel, efx) {
struct efx_tx_queue *tx_queue;
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue_core_txq(tx_queue);
}
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(efx->net_dev);
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to init net dev attributes\n");
goto fail_registered;
}
return 0;
fail_locked:
rtnl_unlock();
netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc;
fail_registered:
unregister_netdev(net_dev);
return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
if (!efx->net_dev)
return;
BUG_ON(netdev_priv(efx->net_dev) != efx);
/* Free up any skbs still remaining. This has to happen before
* we try to unregister the netdev as running their destructors
* may be needed to get the device ref. count to 0. */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_release_tx_buffers(tx_queue);
}
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
}
/**************************************************************************
*
* Device reset and suspend
*
**************************************************************************/
/* Tears down the entire software state and most of the hardware state
* before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
efx_fini_channels(efx);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx);
efx->type->fini(efx);
}
/* This function will always ensure that the locks acquired in
* efx_reset_down() are released. A failure return code indicates
* that we were unable to reinitialise the hardware, and the
* driver should be disabled. If ok is false, then the rx and tx
* engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
goto fail;
}
if (!ok)
goto fail;
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
rc = efx->phy_op->init(efx);
if (rc)
goto fail;
if (efx->phy_op->reconfigure(efx))
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
}
efx->mac_op->reconfigure(efx);
efx_init_channels(efx);
efx_restore_filters(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
return 0;
fail:
efx->port_initialized = false;
mutex_unlock(&efx->mac_lock);
return rc;
}
/* Reset the NIC using the specified method. Note that the reset may
* fail, in which case the card will be left in an unusable state.
*
* Caller must hold the rtnl_lock.
*/
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
int rc, rc2;
bool disabled;
netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
RESET_TYPE(method));
netif_device_detach(efx->net_dev);
efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
goto out;
}
/* Clear flags for the scopes we covered. We assume the NIC and
* driver are now quiescent so that there is no race here.
*/
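/* -(1 << (method + 1)) is a mask with bits method + 1 and above set,
* so this clears the flags for this and all lesser reset scopes while
* preserving any more thorough resets that are still pending. */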
efx->reset_pending &= -(1 << (method + 1));
/* Reinitialise bus-mastering, which may have been turned off before
* the reset was scheduled. This is still appropriate, even in the
* RESET_TYPE_DISABLE since this driver generally assumes the hardware
* can respond to requests. */
pci_set_master(efx->pci_dev);
out:
/* Leave device stopped if necessary */
disabled = rc || method == RESET_TYPE_DISABLE;
rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
if (!rc)
rc = rc2;
}
if (disabled) {
dev_close(efx->net_dev);
netif_err(efx, drv, efx->net_dev, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
netif_device_attach(efx->net_dev);
}
return rc;
}
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
unsigned long pending = ACCESS_ONCE(efx->reset_pending);
if (!pending)
return;
/* If we're not RUNNING then don't reset. Leave the reset_pending
* flags set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
netif_info(efx, drv, efx->net_dev,
"scheduled reset quenched. NIC not RUNNING\n");
return;
}
rtnl_lock();
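/* fls() returns the one-based index of the highest set bit, so this
* performs the most thorough reset type currently pending; lesser
* scopes are covered by it and their flags cleared in efx_reset(). */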
(void)efx_reset(efx, fls(pending) - 1);
rtnl_unlock();
}
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
switch (type) {
case RESET_TYPE_INVISIBLE:
case RESET_TYPE_ALL:
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
break;
default:
method = efx->type->map_reset_reason(type);
netif_dbg(efx, drv, efx->net_dev,
"scheduling %s reset for %s\n",
RESET_TYPE(method), RESET_TYPE(type));
break;
}
set_bit(method, &efx->reset_pending);
/* efx_process_channel() will no longer read events once a
* reset is scheduled. So switch back to polled MCDI completions. */
efx_mcdi_mode_poll(efx);
queue_work(reset_workqueue, &efx->reset_work);
}
/**************************************************************************
*
* List of NICs we support
*
**************************************************************************/
/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
.driver_data = (unsigned long) &siena_a0_nic_type},
{0} /* end of list */
};
/**************************************************************************
*
* Dummy PHY/MAC operations
*
* Can be used for some unimplemented operations
* Needed so all function pointers are valid and do not have to be tested
* before use
*
**************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
return false;
}
static const struct efx_phy_operations efx_dummy_phy_operations = {
.init = efx_port_dummy_op_int,
.reconfigure = efx_port_dummy_op_int,
.poll = efx_port_dummy_op_poll,
.fini = efx_port_dummy_op_void,
};
/**************************************************************************
*
* Data housekeeping
*
**************************************************************************/
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
int i;
/* Initialise common structures */
memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
#endif
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_INIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->mac_op = type->default_mac_ops;
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
}
efx->type = type;
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
/* Would be good to use the net_dev name, but we're too early */
snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
pci_name(pci_dev));
efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
if (!efx->workqueue)
goto fail;
return 0;
fail:
efx_fini_struct(efx);
return -ENOMEM;
}
static void efx_fini_struct(struct efx_nic *efx)
{
int i;
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
}
}
/**************************************************************************
*
* PCI interface
*
**************************************************************************/
/* Main body of final NIC shutdown code
* This is called only at module unload (or hotplug removal).
*/
static void efx_pci_remove_main(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
efx_nic_fini_interrupt(efx);
efx_fini_channels(efx);
efx_fini_port(efx);
efx->type->fini(efx);
efx_fini_napi(efx);
efx_remove_all(efx);
}
/* Final NIC shutdown
* This is called only at module unload (or hotplug removal).
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
struct efx_nic *efx;
efx = pci_get_drvdata(pci_dev);
if (!efx)
return;
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
efx->state = STATE_FINI;
dev_close(efx->net_dev);
/* Allow any queued efx_resets() to complete */
rtnl_unlock();
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
/* Wait for any scheduled resets to complete. No more will be
* scheduled from this point because efx_stop_all() has been
* called, we are no longer registered with driverlink, and
* the net devices have been removed. */
cancel_work_sync(&efx->reset_work);
efx_pci_remove_main(efx);
efx_fini_io(efx);
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
pci_set_drvdata(pci_dev, NULL);
efx_fini_struct(efx);
free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
* This is called at module load (or hotplug insertion, theoretically).
*/
static int efx_pci_probe_main(struct efx_nic *efx)
{
int rc;
/* Do start-of-day initialisation */
rc = efx_probe_all(efx);
if (rc)
goto fail1;
efx_init_napi(efx);
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to initialise NIC\n");
goto fail3;
}
rc = efx_init_port(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to initialise port\n");
goto fail4;
}
efx_init_channels(efx);
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
return 0;
fail5:
efx_fini_channels(efx);
efx_fini_port(efx);
fail4:
efx->type->fini(efx);
fail3:
efx_fini_napi(efx);
efx_remove_all(efx);
fail1:
return rc;
}
/* NIC initialisation
*
* This is called at module load (or hotplug insertion,
* theoretically). It sets up PCI mappings, tests and resets the NIC,
* sets up and registers the network devices with the kernel and hooks
* the interrupt service routine. It does not prepare the device for
* transmission; this is left to the first time one of the network
* interfaces is brought up (i.e. efx_net_open).
*/
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev;
struct efx_nic *efx;
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
EFX_MAX_RX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO |
NETIF_F_RXCSUM);
if (type->offload_features & NETIF_F_V6_CSUM)
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
/* All offloads can be toggled except NETIF_F_HIGHDMA, which reflects
* a fixed DMA capability of the device */
net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, type, pci_dev, net_dev);
if (rc)
goto fail1;
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
if (rc)
goto fail2;
/* No serialisation is required with the reset path because
* we're in STATE_INIT. */
for (i = 0; i < 5; i++) {
rc = efx_pci_probe_main(efx);
/* Serialise against efx_reset(). No more resets will be
* scheduled since efx_stop_all() has been called, and we
* have not and never have been registered with either
* the rtnetlink or driverlink layers. */
cancel_work_sync(&efx->reset_work);
if (rc == 0) {
if (efx->reset_pending) {
/* If there was a scheduled reset during
* probe, the NIC is probably hosed anyway */
efx_pci_remove_main(efx);
rc = -EIO;
} else {
break;
}
}
/* Retry only if the pending resets are all recoverable (invisible
* or full resets scheduled during probe); anything else, or no
* pending reset at all, is fatal */
if (efx->reset_pending &
~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
!efx->reset_pending)
goto fail3;
efx->reset_pending = 0;
}
if (rc) {
netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
goto fail4;
}
/* Switch to the running state before we expose the device to the OS,
* so that dev_open()|efx_start_all() will actually start the device */
efx->state = STATE_RUNNING;
rc = efx_register_netdev(efx);
if (rc)
goto fail5;
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
rtnl_lock();
efx_mtd_probe(efx); /* allowed to fail */
rtnl_unlock();
return 0;
fail5:
efx_pci_remove_main(efx);
fail4:
fail3:
efx_fini_io(efx);
fail2:
efx_fini_struct(efx);
fail1:
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
}
static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_FINI;
netif_device_detach(efx->net_dev);
efx_stop_all(efx);
efx_fini_channels(efx);
return 0;
}
static int efx_pm_thaw(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_INIT;
efx_init_channels(efx);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
netif_device_attach(efx->net_dev);
efx->state = STATE_RUNNING;
efx->type->resume_wol(efx);
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
queue_work(reset_workqueue, &efx->reset_work);
return 0;
}
static int efx_pm_poweroff(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct efx_nic *efx = pci_get_drvdata(pci_dev);
efx->type->fini(efx);
efx->reset_pending = 0;
pci_save_state(pci_dev);
return pci_set_power_state(pci_dev, PCI_D3hot);
}
/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct efx_nic *efx = pci_get_drvdata(pci_dev);
int rc;
rc = pci_set_power_state(pci_dev, PCI_D0);
if (rc)
return rc;
pci_restore_state(pci_dev);
rc = pci_enable_device(pci_dev);
if (rc)
return rc;
pci_set_master(efx->pci_dev);
rc = efx->type->reset(efx, RESET_TYPE_ALL);
if (rc)
return rc;
rc = efx->type->init(efx);
if (rc)
return rc;
efx_pm_thaw(dev);
return 0;
}
static int efx_pm_suspend(struct device *dev)
{
int rc;
efx_pm_freeze(dev);
rc = efx_pm_poweroff(dev);
if (rc)
efx_pm_resume(dev);
return rc;
}
static struct dev_pm_ops efx_pm_ops = {
.suspend = efx_pm_suspend,
.resume = efx_pm_resume,
.freeze = efx_pm_freeze,
.thaw = efx_pm_thaw,
.poweroff = efx_pm_poweroff,
.restore = efx_pm_resume,
};
static struct pci_driver efx_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
};
/**************************************************************************
*
* Kernel module interface
*
*************************************************************************/
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
"Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
int rc;
printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
rc = register_netdevice_notifier(&efx_netdev_notifier);
if (rc)
goto err_notifier;
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
rc = -ENOMEM;
goto err_reset;
}
rc = pci_register_driver(&efx_pci_driver);
if (rc < 0)
goto err_pci;
return 0;
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
}
static void __exit efx_exit_module(void)
{
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);
MODULE_AUTHOR("Solarflare Communications and "
"Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3689_0 |
crossvul-cpp_data_good_2020_2 | /*
* contrib/hstore/hstore_op.c
*/
#include "postgres.h"
#include "access/hash.h"
#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "hstore.h"
/* old names for C functions */
HSTORE_POLLUTE(hstore_fetchval, fetchval);
HSTORE_POLLUTE(hstore_exists, exists);
HSTORE_POLLUTE(hstore_defined, defined);
HSTORE_POLLUTE(hstore_delete, delete);
HSTORE_POLLUTE(hstore_concat, hs_concat);
HSTORE_POLLUTE(hstore_contains, hs_contains);
HSTORE_POLLUTE(hstore_contained, hs_contained);
HSTORE_POLLUTE(hstore_akeys, akeys);
HSTORE_POLLUTE(hstore_avals, avals);
HSTORE_POLLUTE(hstore_skeys, skeys);
HSTORE_POLLUTE(hstore_svals, svals);
HSTORE_POLLUTE(hstore_each, each);
/*
* We're often finding a sequence of keys in ascending order. The
* "lowbound" parameter is used to cache lower bounds of searches
* between calls, based on this assumption. Pass NULL for it for
* one-off or unordered searches.
*/
int
hstoreFindKey(HStore *hs, int *lowbound, char *key, int keylen)
{
HEntry *entries = ARRPTR(hs);
int stopLow = lowbound ? *lowbound : 0;
int stopHigh = HS_COUNT(hs);
int stopMiddle;
char *base = STRPTR(hs);
while (stopLow < stopHigh)
{
int difference;
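/*
* The midpoint is computed as low + (high - low) / 2, the usual
* overflow-safe form of (low + high) / 2.
*/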
stopMiddle = stopLow + (stopHigh - stopLow) / 2;
if (HS_KEYLEN(entries, stopMiddle) == keylen)
difference = memcmp(HS_KEY(entries, base, stopMiddle), key, keylen);
else
difference = (HS_KEYLEN(entries, stopMiddle) > keylen) ? 1 : -1;
if (difference == 0)
{
if (lowbound)
*lowbound = stopMiddle + 1;
return stopMiddle;
}
else if (difference < 0)
stopLow = stopMiddle + 1;
else
stopHigh = stopMiddle;
}
if (lowbound)
*lowbound = stopLow;
return -1;
}
Pairs *
hstoreArrayToPairs(ArrayType *a, int *npairs)
{
Datum *key_datums;
bool *key_nulls;
int key_count;
Pairs *key_pairs;
int bufsiz;
int i,
j;
deconstruct_array(a,
TEXTOID, -1, false, 'i',
&key_datums, &key_nulls, &key_count);
if (key_count == 0)
{
*npairs = 0;
return NULL;
}
/*
* A text array uses at least eight bytes per element, so any overflow in
* "key_count * sizeof(Pairs)" is small enough for palloc() to catch.
* However, credible improvements to the array format could invalidate
* that assumption. Therefore, use an explicit check rather than relying
* on palloc() to complain.
*/
if (key_count > MaxAllocSize / sizeof(Pairs))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
key_count, (int) (MaxAllocSize / sizeof(Pairs)))));
key_pairs = palloc(sizeof(Pairs) * key_count);
for (i = 0, j = 0; i < key_count; i++)
{
if (!key_nulls[i])
{
key_pairs[j].key = VARDATA(key_datums[i]);
key_pairs[j].keylen = VARSIZE(key_datums[i]) - VARHDRSZ;
key_pairs[j].val = NULL;
key_pairs[j].vallen = 0;
key_pairs[j].needfree = 0;
key_pairs[j].isnull = 1;
j++;
}
}
*npairs = hstoreUniquePairs(key_pairs, j, &bufsiz);
return key_pairs;
}
PG_FUNCTION_INFO_V1(hstore_fetchval);
Datum hstore_fetchval(PG_FUNCTION_ARGS);
Datum
hstore_fetchval(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
HEntry *entries = ARRPTR(hs);
text *out;
int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
if (idx < 0 || HS_VALISNULL(entries, idx))
PG_RETURN_NULL();
out = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), idx),
HS_VALLEN(entries, idx));
PG_RETURN_TEXT_P(out);
}
PG_FUNCTION_INFO_V1(hstore_exists);
Datum hstore_exists(PG_FUNCTION_ARGS);
Datum
hstore_exists(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
PG_RETURN_BOOL(idx >= 0);
}
PG_FUNCTION_INFO_V1(hstore_exists_any);
Datum hstore_exists_any(PG_FUNCTION_ARGS);
Datum
hstore_exists_any(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *keys = PG_GETARG_ARRAYTYPE_P(1);
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys);
int i;
int lowbound = 0;
bool res = false;
/*
* we exploit the fact that the pairs list is already sorted into strictly
* increasing order to narrow the hstoreFindKey search; each search can
* start one entry past the previous "found" entry, or at the lower bound
* of the last search.
*/
for (i = 0; i < nkeys; i++)
{
int idx = hstoreFindKey(hs, &lowbound,
key_pairs[i].key, key_pairs[i].keylen);
if (idx >= 0)
{
res = true;
break;
}
}
PG_RETURN_BOOL(res);
}
PG_FUNCTION_INFO_V1(hstore_exists_all);
Datum hstore_exists_all(PG_FUNCTION_ARGS);
Datum
hstore_exists_all(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *keys = PG_GETARG_ARRAYTYPE_P(1);
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys);
int i;
int lowbound = 0;
bool res = true;
/*
* we exploit the fact that the pairs list is already sorted into strictly
* increasing order to narrow the hstoreFindKey search; each search can
* start one entry past the previous "found" entry, or at the lower bound
* of the last search.
*/
for (i = 0; i < nkeys; i++)
{
int idx = hstoreFindKey(hs, &lowbound,
key_pairs[i].key, key_pairs[i].keylen);
if (idx < 0)
{
res = false;
break;
}
}
PG_RETURN_BOOL(res);
}
PG_FUNCTION_INFO_V1(hstore_defined);
Datum hstore_defined(PG_FUNCTION_ARGS);
Datum
hstore_defined(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
HEntry *entries = ARRPTR(hs);
int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
bool res = (idx >= 0 && !HS_VALISNULL(entries, idx));
PG_RETURN_BOOL(res);
}
PG_FUNCTION_INFO_V1(hstore_delete);
Datum hstore_delete(PG_FUNCTION_ARGS);
Datum
hstore_delete(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
char *keyptr = VARDATA_ANY(key);
int keylen = VARSIZE_ANY_EXHDR(key);
HStore *out = palloc(VARSIZE(hs));
char *bufs,
*bufd,
*ptrd;
HEntry *es,
*ed;
int i;
int count = HS_COUNT(hs);
int outcount = 0;
SET_VARSIZE(out, VARSIZE(hs));
HS_SETCOUNT(out, count); /* temporary: STRPTR() below needs a count; HS_FINALIZE() sets the real one */
bufs = STRPTR(hs);
es = ARRPTR(hs);
bufd = ptrd = STRPTR(out);
ed = ARRPTR(out);
for (i = 0; i < count; ++i)
{
int len = HS_KEYLEN(es, i);
char *ptrs = HS_KEY(es, bufs, i);
if (!(len == keylen && memcmp(ptrs, keyptr, keylen) == 0))
{
int vallen = HS_VALLEN(es, i);
HS_COPYITEM(ed, bufd, ptrd, ptrs, len, vallen, HS_VALISNULL(es, i));
++outcount;
}
}
HS_FINALIZE(out, outcount, bufd, ptrd);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_delete_array);
Datum hstore_delete_array(PG_FUNCTION_ARGS);
Datum
hstore_delete_array(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HStore *out = palloc(VARSIZE(hs));
int hs_count = HS_COUNT(hs);
char *ps,
*bufd,
*pd;
HEntry *es,
*ed;
int i,
j;
int outcount = 0;
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
SET_VARSIZE(out, VARSIZE(hs));
HS_SETCOUNT(out, hs_count); /* temporary! */
ps = STRPTR(hs);
es = ARRPTR(hs);
bufd = pd = STRPTR(out);
ed = ARRPTR(out);
if (nkeys == 0)
{
/* return a copy of the input, unchanged */
memcpy(out, hs, VARSIZE(hs));
HS_FIXSIZE(out, hs_count);
HS_SETCOUNT(out, hs_count);
PG_RETURN_POINTER(out);
}
/*
* this is in effect a merge between hs and key_pairs, both of which are
* already sorted by (keylen,key); we take keys from hs only
*/
for (i = j = 0; i < hs_count;)
{
int difference;
if (j >= nkeys)
difference = -1;
else
{
int skeylen = HS_KEYLEN(es, i);
if (skeylen == key_pairs[j].keylen)
difference = memcmp(HS_KEY(es, ps, i),
key_pairs[j].key,
key_pairs[j].keylen);
else
difference = (skeylen > key_pairs[j].keylen) ? 1 : -1;
}
if (difference > 0)
++j;
else if (difference == 0)
++i, ++j;
else
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es, ps, i), HS_KEYLEN(es, i),
HS_VALLEN(es, i), HS_VALISNULL(es, i));
++outcount;
++i;
}
}
HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_delete_hstore);
Datum hstore_delete_hstore(PG_FUNCTION_ARGS);
Datum
hstore_delete_hstore(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HStore *hs2 = PG_GETARG_HS(1);
HStore *out = palloc(VARSIZE(hs));
int hs_count = HS_COUNT(hs);
int hs2_count = HS_COUNT(hs2);
char *ps,
*ps2,
*bufd,
*pd;
HEntry *es,
*es2,
*ed;
int i,
j;
int outcount = 0;
SET_VARSIZE(out, VARSIZE(hs));
HS_SETCOUNT(out, hs_count); /* temporary! */
ps = STRPTR(hs);
es = ARRPTR(hs);
ps2 = STRPTR(hs2);
es2 = ARRPTR(hs2);
bufd = pd = STRPTR(out);
ed = ARRPTR(out);
if (hs2_count == 0)
{
/* return a copy of the input, unchanged */
memcpy(out, hs, VARSIZE(hs));
HS_FIXSIZE(out, hs_count);
HS_SETCOUNT(out, hs_count);
PG_RETURN_POINTER(out);
}
/*
* this is in effect a merge between hs and hs2, both of which are already
* sorted by (keylen,key); we take keys from hs only; for equal keys, we
* take the value from hs unless the values are equal
*/
for (i = j = 0; i < hs_count;)
{
int difference;
if (j >= hs2_count)
difference = -1;
else
{
int skeylen = HS_KEYLEN(es, i);
int s2keylen = HS_KEYLEN(es2, j);
if (skeylen == s2keylen)
difference = memcmp(HS_KEY(es, ps, i),
HS_KEY(es2, ps2, j),
skeylen);
else
difference = (skeylen > s2keylen) ? 1 : -1;
}
if (difference > 0)
++j;
else if (difference == 0)
{
int svallen = HS_VALLEN(es, i);
int snullval = HS_VALISNULL(es, i);
if (snullval != HS_VALISNULL(es2, j)
|| (!snullval
&& (svallen != HS_VALLEN(es2, j)
|| memcmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0)))
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es, ps, i), HS_KEYLEN(es, i),
svallen, snullval);
++outcount;
}
++i, ++j;
}
else
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es, ps, i), HS_KEYLEN(es, i),
HS_VALLEN(es, i), HS_VALISNULL(es, i));
++outcount;
++i;
}
}
HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_concat);
Datum hstore_concat(PG_FUNCTION_ARGS);
Datum
hstore_concat(PG_FUNCTION_ARGS)
{
HStore *s1 = PG_GETARG_HS(0);
HStore *s2 = PG_GETARG_HS(1);
HStore *out = palloc(VARSIZE(s1) + VARSIZE(s2));
char *ps1,
*ps2,
*bufd,
*pd;
HEntry *es1,
*es2,
*ed;
int s1idx;
int s2idx;
int s1count = HS_COUNT(s1);
int s2count = HS_COUNT(s2);
int outcount = 0;
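/*
* In the worst case (no duplicate keys) the result contains every
* pair from both inputs, so the sum of the two sizes less one shared
* header is a safe upper bound; HS_FINALIZE() trims it afterwards.
*/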
SET_VARSIZE(out, VARSIZE(s1) + VARSIZE(s2) - HSHRDSIZE);
HS_SETCOUNT(out, s1count + s2count);
if (s1count == 0)
{
/* return a copy of the input, unchanged */
memcpy(out, s2, VARSIZE(s2));
HS_FIXSIZE(out, s2count);
HS_SETCOUNT(out, s2count);
PG_RETURN_POINTER(out);
}
if (s2count == 0)
{
/* return a copy of the input, unchanged */
memcpy(out, s1, VARSIZE(s1));
HS_FIXSIZE(out, s1count);
HS_SETCOUNT(out, s1count);
PG_RETURN_POINTER(out);
}
ps1 = STRPTR(s1);
ps2 = STRPTR(s2);
bufd = pd = STRPTR(out);
es1 = ARRPTR(s1);
es2 = ARRPTR(s2);
ed = ARRPTR(out);
/*
* this is in effect a merge between s1 and s2, both of which are already
* sorted by (keylen,key); we take s2 for equal keys
*/
for (s1idx = s2idx = 0; s1idx < s1count || s2idx < s2count; ++outcount)
{
int difference;
if (s1idx >= s1count)
difference = 1;
else if (s2idx >= s2count)
difference = -1;
else
{
int s1keylen = HS_KEYLEN(es1, s1idx);
int s2keylen = HS_KEYLEN(es2, s2idx);
if (s1keylen == s2keylen)
difference = memcmp(HS_KEY(es1, ps1, s1idx),
HS_KEY(es2, ps2, s2idx),
s1keylen);
else
difference = (s1keylen > s2keylen) ? 1 : -1;
}
if (difference >= 0)
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es2, ps2, s2idx), HS_KEYLEN(es2, s2idx),
HS_VALLEN(es2, s2idx), HS_VALISNULL(es2, s2idx));
++s2idx;
if (difference == 0)
++s1idx;
}
else
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es1, ps1, s1idx), HS_KEYLEN(es1, s1idx),
HS_VALLEN(es1, s1idx), HS_VALISNULL(es1, s1idx));
++s1idx;
}
}
HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_slice_to_array);
Datum hstore_slice_to_array(PG_FUNCTION_ARGS);
Datum
hstore_slice_to_array(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HEntry *entries = ARRPTR(hs);
char *ptr = STRPTR(hs);
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
ArrayType *aout;
Datum *key_datums;
bool *key_nulls;
Datum *out_datums;
bool *out_nulls;
int key_count;
int i;
deconstruct_array(key_array,
TEXTOID, -1, false, 'i',
&key_datums, &key_nulls, &key_count);
if (key_count == 0)
{
aout = construct_empty_array(TEXTOID);
PG_RETURN_POINTER(aout);
}
out_datums = palloc(sizeof(Datum) * key_count);
out_nulls = palloc(sizeof(bool) * key_count);
for (i = 0; i < key_count; ++i)
{
text *key = (text *) DatumGetPointer(key_datums[i]);
int idx;
if (key_nulls[i])
idx = -1;
else
idx = hstoreFindKey(hs, NULL, VARDATA(key), VARSIZE(key) - VARHDRSZ);
if (idx < 0 || HS_VALISNULL(entries, idx))
{
out_nulls[i] = true;
out_datums[i] = (Datum) 0;
}
else
{
out_datums[i] = PointerGetDatum(
cstring_to_text_with_len(HS_VAL(entries, ptr, idx),
HS_VALLEN(entries, idx)));
out_nulls[i] = false;
}
}
aout = construct_md_array(out_datums, out_nulls,
ARR_NDIM(key_array),
ARR_DIMS(key_array),
ARR_LBOUND(key_array),
TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(aout);
}
PG_FUNCTION_INFO_V1(hstore_slice_to_hstore);
Datum hstore_slice_to_hstore(PG_FUNCTION_ARGS);
Datum
hstore_slice_to_hstore(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HEntry *entries = ARRPTR(hs);
char *ptr = STRPTR(hs);
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
HStore *out;
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
Pairs *out_pairs;
int bufsiz;
int lastidx = 0;
int i;
int out_count = 0;
if (nkeys == 0)
{
out = hstorePairs(NULL, 0, 0);
PG_RETURN_POINTER(out);
}
/* hstoreArrayToPairs() checked overflow */
out_pairs = palloc(sizeof(Pairs) * nkeys);
bufsiz = 0;
/*
* we exploit the fact that the pairs list is already sorted into strictly
* increasing order to narrow the hstoreFindKey search; each search can
* start one entry past the previous "found" entry, or at the lower bound
* of the last search.
*/
for (i = 0; i < nkeys; ++i)
{
int idx = hstoreFindKey(hs, &lastidx,
key_pairs[i].key, key_pairs[i].keylen);
if (idx >= 0)
{
out_pairs[out_count].key = key_pairs[i].key;
bufsiz += (out_pairs[out_count].keylen = key_pairs[i].keylen);
out_pairs[out_count].val = HS_VAL(entries, ptr, idx);
bufsiz += (out_pairs[out_count].vallen = HS_VALLEN(entries, idx));
out_pairs[out_count].isnull = HS_VALISNULL(entries, idx);
out_pairs[out_count].needfree = false;
++out_count;
}
}
/*
* we don't use uniquePairs here because we know that the pairs list is
* already sorted and uniq'ed.
*/
out = hstorePairs(out_pairs, out_count, bufsiz);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_akeys);
Datum hstore_akeys(PG_FUNCTION_ARGS);
Datum
hstore_akeys(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
Datum *d;
ArrayType *a;
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
int count = HS_COUNT(hs);
int i;
if (count == 0)
{
a = construct_empty_array(TEXTOID);
PG_RETURN_POINTER(a);
}
d = (Datum *) palloc(sizeof(Datum) * count);
for (i = 0; i < count; ++i)
{
text *item = cstring_to_text_with_len(HS_KEY(entries, base, i),
HS_KEYLEN(entries, i));
d[i] = PointerGetDatum(item);
}
a = construct_array(d, count,
TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(a);
}
PG_FUNCTION_INFO_V1(hstore_avals);
Datum hstore_avals(PG_FUNCTION_ARGS);
Datum
hstore_avals(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
Datum *d;
bool *nulls;
ArrayType *a;
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
int count = HS_COUNT(hs);
int lb = 1;
int i;
if (count == 0)
{
a = construct_empty_array(TEXTOID);
PG_RETURN_POINTER(a);
}
d = (Datum *) palloc(sizeof(Datum) * count);
nulls = (bool *) palloc(sizeof(bool) * count);
for (i = 0; i < count; ++i)
{
if (HS_VALISNULL(entries, i))
{
d[i] = (Datum) 0;
nulls[i] = true;
}
else
{
text *item = cstring_to_text_with_len(HS_VAL(entries, base, i),
HS_VALLEN(entries, i));
d[i] = PointerGetDatum(item);
nulls[i] = false;
}
}
a = construct_md_array(d, nulls, 1, &count, &lb,
TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(a);
}
static ArrayType *
hstore_to_array_internal(HStore *hs, int ndims)
{
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
int count = HS_COUNT(hs);
int out_size[2] = {0, 2};
int lb[2] = {1, 1};
Datum *out_datums;
bool *out_nulls;
int i;
Assert(ndims < 3);
if (count == 0 || ndims == 0)
return construct_empty_array(TEXTOID);
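/*
* With ndims == 1 this is a flat array of count * 2 alternating
* key/value elements; with ndims == 2 it is count rows of two
* columns (out_size[1] is fixed at 2 above).
*/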
out_size[0] = count * 2 / ndims;
out_datums = palloc(sizeof(Datum) * count * 2);
out_nulls = palloc(sizeof(bool) * count * 2);
for (i = 0; i < count; ++i)
{
text *key = cstring_to_text_with_len(HS_KEY(entries, base, i),
HS_KEYLEN(entries, i));
out_datums[i * 2] = PointerGetDatum(key);
out_nulls[i * 2] = false;
if (HS_VALISNULL(entries, i))
{
out_datums[i * 2 + 1] = (Datum) 0;
out_nulls[i * 2 + 1] = true;
}
else
{
text *item = cstring_to_text_with_len(HS_VAL(entries, base, i),
HS_VALLEN(entries, i));
out_datums[i * 2 + 1] = PointerGetDatum(item);
out_nulls[i * 2 + 1] = false;
}
}
return construct_md_array(out_datums, out_nulls,
ndims, out_size, lb,
TEXTOID, -1, false, 'i');
}
PG_FUNCTION_INFO_V1(hstore_to_array);
Datum hstore_to_array(PG_FUNCTION_ARGS);
Datum
hstore_to_array(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *out = hstore_to_array_internal(hs, 1);
PG_RETURN_POINTER(out);
}
PG_FUNCTION_INFO_V1(hstore_to_matrix);
Datum hstore_to_matrix(PG_FUNCTION_ARGS);
Datum
hstore_to_matrix(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *out = hstore_to_array_internal(hs, 2);
PG_RETURN_POINTER(out);
}
/*
* Common initialization function for the various set-returning
* funcs. fcinfo is only passed if the function is to return a
* composite; it will be used to look up the return tupledesc.
* we stash a copy of the hstore in the multi-call context in
* case it was originally toasted. (At least I assume that's why;
* there was no explanatory comment in the original code. --AG)
*/
static void
setup_firstcall(FuncCallContext *funcctx, HStore *hs,
FunctionCallInfoData *fcinfo)
{
MemoryContext oldcontext;
HStore *st;
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
st = (HStore *) palloc(VARSIZE(hs));
memcpy(st, hs, VARSIZE(hs));
funcctx->user_fctx = (void *) st;
if (fcinfo)
{
TupleDesc tupdesc;
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
}
MemoryContextSwitchTo(oldcontext);
}
PG_FUNCTION_INFO_V1(hstore_skeys);
Datum hstore_skeys(PG_FUNCTION_ARGS);
Datum
hstore_skeys(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
HStore *hs;
int i;
if (SRF_IS_FIRSTCALL())
{
hs = PG_GETARG_HS(0);
funcctx = SRF_FIRSTCALL_INIT();
setup_firstcall(funcctx, hs, NULL);
}
funcctx = SRF_PERCALL_SETUP();
hs = (HStore *) funcctx->user_fctx;
i = funcctx->call_cntr;
if (i < HS_COUNT(hs))
{
HEntry *entries = ARRPTR(hs);
text *item;
item = cstring_to_text_with_len(HS_KEY(entries, STRPTR(hs), i),
HS_KEYLEN(entries, i));
SRF_RETURN_NEXT(funcctx, PointerGetDatum(item));
}
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(hstore_svals);
Datum hstore_svals(PG_FUNCTION_ARGS);
Datum
hstore_svals(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
HStore *hs;
int i;
if (SRF_IS_FIRSTCALL())
{
hs = PG_GETARG_HS(0);
funcctx = SRF_FIRSTCALL_INIT();
setup_firstcall(funcctx, hs, NULL);
}
funcctx = SRF_PERCALL_SETUP();
hs = (HStore *) funcctx->user_fctx;
i = funcctx->call_cntr;
if (i < HS_COUNT(hs))
{
HEntry *entries = ARRPTR(hs);
if (HS_VALISNULL(entries, i))
{
ReturnSetInfo *rsi;
/* ugly ugly ugly. why no macro for this? */
(funcctx)->call_cntr++;
rsi = (ReturnSetInfo *) fcinfo->resultinfo;
rsi->isDone = ExprMultipleResult;
PG_RETURN_NULL();
}
else
{
text *item;
item = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), i),
HS_VALLEN(entries, i));
SRF_RETURN_NEXT(funcctx, PointerGetDatum(item));
}
}
SRF_RETURN_DONE(funcctx);
}
PG_FUNCTION_INFO_V1(hstore_contains);
Datum hstore_contains(PG_FUNCTION_ARGS);
Datum
hstore_contains(PG_FUNCTION_ARGS)
{
HStore *val = PG_GETARG_HS(0);
HStore *tmpl = PG_GETARG_HS(1);
bool res = true;
HEntry *te = ARRPTR(tmpl);
char *tstr = STRPTR(tmpl);
HEntry *ve = ARRPTR(val);
char *vstr = STRPTR(val);
int tcount = HS_COUNT(tmpl);
int lastidx = 0;
int i;
/*
* we exploit the fact that keys in "tmpl" are in strictly increasing
* order to narrow the hstoreFindKey search; each search can start one
* entry past the previous "found" entry, or at the lower bound of the
* search
*/
for (i = 0; res && i < tcount; ++i)
{
int idx = hstoreFindKey(val, &lastidx,
HS_KEY(te, tstr, i), HS_KEYLEN(te, i));
if (idx >= 0)
{
bool nullval = HS_VALISNULL(te, i);
int vallen = HS_VALLEN(te, i);
if (nullval != HS_VALISNULL(ve, idx)
|| (!nullval
&& (vallen != HS_VALLEN(ve, idx)
|| memcmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen))))
res = false;
}
else
res = false;
}
PG_RETURN_BOOL(res);
}
PG_FUNCTION_INFO_V1(hstore_contained);
Datum hstore_contained(PG_FUNCTION_ARGS);
Datum
hstore_contained(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(hstore_contains,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
));
}
PG_FUNCTION_INFO_V1(hstore_each);
Datum hstore_each(PG_FUNCTION_ARGS);
Datum
hstore_each(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
HStore *hs;
int i;
if (SRF_IS_FIRSTCALL())
{
hs = PG_GETARG_HS(0);
funcctx = SRF_FIRSTCALL_INIT();
setup_firstcall(funcctx, hs, fcinfo);
}
funcctx = SRF_PERCALL_SETUP();
hs = (HStore *) funcctx->user_fctx;
i = funcctx->call_cntr;
if (i < HS_COUNT(hs))
{
HEntry *entries = ARRPTR(hs);
char *ptr = STRPTR(hs);
Datum res,
dvalues[2];
bool nulls[2] = {false, false};
text *item;
HeapTuple tuple;
item = cstring_to_text_with_len(HS_KEY(entries, ptr, i),
HS_KEYLEN(entries, i));
dvalues[0] = PointerGetDatum(item);
if (HS_VALISNULL(entries, i))
{
dvalues[1] = (Datum) 0;
nulls[1] = true;
}
else
{
item = cstring_to_text_with_len(HS_VAL(entries, ptr, i),
HS_VALLEN(entries, i));
dvalues[1] = PointerGetDatum(item);
}
tuple = heap_form_tuple(funcctx->tuple_desc, dvalues, nulls);
res = HeapTupleGetDatum(tuple);
SRF_RETURN_NEXT(funcctx, PointerGetDatum(res));
}
SRF_RETURN_DONE(funcctx);
}
/*
* btree sort order for hstores isn't intended to be useful; we really only
* care about equality versus non-equality. we compare the entire string
* buffer first, then the entry pos array.
*/
PG_FUNCTION_INFO_V1(hstore_cmp);
Datum hstore_cmp(PG_FUNCTION_ARGS);
Datum
hstore_cmp(PG_FUNCTION_ARGS)
{
HStore *hs1 = PG_GETARG_HS(0);
HStore *hs2 = PG_GETARG_HS(1);
int hcount1 = HS_COUNT(hs1);
int hcount2 = HS_COUNT(hs2);
int res = 0;
if (hcount1 == 0 || hcount2 == 0)
{
/*
* if either operand is empty, and the other is nonempty, the nonempty
* one is larger. If both are empty they are equal.
*/
if (hcount1 > 0)
res = 1;
else if (hcount2 > 0)
res = -1;
}
else
{
/* here we know both operands are nonempty */
char *str1 = STRPTR(hs1);
char *str2 = STRPTR(hs2);
HEntry *ent1 = ARRPTR(hs1);
HEntry *ent2 = ARRPTR(hs2);
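/*
* Entries are stored contiguously, so the end position of the last
* value entry is the total length of the string buffer.
*/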
size_t len1 = HSE_ENDPOS(ent1[2 * hcount1 - 1]);
size_t len2 = HSE_ENDPOS(ent2[2 * hcount2 - 1]);
res = memcmp(str1, str2, Min(len1, len2));
if (res == 0)
{
if (len1 > len2)
res = 1;
else if (len1 < len2)
res = -1;
else if (hcount1 > hcount2)
res = 1;
else if (hcount2 > hcount1)
res = -1;
else
{
int count = hcount1 * 2;
int i;
for (i = 0; i < count; ++i)
if (HSE_ENDPOS(ent1[i]) != HSE_ENDPOS(ent2[i]) ||
HSE_ISNULL(ent1[i]) != HSE_ISNULL(ent2[i]))
break;
if (i < count)
{
if (HSE_ENDPOS(ent1[i]) < HSE_ENDPOS(ent2[i]))
res = -1;
else if (HSE_ENDPOS(ent1[i]) > HSE_ENDPOS(ent2[i]))
res = 1;
else if (HSE_ISNULL(ent1[i]))
res = 1;
else if (HSE_ISNULL(ent2[i]))
res = -1;
}
}
}
else
{
res = (res > 0) ? 1 : -1;
}
}
/*
* this is a btree support function; this is one of the few places where
* memory needs to be explicitly freed.
*/
PG_FREE_IF_COPY(hs1, 0);
PG_FREE_IF_COPY(hs2, 1);
PG_RETURN_INT32(res);
}
PG_FUNCTION_INFO_V1(hstore_eq);
Datum hstore_eq(PG_FUNCTION_ARGS);
Datum
hstore_eq(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res == 0);
}
PG_FUNCTION_INFO_V1(hstore_ne);
Datum hstore_ne(PG_FUNCTION_ARGS);
Datum
hstore_ne(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res != 0);
}
PG_FUNCTION_INFO_V1(hstore_gt);
Datum hstore_gt(PG_FUNCTION_ARGS);
Datum
hstore_gt(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res > 0);
}
PG_FUNCTION_INFO_V1(hstore_ge);
Datum hstore_ge(PG_FUNCTION_ARGS);
Datum
hstore_ge(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res >= 0);
}
PG_FUNCTION_INFO_V1(hstore_lt);
Datum hstore_lt(PG_FUNCTION_ARGS);
Datum
hstore_lt(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res < 0);
}
PG_FUNCTION_INFO_V1(hstore_le);
Datum hstore_le(PG_FUNCTION_ARGS);
Datum
hstore_le(PG_FUNCTION_ARGS)
{
int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
PG_RETURN_BOOL(res <= 0);
}
PG_FUNCTION_INFO_V1(hstore_hash);
Datum hstore_hash(PG_FUNCTION_ARGS);
Datum
hstore_hash(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
Datum hval = hash_any((unsigned char *) VARDATA(hs),
VARSIZE(hs) - VARHDRSZ);
/*
* this is the only place in the code that cares whether the overall
* varlena size exactly matches the true data size; this assertion should
* be maintained by all the other code, but we make it explicit here.
*/
Assert(VARSIZE(hs) ==
(HS_COUNT(hs) != 0 ?
CALCDATASIZE(HS_COUNT(hs),
HSE_ENDPOS(ARRPTR(hs)[2 * HS_COUNT(hs) - 1])) :
HSHRDSIZE));
PG_FREE_IF_COPY(hs, 0);
PG_RETURN_DATUM(hval);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_2020_2 |
crossvul-cpp_data_bad_5797_0 |
/*
* Local APIC virtualization
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright (C) 2007 Novell
* Copyright (C) 2007 Intel
* Copyright 2009 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Dor Laor <dor.laor@qumranet.com>
* Gregory Haskins <ghaskins@novell.com>
* Yaozu (Eddie) Dong <eddie.dong@intel.com>
*
* Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"
#define APIC_BUS_CYCLE_NS 1
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
#define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH (1 << 12)
/* followed define is not in apicdef.h */
#define APIC_SHORT_MASK 0xc0000
#define APIC_DEST_NOSHORT 0x0
#define APIC_DEST_MASK 0x800
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
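/*
* Worked example (illustrative, assumed vector 0x61): the 256 vectors
* are spread over eight 32-bit words, each in its own 16-byte aligned
* register slot:
*
* VEC_POS(0x61) == 0x61 & 31 == 1 (bit within the word)
* REG_POS(0x61) == (0x61 >> 5) << 4 == 0x30 (byte offset of the word)
*
* so vector 0x61 lives in bit 1 of the u32 at offset 0x30.
*/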
static unsigned int min_timer_period_us = 500;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
*((u32 *) (apic->regs + reg_off)) = val;
}
static inline int apic_test_vector(int vec, void *bitmap)
{
return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
struct kvm_lapic *apic = vcpu->arch.apic;
return apic_test_vector(vector, apic->regs + APIC_ISR) ||
apic_test_vector(vector, apic->regs + APIC_IRR);
}
static inline void apic_set_vector(int vec, void *bitmap)
{
set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline void apic_clear_vector(int vec, void *bitmap)
{
clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
if ((kvm_apic_get_reg(apic, APIC_SPIV) ^ val) & APIC_SPIV_APIC_ENABLED) {
if (val & APIC_SPIV_APIC_ENABLED)
static_key_slow_dec_deferred(&apic_sw_disabled);
else
static_key_slow_inc(&apic_sw_disabled.key);
}
apic_set_reg(apic, APIC_SPIV, val);
}
static inline int apic_enabled(struct kvm_lapic *apic)
{
return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}
#define LVT_MASK \
(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
#define LINT_MASK \
(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
static inline int kvm_apic_id(struct kvm_lapic *apic)
{
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
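/*
* Layout note (illustrative): in xAPIC mode the 8-bit APIC ID occupies
* bits 31:24 of the APIC_ID register, hence the ">> 24" above; e.g. a
* register value of 0x05000000 decodes to APIC ID 5.
*/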
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
mutex_lock(&kvm->arch.apic_map_lock);
if (!new)
goto out;
new->ldr_bits = 8;
/* flat mode is default */
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
u32 ldr;
if (!kvm_apic_present(vcpu))
continue;
/*
* All APICs have to be configured in the same mode by an OS.
* We take advantage of this while building the logical id lookup
* table. After reset APICs are in xapic/flat mode, so if we
* find an apic with a different setting we assume this is the mode
* the OS wants all apics to be in; build the lookup table accordingly.
*/
if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
new->cid_mask = new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
new->cid_shift = 4;
new->cid_mask = 0xf;
new->lid_mask = 0xf;
}
new->phys_map[kvm_apic_id(apic)] = apic;
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
lid = apic_logical_id(new, ldr);
if (lid)
new->logical_map[cid][ffs(lid) - 1] = apic;
}
out:
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
kfree_rcu(old, rcu);
kvm_vcpu_request_scan_ioapic(kvm);
}
static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
{
apic_set_reg(apic, APIC_ID, id << 24);
recalculate_apic_map(apic->vcpu->kvm);
}
static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
apic_set_reg(apic, APIC_LDR, id);
recalculate_apic_map(apic->vcpu->kvm);
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}
static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}
static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
}
static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
}
static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) ==
APIC_LVT_TIMER_TSCDEADLINE);
}
static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}
void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *feat;
u32 v = APIC_VERSION;
if (!kvm_vcpu_has_lapic(vcpu))
return;
feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
v |= APIC_LVR_DIRECTED_EOI;
apic_set_reg(apic, APIC_LVR, v);
}
static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
LVT_MASK, /* partial LVTT mask, timer mode mask added at runtime */
LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
LVT_MASK | APIC_MODE_MASK, /* LVTPC */
LINT_MASK, LINT_MASK, /* LVT0-1 */
LVT_MASK /* LVTERR */
};
static int find_highest_vector(void *bitmap)
{
int vec;
u32 *reg;
for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
vec >= 0; vec -= APIC_VECTORS_PER_REG) {
reg = bitmap + REG_POS(vec);
if (*reg)
return fls(*reg) - 1 + vec;
}
return -1;
}
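/*
* Worked example (illustrative): assume only vector 0x31 is set. The
* scan starts at vec == 224 and steps down by 32; the first non-zero
* word is at vec == 32 (REG_POS(32) == 0x10), where bit 17 is set, so
* the result is fls(reg) - 1 + vec == 18 - 1 + 32 == 49 == 0x31.
*/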
static u8 count_vectors(void *bitmap)
{
int vec;
u32 *reg;
u8 count = 0;
for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
reg = bitmap + REG_POS(vec);
count += hweight32(*reg);
}
return count;
}
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
u32 i, pir_val;
struct kvm_lapic *apic = vcpu->arch.apic;
for (i = 0; i <= 7; i++) {
pir_val = xchg(&pir[i], 0);
if (pir_val)
*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
}
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = true;
apic_set_vector(vec, apic->regs + APIC_IRR);
}
static inline int apic_search_irr(struct kvm_lapic *apic)
{
return find_highest_vector(apic->regs + APIC_IRR);
}
static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
int result;
/*
* Note that irr_pending is just a hint. It will always be
* true with virtual interrupt delivery enabled.
*/
if (!apic->irr_pending)
return -1;
kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
result = apic_search_irr(apic);
ASSERT(result == -1 || result >= 16);
return result;
}
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = false;
apic_clear_vector(vec, apic->regs + APIC_IRR);
if (apic_search_irr(apic) != -1)
apic->irr_pending = true;
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
/*
* ISR (in service register) bit is set when injecting an interrupt.
* The highest vector is injected. Thus the latest bit set matches
* the highest bit in ISR.
*/
apic->highest_isr_cache = vec;
}
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
--apic->isr_count;
BUG_ON(apic->isr_count < 0);
apic->highest_isr_cache = -1;
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
int highest_irr;
/* This may race with the setting of irr in __apic_accept_irq() and
* the value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
* will cause vmexit immediately and the value will be recalculated
* on the next vmentry.
*/
if (!kvm_vcpu_has_lapic(vcpu))
return 0;
highest_irr = apic_find_highest_irr(vcpu->arch.apic);
return highest_irr;
}
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
unsigned long *dest_map);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
unsigned long *dest_map)
{
struct kvm_lapic *apic = vcpu->arch.apic;
return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
irq->level, irq->trig_mode, dest_map);
}
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
sizeof(val));
}
static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
sizeof(*val));
}
static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
u8 val;
if (pv_eoi_get_user(vcpu, &val) < 0)
apic_debug("Can't read EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return val & 0x1;
}
static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
apic_debug("Can't set EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return;
}
__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
apic_debug("Can't clear EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return;
}
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
int result;
/* Note that isr_count is always 1 with vid enabled */
if (!apic->isr_count)
return -1;
if (likely(apic->highest_isr_cache != -1))
return apic->highest_isr_cache;
result = find_highest_vector(apic->regs + APIC_ISR);
ASSERT(result == -1 || result >= 16);
return result;
}
void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int i;
for (i = 0; i < 8; i++)
apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
}
static void apic_update_ppr(struct kvm_lapic *apic)
{
u32 tpr, isrv, ppr, old_ppr;
int isr;
old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
isr = apic_find_highest_isr(apic);
isrv = (isr != -1) ? isr : 0;
if ((tpr & 0xf0) >= (isrv & 0xf0))
ppr = tpr & 0xff;
else
ppr = isrv & 0xf0;
apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
apic, ppr, isr, isrv);
if (old_ppr != ppr) {
apic_set_reg(apic, APIC_PROCPRI, ppr);
if (ppr < old_ppr)
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
}
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
apic_set_reg(apic, APIC_TASKPRI, tpr);
apic_update_ppr(apic);
}
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
return dest == 0xff || kvm_apic_id(apic) == dest;
}
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
int result = 0;
u32 logical_id;
if (apic_x2apic_mode(apic)) {
logical_id = kvm_apic_get_reg(apic, APIC_LDR);
return logical_id & mda;
}
logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR));
switch (kvm_apic_get_reg(apic, APIC_DFR)) {
case APIC_DFR_FLAT:
if (logical_id & mda)
result = 1;
break;
case APIC_DFR_CLUSTER:
if (((logical_id >> 4) == (mda >> 0x4))
&& (logical_id & mda & 0xf))
result = 1;
break;
default:
apic_debug("Bad DFR vcpu %d: %08x\n",
apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
break;
}
return result;
}
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, int dest, int dest_mode)
{
int result = 0;
struct kvm_lapic *target = vcpu->arch.apic;
apic_debug("target %p, source %p, dest 0x%x, "
"dest_mode 0x%x, short_hand 0x%x\n",
target, source, dest, dest_mode, short_hand);
ASSERT(target);
switch (short_hand) {
case APIC_DEST_NOSHORT:
if (dest_mode == 0)
/* Physical mode. */
result = kvm_apic_match_physical_addr(target, dest);
else
/* Logical mode. */
result = kvm_apic_match_logical_addr(target, dest);
break;
case APIC_DEST_SELF:
result = (target == source);
break;
case APIC_DEST_ALLINC:
result = 1;
break;
case APIC_DEST_ALLBUT:
result = (target != source);
break;
default:
apic_debug("kvm: apic: Bad dest shorthand value %x\n",
short_hand);
break;
}
return result;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
{
struct kvm_apic_map *map;
unsigned long bitmap = 1;
struct kvm_lapic **dst;
int i;
bool ret = false;
*r = -1;
if (irq->shorthand == APIC_DEST_SELF) {
*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
return true;
}
if (irq->shorthand)
return false;
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
if (!map)
goto out;
if (irq->dest_mode == 0) { /* physical mode */
if (irq->delivery_mode == APIC_DM_LOWEST ||
irq->dest_id == 0xff)
goto out;
dst = &map->phys_map[irq->dest_id & 0xff];
} else {
u32 mda = irq->dest_id << (32 - map->ldr_bits);
dst = map->logical_map[apic_cluster_id(map, mda)];
bitmap = apic_logical_id(map, mda);
if (irq->delivery_mode == APIC_DM_LOWEST) {
int l = -1;
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
if (l < 0)
l = i;
else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
l = i;
}
bitmap = (l >= 0) ? 1 << l : 0;
}
}
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
if (*r < 0)
*r = 0;
*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
}
ret = true;
out:
rcu_read_unlock();
return ret;
}
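/*
* Selection note (illustrative): for lowest-priority delivery the code
* above picks, among the destinations in the bitmap, the vCPU whose
* apic_arb_prio compares smallest via kvm_apic_compare_prio(), then
* collapses the bitmap to that single bit so exactly one APIC gets the
* interrupt.
*/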
/*
* Add a pending IRQ into lapic.
* Return 1 if successfully added and 0 if discarded.
*/
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
unsigned long *dest_map)
{
int result = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
switch (delivery_mode) {
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
/* fall through */
case APIC_DM_FIXED:
/* FIXME add logic for vcpu on reset */
if (unlikely(!apic_enabled(apic)))
break;
result = 1;
if (dest_map)
__set_bit(vcpu->vcpu_id, dest_map);
if (kvm_x86_ops->deliver_posted_interrupt)
kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
else {
apic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}
trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
trig_mode, vector, false);
break;
case APIC_DM_REMRD:
result = 1;
vcpu->arch.pv.pv_unhalted = 1;
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_SMI:
apic_debug("Ignoring guest SMI\n");
break;
case APIC_DM_NMI:
result = 1;
kvm_inject_nmi(vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_INIT:
if (!trig_mode || level) {
result = 1;
/* assumes that there are only KVM_APIC_INIT/SIPI */
apic->pending_events = (1UL << KVM_APIC_INIT);
/* make sure pending_events is visible before sending
* the request */
smp_wmb();
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
} else {
apic_debug("Ignoring de-assert INIT to vcpu %d\n",
vcpu->vcpu_id);
}
break;
case APIC_DM_STARTUP:
apic_debug("SIPI to vcpu %d vector 0x%02x\n",
vcpu->vcpu_id, vector);
result = 1;
apic->sipi_vector = vector;
/* make sure sipi_vector is visible for the receiver */
smp_wmb();
set_bit(KVM_APIC_SIPI, &apic->pending_events);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_EXTINT:
/*
* Should only be called by kvm_apic_local_deliver() with LVT0,
* before NMI watchdog was enabled. Already handled by
* kvm_apic_accept_pic_intr().
*/
break;
default:
printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
delivery_mode);
break;
}
return result;
}
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
int trigger_mode;
if (apic_test_vector(vector, apic->regs + APIC_TMR))
trigger_mode = IOAPIC_LEVEL_TRIG;
else
trigger_mode = IOAPIC_EDGE_TRIG;
kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}
}
static int apic_set_eoi(struct kvm_lapic *apic)
{
int vector = apic_find_highest_isr(apic);
trace_kvm_eoi(apic, vector);
/*
* Not every EOI write has a corresponding ISR bit set;
* one example is when the kernel checks the timer during setup_IO_APIC.
*/
if (vector == -1)
return vector;
apic_clear_isr(vector, apic);
apic_update_ppr(apic);
kvm_ioapic_send_eoi(apic, vector);
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
return vector;
}
/*
* this interface assumes a trap-like exit, which has already finished
* desired side effect including vISR and vPPR update.
*/
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
struct kvm_lapic *apic = vcpu->arch.apic;
trace_kvm_eoi(apic, vector);
kvm_ioapic_send_eoi(apic, vector);
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
static void apic_send_ipi(struct kvm_lapic *apic)
{
u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
struct kvm_lapic_irq irq;
irq.vector = icr_low & APIC_VECTOR_MASK;
irq.delivery_mode = icr_low & APIC_MODE_MASK;
irq.dest_mode = icr_low & APIC_DEST_MASK;
irq.level = icr_low & APIC_INT_ASSERT;
irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
irq.shorthand = icr_low & APIC_SHORT_MASK;
if (apic_x2apic_mode(apic))
irq.dest_id = icr_high;
else
irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
trace_kvm_apic_ipi(icr_low, irq.dest_id);
apic_debug("icr_high 0x%x, icr_low 0x%x, "
"short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
"dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
icr_high, icr_low, irq.shorthand, irq.dest_id,
irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
irq.vector);
kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
ktime_t remaining;
s64 ns;
u32 tmcct;
ASSERT(apic != NULL);
/* if initial count is 0, current count should also be 0 */
if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
apic->lapic_timer.period == 0)
return 0;
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
if (ktime_to_ns(remaining) < 0)
remaining = ktime_set(0, 0);
ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
tmcct = div64_u64(ns,
(APIC_BUS_CYCLE_NS * apic->divide_count));
return tmcct;
}
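/*
* Worked example (illustrative, hypothetical numbers): with a period of
* 1000000 ns, 250000 ns left on the hrtimer and divide_count == 4, the
* current-count register reads back as
*
* tmcct = 250000 / (APIC_BUS_CYCLE_NS * 4) == 62500
*
* i.e. the remaining time converted back into (divided) bus cycles.
*/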
static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
struct kvm_vcpu *vcpu = apic->vcpu;
struct kvm_run *run = vcpu->run;
kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
run->tpr_access.rip = kvm_rip_read(vcpu);
run->tpr_access.is_write = write;
}
static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
if (apic->vcpu->arch.tpr_access_reporting)
__report_tpr_access(apic, write);
}
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
u32 val = 0;
if (offset >= LAPIC_MMIO_LENGTH)
return 0;
switch (offset) {
case APIC_ID:
if (apic_x2apic_mode(apic))
val = kvm_apic_id(apic);
else
val = kvm_apic_id(apic) << 24;
break;
case APIC_ARBPRI:
apic_debug("Access APIC ARBPRI register which is for P6\n");
break;
case APIC_TMCCT: /* Timer CCR */
if (apic_lvtt_tscdeadline(apic))
return 0;
val = apic_get_tmcct(apic);
break;
case APIC_PROCPRI:
apic_update_ppr(apic);
val = kvm_apic_get_reg(apic, offset);
break;
case APIC_TASKPRI:
report_tpr_access(apic, false);
/* fall thru */
default:
val = kvm_apic_get_reg(apic, offset);
break;
}
return val;
}
static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
return container_of(dev, struct kvm_lapic, dev);
}
static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
void *data)
{
unsigned char alignment = offset & 0xf;
u32 result;
/* this bitmask has a bit cleared for each reserved register */
static const u64 rmask = 0x43ff01ffffffe70cULL;
if ((alignment + len) > 4) {
apic_debug("KVM_APIC_READ: alignment error %x %d\n",
offset, len);
return 1;
}
if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
apic_debug("KVM_APIC_READ: read reserved register %x\n",
offset);
return 1;
}
result = __apic_read(apic, offset & ~0xf);
trace_kvm_apic_read(offset, result);
switch (len) {
case 1:
case 2:
case 4:
memcpy(data, (char *)&result + alignment, len);
break;
default:
printk(KERN_ERR "Local APIC read with len = %x, "
"should be 1,2, or 4 instead\n", len);
break;
}
return 0;
}
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
return kvm_apic_hw_enabled(apic) &&
addr >= apic->base_address &&
addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
static int apic_mmio_read(struct kvm_io_device *this,
gpa_t address, int len, void *data)
{
struct kvm_lapic *apic = to_lapic(this);
u32 offset = address - apic->base_address;
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
apic_reg_read(apic, offset, len, data);
return 0;
}
static void update_divide_count(struct kvm_lapic *apic)
{
u32 tmp1, tmp2, tdcr;
tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
tmp1 = tdcr & 0xf;
tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
apic->divide_count = 0x1 << (tmp2 & 0x7);
apic_debug("timer divide count is 0x%x\n",
apic->divide_count);
}
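/*
* Worked example (illustrative): the divide value is encoded in TDCR
* bits 0, 1 and 3. For tdcr == 0x0: tmp1 == 0, tmp2 == 1, so
* divide_count == 1 << 1 == 2 (the reset default, divide by 2). For
* tdcr == 0xb: tmp1 == 0xb, tmp2 == (0x3 | 0x4) + 1 == 8, and
* divide_count == 1 << (8 & 0x7) == 1 (divide by 1).
*/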
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
atomic_set(&apic->lapic_timer.pending, 0);
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
/* lapic timer in oneshot or periodic mode */
now = apic->lapic_timer.timer.base->get_time();
apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
* APIC_BUS_CYCLE_NS * apic->divide_count;
if (!apic->lapic_timer.period)
return;
/*
* Do not allow the guest to program periodic timers with small
* interval, since the hrtimers are not throttled by the host
* scheduler.
*/
if (apic_lvtt_period(apic)) {
s64 min_period = min_timer_period_us * 1000LL;
if (apic->lapic_timer.period < min_period) {
pr_info_ratelimited(
"kvm: vcpu %i: requested %lld ns "
"lapic timer period limited to %lld ns\n",
apic->vcpu->vcpu_id,
apic->lapic_timer.period, min_period);
apic->lapic_timer.period = min_period;
}
}
hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, apic->lapic_timer.period),
HRTIMER_MODE_ABS);
apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
PRIx64 ", "
"timer initial count 0x%x, period %lldns, "
"expire @ 0x%016" PRIx64 ".\n", __func__,
APIC_BUS_CYCLE_NS, ktime_to_ns(now),
kvm_apic_get_reg(apic, APIC_TMICT),
apic->lapic_timer.period,
ktime_to_ns(ktime_add_ns(now,
apic->lapic_timer.period)));
} else if (apic_lvtt_tscdeadline(apic)) {
/* lapic timer in tsc deadline mode */
u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
u64 ns = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
unsigned long flags;
if (unlikely(!tscdeadline || !this_tsc_khz))
return;
local_irq_save(flags);
now = apic->lapic_timer.timer.base->get_time();
guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
}
hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
local_irq_restore(flags);
}
}
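/*
* Worked example (illustrative, hypothetical numbers) for the TSC
* deadline path above: with this_tsc_khz == 3000000 (a 3 GHz guest
* TSC) and tscdeadline - guest_tsc == 3000000 cycles,
*
* ns = 3000000 * 1000000ULL / 3000000 == 1000000 ns == 1 ms
*
* i.e. the cycle delta is scaled to nanoseconds by the virtual TSC
* frequency before arming the hrtimer.
*/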
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));
if (apic_lvt_nmi_mode(lvt0_val)) {
if (!nmi_wd_enabled) {
apic_debug("Receive NMI setting on APIC_LVT0 "
"for cpu %d\n", apic->vcpu->vcpu_id);
apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
}
} else if (nmi_wd_enabled)
apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
}
static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
int ret = 0;
trace_kvm_apic_write(reg, val);
switch (reg) {
case APIC_ID: /* Local APIC ID */
if (!apic_x2apic_mode(apic))
kvm_apic_set_id(apic, val >> 24);
else
ret = 1;
break;
case APIC_TASKPRI:
report_tpr_access(apic, true);
apic_set_tpr(apic, val & 0xff);
break;
case APIC_EOI:
apic_set_eoi(apic);
break;
case APIC_LDR:
if (!apic_x2apic_mode(apic))
kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
else
ret = 1;
break;
case APIC_DFR:
if (!apic_x2apic_mode(apic)) {
apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
recalculate_apic_map(apic->vcpu->kvm);
} else
ret = 1;
break;
case APIC_SPIV: {
u32 mask = 0x3ff;
if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
mask |= APIC_SPIV_DIRECTED_EOI;
apic_set_spiv(apic, val & mask);
if (!(val & APIC_SPIV_APIC_ENABLED)) {
int i;
u32 lvt_val;
for (i = 0; i < APIC_LVT_NUM; i++) {
lvt_val = kvm_apic_get_reg(apic,
APIC_LVTT + 0x10 * i);
apic_set_reg(apic, APIC_LVTT + 0x10 * i,
lvt_val | APIC_LVT_MASKED);
}
atomic_set(&apic->lapic_timer.pending, 0);
}
break;
}
case APIC_ICR:
/* No delay here, so we always clear the pending bit */
apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
apic_send_ipi(apic);
break;
case APIC_ICR2:
if (!apic_x2apic_mode(apic))
val &= 0xff000000;
apic_set_reg(apic, APIC_ICR2, val);
break;
case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val);
/* fall through */
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
case APIC_LVTERR:
/* TODO: Check vector */
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
apic_set_reg(apic, reg, val);
break;
case APIC_LVTT:
if ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) !=
(val & apic->lapic_timer.timer_mode_mask))
hrtimer_cancel(&apic->lapic_timer.timer);
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
apic_set_reg(apic, APIC_LVTT, val);
break;
case APIC_TMICT:
if (apic_lvtt_tscdeadline(apic))
break;
hrtimer_cancel(&apic->lapic_timer.timer);
apic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
break;
case APIC_TDCR:
if (val & 4)
apic_debug("KVM_WRITE:TDCR %x\n", val);
apic_set_reg(apic, APIC_TDCR, val);
update_divide_count(apic);
break;
case APIC_ESR:
if (apic_x2apic_mode(apic) && val != 0) {
apic_debug("KVM_WRITE:ESR not zero %x\n", val);
ret = 1;
}
break;
case APIC_SELF_IPI:
if (apic_x2apic_mode(apic)) {
apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
} else
ret = 1;
break;
default:
ret = 1;
break;
}
if (ret)
apic_debug("Local APIC Write to read-only register %x\n", reg);
return ret;
}
static int apic_mmio_write(struct kvm_io_device *this,
gpa_t address, int len, const void *data)
{
struct kvm_lapic *apic = to_lapic(this);
unsigned int offset = address - apic->base_address;
u32 val;
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
/*
* APIC registers must be aligned on a 128-bit boundary.
* 32/64/128-bit registers must be accessed through 32-bit accesses.
* Refer to SDM 8.4.1.
*/
if (len != 4 || (offset & 0xf)) {
/* Don't shout loud, $infamous_os would cause only noise. */
apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
return 0;
}
val = *(u32*)data;
/* too common printing */
if (offset != APIC_EOI)
apic_debug("%s: offset 0x%x with length 0x%x, and value is "
"0x%x\n", __func__, offset, len, val);
apic_reg_write(apic, offset & 0xff0, val);
return 0;
}
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_has_lapic(vcpu))
apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
u32 val = 0;
/* hw has done the conditional check and inst decode */
offset &= 0xff0;
apic_reg_read(vcpu->arch.apic, offset, 4, &val);
/* TODO: optimize to just emulate side effect w/o one more write */
apic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!vcpu->arch.apic)
return;
hrtimer_cancel(&apic->lapic_timer.timer);
if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
static_key_slow_dec_deferred(&apic_hw_disabled);
if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
static_key_slow_dec_deferred(&apic_sw_disabled);
if (apic->regs)
free_page((unsigned long)apic->regs);
kfree(apic);
}
/*
*----------------------------------------------------------------------
* LAPIC interface
*----------------------------------------------------------------------
*/
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return 0;
return apic->lapic_timer.tscdeadline;
}
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;
hrtimer_cancel(&apic->lapic_timer.timer);
apic->lapic_timer.tscdeadline = data;
start_apic_timer(apic);
}
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return;
apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
| (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
}
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
u64 tpr;
if (!kvm_vcpu_has_lapic(vcpu))
return 0;
tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
return (tpr & 0xf0) >> 4;
}
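/*
* Mapping note (illustrative): CR8 carries the task-priority class,
* i.e. TPR bits 7:4. kvm_lapic_set_tpr(vcpu, 0x5) stores TASKPRI ==
* 0x50 (plus any preserved low bits) and kvm_lapic_get_cr8() reads it
* back as (0x50 & 0xf0) >> 4 == 0x5.
*/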
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
u64 old_value = vcpu->arch.apic_base;
struct kvm_lapic *apic = vcpu->arch.apic;
if (!apic) {
value |= MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
return;
}
/* update jump label if enable bit changes */
if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
static_key_slow_dec_deferred(&apic_hw_disabled);
else
static_key_slow_inc(&apic_hw_disabled.key);
recalculate_apic_map(vcpu->kvm);
}
if (!kvm_vcpu_is_bsp(apic->vcpu))
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
if ((old_value ^ value) & X2APIC_ENABLE) {
if (value & X2APIC_ENABLE) {
u32 id = kvm_apic_id(apic);
u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
kvm_apic_set_ldr(apic, ldr);
kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
} else
kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
}
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
/* With FSB interrupt delivery, we can restart APIC functionality. */
apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
"0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
}
void kvm_lapic_reset(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
int i;
apic_debug("%s\n", __func__);
ASSERT(vcpu);
apic = vcpu->arch.apic;
ASSERT(apic != NULL);
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
kvm_apic_set_id(apic, vcpu->vcpu_id);
kvm_apic_set_version(apic->vcpu);
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_set_reg(apic, APIC_DFR, 0xffffffffU);
apic_set_spiv(apic, 0xff);
apic_set_reg(apic, APIC_TASKPRI, 0);
kvm_apic_set_ldr(apic, 0);
apic_set_reg(apic, APIC_ESR, 0);
apic_set_reg(apic, APIC_ICR, 0);
apic_set_reg(apic, APIC_ICR2, 0);
apic_set_reg(apic, APIC_TDCR, 0);
apic_set_reg(apic, APIC_TMICT, 0);
for (i = 0; i < 8; i++) {
apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
}
apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
apic->highest_isr_cache = -1;
update_divide_count(apic);
atomic_set(&apic->lapic_timer.pending, 0);
if (kvm_vcpu_is_bsp(vcpu))
kvm_lapic_set_base(vcpu,
vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
"0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
vcpu, kvm_apic_id(apic),
vcpu->arch.apic_base, apic->base_address);
}
/*
*----------------------------------------------------------------------
* timer interface
*----------------------------------------------------------------------
*/
static bool lapic_is_periodic(struct kvm_lapic *apic)
{
return apic_lvtt_period(apic);
}
int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
apic_lvt_enabled(apic, APIC_LVTT))
return atomic_read(&apic->lapic_timer.pending);
return 0;
}
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = kvm_apic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
vector = reg & APIC_VECTOR_MASK;
mode = reg & APIC_MODE_MASK;
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
NULL);
}
return 0;
}
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (apic)
kvm_apic_local_deliver(apic, APIC_LVT0);
}
static const struct kvm_io_device_ops apic_mmio_ops = {
.read = apic_mmio_read,
.write = apic_mmio_write,
};
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
struct kvm_vcpu *vcpu = apic->vcpu;
wait_queue_head_t *q = &vcpu->wq;
/*
* There is a race window between reading and incrementing, but we do
* not care about potentially losing timer events in the !reinject
* case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
* in vcpu_enter_guest.
*/
if (!atomic_read(&ktimer->pending)) {
atomic_inc(&ktimer->pending);
/* FIXME: this code should not know anything about vcpus */
kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
}
if (waitqueue_active(q))
wake_up_interruptible(q);
if (lapic_is_periodic(apic)) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
return HRTIMER_RESTART;
} else
return HRTIMER_NORESTART;
}
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
ASSERT(vcpu != NULL);
apic_debug("apic_init %d\n", vcpu->vcpu_id);
apic = kzalloc(sizeof(*apic), GFP_KERNEL);
if (!apic)
goto nomem;
vcpu->arch.apic = apic;
apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
if (!apic->regs) {
printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
vcpu->vcpu_id);
goto nomem_free_apic;
}
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
apic->lapic_timer.timer.function = apic_timer_fn;
/*
* APIC is created enabled. This will prevent kvm_lapic_set_base from
* thinking that the APIC state has changed.
*/
vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
kvm_lapic_set_base(vcpu,
APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);
static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
kvm_lapic_reset(vcpu);
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
return 0;
nomem_free_apic:
kfree(apic);
nomem:
return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int highest_irr;
if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
return -1;
apic_update_ppr(apic);
highest_irr = apic_find_highest_irr(apic);
if ((highest_irr == -1) ||
((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
return -1;
return highest_irr;
}
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
int r = 0;
if (!kvm_apic_hw_enabled(vcpu->arch.apic))
r = 1;
if ((lvt0 & APIC_LVT_MASKED) == 0 &&
GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
r = 1;
return r;
}
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return;
if (atomic_read(&apic->lapic_timer.pending) > 0) {
kvm_apic_local_deliver(apic, APIC_LVTT);
atomic_set(&apic->lapic_timer.pending, 0);
}
}
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
int vector = kvm_apic_has_interrupt(vcpu);
struct kvm_lapic *apic = vcpu->arch.apic;
if (vector == -1)
return -1;
apic_set_isr(vector, apic);
apic_update_ppr(apic);
apic_clear_irr(vector, apic);
return vector;
}
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
struct kvm_lapic *apic = vcpu->arch.apic;
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
/* call kvm_apic_set_id() to put apic into apic_map */
kvm_apic_set_id(apic, kvm_apic_id(apic));
kvm_apic_set_version(vcpu);
apic_update_ppr(apic);
hrtimer_cancel(&apic->lapic_timer.timer);
update_divide_count(apic);
start_apic_timer(apic);
apic->irr_pending = true;
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
1 : count_vectors(apic->regs + APIC_ISR);
apic->highest_isr_cache = -1;
kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_rtc_eoi_tracking_restore_one(vcpu);
}
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
struct hrtimer *timer;
if (!kvm_vcpu_has_lapic(vcpu))
return;
timer = &vcpu->arch.apic->lapic_timer.timer;
if (hrtimer_cancel(timer))
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/*
* apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
*
* Detect whether guest triggered PV EOI since the
* last entry. If yes, set EOI on the guest's behalf.
* Clear PV EOI in guest memory in any case.
*/
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
struct kvm_lapic *apic)
{
bool pending;
int vector;
/*
* PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
* and KVM_PV_EOI_ENABLED in guest memory as follows:
*
* KVM_APIC_PV_EOI_PENDING is unset:
* -> host disabled PV EOI.
* KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
* -> host enabled PV EOI, guest did not execute EOI yet.
* KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
* -> host enabled PV EOI, guest executed EOI.
*/
BUG_ON(!pv_eoi_enabled(vcpu));
pending = pv_eoi_get_pending(vcpu);
/*
* Clear pending bit in any case: it will be set again on vmentry.
* While this might not be ideal from performance point of view,
* this makes sure pv eoi is only enabled when we know it's safe.
*/
pv_eoi_clr_pending(vcpu);
if (pending)
return;
vector = apic_set_eoi(apic);
trace_kvm_pv_eoi(apic, vector);
}
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
sizeof(u32));
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
/*
* apic_sync_pv_eoi_to_guest - called before vmentry
*
* Detect whether it's safe to enable PV EOI and
* if yes do so.
*/
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
struct kvm_lapic *apic)
{
if (!pv_eoi_enabled(vcpu) ||
/* IRR set or many bits in ISR: could be nested. */
apic->irr_pending ||
/* Cache not set: could be safe but we don't bother. */
apic->highest_isr_cache == -1 ||
/* Need EOI to update ioapic. */
kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
/*
* PV EOI was disabled by apic_sync_pv_eoi_from_guest
* so we need not do anything here.
*/
return;
}
pv_eoi_set_pending(apic->vcpu);
}
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
u32 data, tpr;
int max_irr, max_isr;
struct kvm_lapic *apic = vcpu->arch.apic;
apic_sync_pv_eoi_to_guest(vcpu, apic);
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
max_irr = apic_find_highest_irr(apic);
if (max_irr < 0)
max_irr = 0;
max_isr = apic_find_highest_isr(apic);
if (max_isr < 0)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
sizeof(u32));
}
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
if (vapic_addr) {
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.apic->vapic_cache,
vapic_addr, sizeof(u32)))
return -EINVAL;
__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
} else {
__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
}
vcpu->arch.apic->vapic_addr = vapic_addr;
return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 reg = (msr - APIC_BASE_MSR) << 4;
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
/* if this is an ICR write, set ICR2 (the high dword) before the command */
if (msr == 0x830)
apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
return apic_reg_write(apic, reg, (u32)data);
}
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
if (apic_reg_read(apic, reg, 4, &low))
return 1;
if (msr == 0x830)
apic_reg_read(apic, APIC_ICR2, 4, &high);
*data = (((u64)high) << 32) | low;
return 0;
}
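/*
* Mapping note (illustrative): x2APIC MSRs sit at APIC_BASE_MSR +
* (reg >> 4), hence the "(msr - APIC_BASE_MSR) << 4" in both helpers.
* Assuming APIC_BASE_MSR == 0x800, MSR 0x830 maps to reg 0x300
* (APIC_ICR), which is why its high 32 bits are routed through
* APIC_ICR2.
*/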
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return 1;
/* if this is an ICR write, set ICR2 (the high dword) before the command */
if (reg == APIC_ICR)
apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
return apic_reg_write(apic, reg, (u32)data);
}
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 low, high = 0;
if (!kvm_vcpu_has_lapic(vcpu))
return 1;
if (apic_reg_read(apic, reg, 4, &low))
return 1;
if (reg == APIC_ICR)
apic_reg_read(apic, APIC_ICR2, 4, &high);
*data = (((u64)high) << 32) | low;
return 0;
}
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
u64 addr = data & ~KVM_MSR_ENABLED;
if (!IS_ALIGNED(addr, 4))
return 1;
vcpu->arch.pv_eoi.msr_val = data;
if (!pv_eoi_enabled(vcpu))
return 0;
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
addr, sizeof(u8));
}
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
unsigned int sipi_vector;
unsigned long pe;
if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
return;
pe = xchg(&apic->pending_events, 0);
if (test_bit(KVM_APIC_INIT, &pe)) {
kvm_lapic_reset(vcpu);
kvm_vcpu_reset(vcpu);
if (kvm_vcpu_is_bsp(apic->vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
}
if (test_bit(KVM_APIC_SIPI, &pe) &&
vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
/* evaluate pending_events before reading the vector */
smp_rmb();
sipi_vector = apic->sipi_vector;
pr_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, sipi_vector);
kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
}
void kvm_lapic_init(void)
{
/* do not patch jump label more than once per second */
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_5797_0 |
crossvul-cpp_data_good_2417_0 | /*
** Copyright (C) 2002-2014 Erik de Castro Lopo <erikd@mega-nerd.com>
** Copyright (C) 2003 Ross Bencina <rbencina@iprimus.com.au>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU Lesser General Public License as published by
** the Free Software Foundation; either version 2.1 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU Lesser General Public License for more details.
**
** You should have received a copy of the GNU Lesser General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
** The file is split into three sections as follows:
** - The top section (USE_WINDOWS_API == 0) for Linux, Unix and MacOSX
** systems (including Cygwin).
** - The middle section (USE_WINDOWS_API == 1) for microsoft windows
** (including MinGW) using the native windows API.
** - A legacy windows section which attempted to work around grievous
** bugs in microsoft's POSIX implementation.
*/
/*
** The header file sfconfig.h MUST be included before the others to ensure
** that large file support is enabled correctly on Unix systems.
*/
#include "sfconfig.h"
#include <stdio.h>
#include <stdlib.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#if (HAVE_DECL_S_IRGRP == 0)
#include <sf_unistd.h>
#endif
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include "sndfile.h"
#include "common.h"
#define SENSIBLE_SIZE (0x40000000)
/*
** Neat solution to the Win32/OS2 binary file flag requirement.
** If O_BINARY isn't already defined by the inclusion of the system
** headers, set it to zero.
*/
#ifndef O_BINARY
#define O_BINARY 0
#endif
static void psf_log_syserr (SF_PRIVATE *psf, int error) ;
#if (USE_WINDOWS_API == 0)
/*------------------------------------------------------------------------------
** Win32 stuff at the bottom of the file. Unix and other sensible OSes here.
*/
static int psf_close_fd (int fd) ;
static int psf_open_fd (PSF_FILE * pfile) ;
static sf_count_t psf_get_filelen_fd (int fd) ;
int
psf_fopen (SF_PRIVATE *psf)
{
psf->error = 0 ;
psf->file.filedes = psf_open_fd (&psf->file) ;
if (psf->file.filedes == - SFE_BAD_OPEN_MODE)
{ psf->error = SFE_BAD_OPEN_MODE ;
psf->file.filedes = -1 ;
return psf->error ;
} ;
if (psf->file.filedes == -1)
psf_log_syserr (psf, errno) ;
return psf->error ;
} /* psf_fopen */
int
psf_fclose (SF_PRIVATE *psf)
{ int retval ;
if (psf->virtual_io)
return 0 ;
if (psf->file.do_not_close_descriptor)
{ psf->file.filedes = -1 ;
return 0 ;
} ;
if ((retval = psf_close_fd (psf->file.filedes)) == -1)
psf_log_syserr (psf, errno) ;
psf->file.filedes = -1 ;
return retval ;
} /* psf_fclose */
int
psf_open_rsrc (SF_PRIVATE *psf)
{
if (psf->rsrc.filedes > 0)
return 0 ;
/* Test for MacOSX style resource fork on HFS or HFS+ filesystems. */
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s/..namedfork/rsrc", psf->file.path.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.filedes = psf_open_fd (&psf->rsrc)) >= 0)
{ psf->rsrclength = psf_get_filelen_fd (psf->rsrc.filedes) ;
if (psf->rsrclength > 0 || (psf->rsrc.mode & SFM_WRITE))
return SFE_NO_ERROR ;
psf_close_fd (psf->rsrc.filedes) ;
psf->rsrc.filedes = -1 ;
} ;
if (psf->rsrc.filedes == - SFE_BAD_OPEN_MODE)
{ psf->error = SFE_BAD_OPEN_MODE ;
return psf->error ;
} ;
/*
** Now try for a resource fork stored as a separate file in the same
** directory, but preceded with a dot underscore.
*/
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s._%s", psf->file.dir.c, psf->file.name.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.filedes = psf_open_fd (&psf->rsrc)) >= 0)
{ psf->rsrclength = psf_get_filelen_fd (psf->rsrc.filedes) ;
return SFE_NO_ERROR ;
} ;
/*
** Now try for a resource fork stored in a separate file in the
** .AppleDouble/ directory.
*/
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s.AppleDouble/%s", psf->file.dir.c, psf->file.name.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.filedes = psf_open_fd (&psf->rsrc)) >= 0)
{ psf->rsrclength = psf_get_filelen_fd (psf->rsrc.filedes) ;
return SFE_NO_ERROR ;
} ;
/* No resource file found. */
if (psf->rsrc.filedes == -1)
psf_log_syserr (psf, errno) ;
psf->rsrc.filedes = -1 ;
return psf->error ;
} /* psf_open_rsrc */
sf_count_t
psf_get_filelen (SF_PRIVATE *psf)
{ sf_count_t filelen ;
if (psf->virtual_io)
return psf->vio.get_filelen (psf->vio_user_data) ;
filelen = psf_get_filelen_fd (psf->file.filedes) ;
if (filelen == -1)
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
if (filelen == -SFE_BAD_STAT_SIZE)
{ psf->error = SFE_BAD_STAT_SIZE ;
return (sf_count_t) -1 ;
} ;
switch (psf->file.mode)
{ case SFM_WRITE :
filelen = filelen - psf->fileoffset ;
break ;
case SFM_READ :
if (psf->fileoffset > 0 && psf->filelength > 0)
filelen = psf->filelength ;
break ;
case SFM_RDWR :
/*
** Cannot open embedded files SFM_RDWR so we don't need to
** subtract psf->fileoffset. We already have the answer we
** need.
*/
break ;
default :
/* Shouldn't be here, so return error. */
filelen = -1 ;
} ;
return filelen ;
} /* psf_get_filelen */
int
psf_close_rsrc (SF_PRIVATE *psf)
{ psf_close_fd (psf->rsrc.filedes) ;
psf->rsrc.filedes = -1 ;
return 0 ;
} /* psf_close_rsrc */
int
psf_set_stdio (SF_PRIVATE *psf)
{ int error = 0 ;
switch (psf->file.mode)
{ case SFM_RDWR :
error = SFE_OPEN_PIPE_RDWR ;
break ;
case SFM_READ :
psf->file.filedes = 0 ;
break ;
case SFM_WRITE :
psf->file.filedes = 1 ;
break ;
default :
error = SFE_BAD_OPEN_MODE ;
break ;
} ;
psf->filelength = 0 ;
return error ;
} /* psf_set_stdio */
void
psf_set_file (SF_PRIVATE *psf, int fd)
{ psf->file.filedes = fd ;
} /* psf_set_file */
int
psf_file_valid (SF_PRIVATE *psf)
{ return (psf->file.filedes >= 0) ? SF_TRUE : SF_FALSE ;
} /* psf_file_valid */
sf_count_t
psf_fseek (SF_PRIVATE *psf, sf_count_t offset, int whence)
{ sf_count_t current_pos, new_position ;
if (psf->virtual_io)
return psf->vio.seek (offset, whence, psf->vio_user_data) ;
current_pos = psf_ftell (psf) ;
switch (whence)
{ case SEEK_SET :
offset += psf->fileoffset ;
break ;
case SEEK_END :
if (psf->file.mode == SFM_WRITE)
{ new_position = lseek (psf->file.filedes, offset, whence) ;
if (new_position < 0)
psf_log_syserr (psf, errno) ;
return new_position - psf->fileoffset ;
} ;
/* Transform SEEK_END into a SEEK_SET, i.e. find the file
** length, add the requested offset (should be <= 0) and
** get the offset relative to the start of the file.
*/
whence = SEEK_SET ;
offset = lseek (psf->file.filedes, 0, SEEK_END) + offset ;
break ;
case SEEK_CUR :
/* Translate a SEEK_CUR into a SEEK_SET. */
offset += current_pos ;
whence = SEEK_SET ;
break ;
default :
/* We really should not be here. */
psf_log_printf (psf, "psf_fseek : whence is %d *****.\n", whence) ;
return 0 ;
} ;
if (current_pos != offset)
new_position = lseek (psf->file.filedes, offset, whence) ;
else
new_position = offset ;
if (new_position < 0)
psf_log_syserr (psf, errno) ;
new_position -= psf->fileoffset ;
return new_position ;
} /* psf_fseek */
sf_count_t
psf_fread (void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (psf->virtual_io)
return psf->vio.read (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the read down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ;
count = read (psf->file.filedes, ((char*) ptr) + total, (size_t) count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fread */
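/*
** Sizing note (illustrative, hypothetical numbers): reads and writes
** are chunked to SENSIBLE_SIZE (0x40000000, i.e. 1 gigabyte) so that a
** single read()/write() never exceeds what a 32 bit size_t can carry.
** A 3 gigabyte request therefore issues three 1 gigabyte syscalls,
** assuming no short transfers or EINTR retries along the way.
*/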
sf_count_t
psf_fwrite (const void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (bytes == 0 || items == 0)
return 0 ;
if (psf->virtual_io)
return psf->vio.write (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the writes down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : items ;
count = write (psf->file.filedes, ((const char*) ptr) + total, count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fwrite */
sf_count_t
psf_ftell (SF_PRIVATE *psf)
{ sf_count_t pos ;
if (psf->virtual_io)
return psf->vio.tell (psf->vio_user_data) ;
if (psf->is_pipe)
return psf->pipeoffset ;
pos = lseek (psf->file.filedes, 0, SEEK_CUR) ;
if (pos == ((sf_count_t) -1))
{ psf_log_syserr (psf, errno) ;
return -1 ;
} ;
return pos - psf->fileoffset ;
} /* psf_ftell */
static int
psf_close_fd (int fd)
{ int retval ;
if (fd < 0)
return 0 ;
while ((retval = close (fd)) == -1 && errno == EINTR)
/* Do nothing. */ ;
return retval ;
} /* psf_close_fd */
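/*
** Usage note (illustrative sketch): the loop above is the classic
** POSIX retry idiom, equivalent to
**
** while (close (fd) == -1 && errno == EINTR)
** ; (retry, the close was interrupted by a signal)
**
** Only EINTR is retried ; any other failure is reported back.
*/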
sf_count_t
psf_fgets (char *buffer, sf_count_t bufsize, SF_PRIVATE *psf)
{ sf_count_t k = 0 ;
sf_count_t count ;
while (k < bufsize - 1)
{ count = read (psf->file.filedes, &(buffer [k]), 1) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0 || buffer [k++] == '\n')
break ;
} ;
buffer [k] = 0 ;
return k ;
} /* psf_fgets */
int
psf_is_pipe (SF_PRIVATE *psf)
{ struct stat statbuf ;
if (psf->virtual_io)
return SF_FALSE ;
if (fstat (psf->file.filedes, &statbuf) == -1)
{ psf_log_syserr (psf, errno) ;
/* Default to maximum safety. */
return SF_TRUE ;
} ;
if (S_ISFIFO (statbuf.st_mode) || S_ISSOCK (statbuf.st_mode))
return SF_TRUE ;
return SF_FALSE ;
} /* psf_is_pipe */
static sf_count_t
psf_get_filelen_fd (int fd)
{
#if (SIZEOF_OFF_T == 4 && SIZEOF_SF_COUNT_T == 8 && HAVE_FSTAT64)
struct stat64 statbuf ;
if (fstat64 (fd, &statbuf) == -1)
return (sf_count_t) -1 ;
return statbuf.st_size ;
#else
struct stat statbuf ;
if (fstat (fd, &statbuf) == -1)
return (sf_count_t) -1 ;
return statbuf.st_size ;
#endif
} /* psf_get_filelen_fd */
int
psf_ftruncate (SF_PRIVATE *psf, sf_count_t len)
{ int retval ;
/* Returns 0 on success, non-zero on failure. */
if (len < 0)
return -1 ;
if ((sizeof (off_t) < sizeof (sf_count_t)) && len > 0x7FFFFFFF)
return -1 ;
retval = ftruncate (psf->file.filedes, len) ;
if (retval == -1)
psf_log_syserr (psf, errno) ;
return retval ;
} /* psf_ftruncate */
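/*
** Worked example (illustrative): the 0x7FFFFFFF guard above matters
** when off_t is 32 bits but sf_count_t is 64 bits. A hypothetical call
**
** psf_ftruncate (psf, (sf_count_t) 0x100000000) ;
**
** would silently truncate the length to 0 when narrowed to a 32 bit
** off_t, so it is rejected up front instead of reaching ftruncate().
*/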
void
psf_init_files (SF_PRIVATE *psf)
{ psf->file.filedes = -1 ;
psf->rsrc.filedes = -1 ;
psf->file.savedes = -1 ;
} /* psf_init_files */
void
psf_use_rsrc (SF_PRIVATE *psf, int on_off)
{
if (on_off)
{ if (psf->file.filedes != psf->rsrc.filedes)
{ psf->file.savedes = psf->file.filedes ;
psf->file.filedes = psf->rsrc.filedes ;
} ;
}
else if (psf->file.filedes == psf->rsrc.filedes)
psf->file.filedes = psf->file.savedes ;
return ;
} /* psf_use_rsrc */
static int
psf_open_fd (PSF_FILE * pfile)
{ int fd, oflag, mode ;
/*
** Sanity check. If everything is OK, this test and the printfs will
** be optimised out. This is meant to catch the problems caused by
** "sfconfig.h" being included after <stdio.h>.
*/
if (sizeof (sf_count_t) != 8)
{ puts ("\n\n*** Fatal error : sizeof (sf_count_t) != 8") ;
puts ("*** This means that libsndfile was not configured correctly.\n") ;
exit (1) ;
} ;
switch (pfile->mode)
{ case SFM_READ :
oflag = O_RDONLY | O_BINARY ;
mode = 0 ;
break ;
case SFM_WRITE :
oflag = O_WRONLY | O_CREAT | O_TRUNC | O_BINARY ;
mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH ;
break ;
case SFM_RDWR :
oflag = O_RDWR | O_CREAT | O_BINARY ;
mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH ;
break ;
default :
return - SFE_BAD_OPEN_MODE ;
break ;
} ;
if (mode == 0)
fd = open (pfile->path.c, oflag) ;
else
fd = open (pfile->path.c, oflag, mode) ;
return fd ;
} /* psf_open_fd */
static void
psf_log_syserr (SF_PRIVATE *psf, int error)
{
/* Only log an error if no error has been set yet. */
if (psf->error == 0)
{ psf->error = SFE_SYSTEM ;
snprintf (psf->syserr, sizeof (psf->syserr), "System error : %s.", strerror (error)) ;
} ;
return ;
} /* psf_log_syserr */
void
psf_fsync (SF_PRIVATE *psf)
{
#if HAVE_FSYNC
if (psf->file.mode == SFM_WRITE || psf->file.mode == SFM_RDWR)
fsync (psf->file.filedes) ;
#else
psf = NULL ;
#endif
} /* psf_fsync */
#elif USE_WINDOWS_API
/* Win32 file i/o functions implemented using native Win32 API */
#include <windows.h>
#include <io.h>
static int psf_close_handle (HANDLE handle) ;
static HANDLE psf_open_handle (PSF_FILE * pfile) ;
static sf_count_t psf_get_filelen_handle (HANDLE handle) ;
/* USE_WINDOWS_API */ int
psf_fopen (SF_PRIVATE *psf)
{
psf->error = 0 ;
psf->file.handle = psf_open_handle (&psf->file) ;
if (psf->file.handle == NULL)
psf_log_syserr (psf, GetLastError ()) ;
return psf->error ;
} /* psf_fopen */
/* USE_WINDOWS_API */ int
psf_fclose (SF_PRIVATE *psf)
{ int retval ;
if (psf->virtual_io)
return 0 ;
if (psf->file.do_not_close_descriptor)
{ psf->file.handle = NULL ;
return 0 ;
} ;
if ((retval = psf_close_handle (psf->file.handle)) == -1)
psf_log_syserr (psf, GetLastError ()) ;
psf->file.handle = NULL ;
return retval ;
} /* psf_fclose */
/* USE_WINDOWS_API */ int
psf_open_rsrc (SF_PRIVATE *psf)
{
if (psf->rsrc.handle != NULL)
return 0 ;
/* Test for MacOSX style resource fork on HFS or HFS+ filesystems. */
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s/rsrc", psf->file.path.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.handle = psf_open_handle (&psf->rsrc)) != NULL)
{ psf->rsrclength = psf_get_filelen_handle (psf->rsrc.handle) ;
return SFE_NO_ERROR ;
} ;
/*
** Now try for a resource fork stored as a separate file in the same
** directory, but preceded with a dot underscore.
*/
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s._%s", psf->file.dir.c, psf->file.name.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.handle = psf_open_handle (&psf->rsrc)) != NULL)
{ psf->rsrclength = psf_get_filelen_handle (psf->rsrc.handle) ;
return SFE_NO_ERROR ;
} ;
/*
** Now try for a resource fork stored in a separate file in the
** .AppleDouble/ directory.
*/
snprintf (psf->rsrc.path.c, sizeof (psf->rsrc.path.c), "%s.AppleDouble/%s", psf->file.dir.c, psf->file.name.c) ;
psf->error = SFE_NO_ERROR ;
if ((psf->rsrc.handle = psf_open_handle (&psf->rsrc)) != NULL)
{ psf->rsrclength = psf_get_filelen_handle (psf->rsrc.handle) ;
return SFE_NO_ERROR ;
} ;
/* No resource file found. */
if (psf->rsrc.handle == NULL)
psf_log_syserr (psf, GetLastError ()) ;
psf->rsrc.handle = NULL ;
return psf->error ;
} /* psf_open_rsrc */
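/*
** Illustrative sketch (not part of this file) : for a file
** "/music/song.aiff" with dir "/music/" and name "song.aiff", the three
** resource fork candidates probed above are built as follows. All the
** variable names here are hypothetical.
*/
#if 0
{	char rsrc_path [512] ;
	const char *path = "/music/song.aiff" ;
	const char *dir = "/music/", *name = "song.aiff" ;
	/* 1. MacOSX "/rsrc" style : "/music/song.aiff/rsrc" */
	snprintf (rsrc_path, sizeof (rsrc_path), "%s/rsrc", path) ;
	/* 2. Dot-underscore style : "/music/._song.aiff" */
	snprintf (rsrc_path, sizeof (rsrc_path), "%s._%s", dir, name) ;
	/* 3. AppleDouble style : "/music/.AppleDouble/song.aiff" */
	snprintf (rsrc_path, sizeof (rsrc_path), "%s.AppleDouble/%s", dir, name) ;
} ;
#endif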
/* USE_WINDOWS_API */ sf_count_t
psf_get_filelen (SF_PRIVATE *psf)
{ sf_count_t filelen ;
if (psf->virtual_io)
return psf->vio.get_filelen (psf->vio_user_data) ;
filelen = psf_get_filelen_handle (psf->file.handle) ;
if (filelen == -1)
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
if (filelen == -SFE_BAD_STAT_SIZE)
{ psf->error = SFE_BAD_STAT_SIZE ;
return (sf_count_t) -1 ;
} ;
switch (psf->file.mode)
{ case SFM_WRITE :
filelen = filelen - psf->fileoffset ;
break ;
case SFM_READ :
if (psf->fileoffset > 0 && psf->filelength > 0)
filelen = psf->filelength ;
break ;
case SFM_RDWR :
/*
** Embedded files cannot be opened SFM_RDWR, so there is no need to
** subtract psf->fileoffset. We already have the answer we need.
*/
break ;
default :
/* Shouldn't be here, so return error. */
filelen = -1 ;
} ;
return filelen ;
} /* psf_get_filelen */
/* USE_WINDOWS_API */ void
psf_init_files (SF_PRIVATE *psf)
{ psf->file.handle = NULL ;
psf->rsrc.handle = NULL ;
psf->file.hsaved = NULL ;
} /* psf_init_files */
/* USE_WINDOWS_API */ void
psf_use_rsrc (SF_PRIVATE *psf, int on_off)
{
if (on_off)
{ if (psf->file.handle != psf->rsrc.handle)
{ psf->file.hsaved = psf->file.handle ;
psf->file.handle = psf->rsrc.handle ;
} ;
}
else if (psf->file.handle == psf->rsrc.handle)
psf->file.handle = psf->file.hsaved ;
return ;
} /* psf_use_rsrc */
/* USE_WINDOWS_API */ static HANDLE
psf_open_handle (PSF_FILE * pfile)
{ DWORD dwDesiredAccess ;
DWORD dwShareMode ;
DWORD dwCreationDistribution ;
HANDLE handle ;
switch (pfile->mode)
{ case SFM_READ :
dwDesiredAccess = GENERIC_READ ;
dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE ;
dwCreationDistribution = OPEN_EXISTING ;
break ;
case SFM_WRITE :
dwDesiredAccess = GENERIC_WRITE ;
dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE ;
dwCreationDistribution = CREATE_ALWAYS ;
break ;
case SFM_RDWR :
dwDesiredAccess = GENERIC_READ | GENERIC_WRITE ;
dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE ;
dwCreationDistribution = OPEN_ALWAYS ;
break ;
default :
return NULL ;
} ;
if (pfile->use_wchar)
handle = CreateFileW (
pfile->path.wc, /* pointer to name of the file */
dwDesiredAccess, /* access (read-write) mode */
dwShareMode, /* share mode */
0, /* pointer to security attributes */
dwCreationDistribution, /* how to create */
FILE_ATTRIBUTE_NORMAL, /* file attributes (could use FILE_FLAG_SEQUENTIAL_SCAN) */
NULL /* handle to file with attributes to copy */
) ;
else
handle = CreateFile (
pfile->path.c, /* pointer to name of the file */
dwDesiredAccess, /* access (read-write) mode */
dwShareMode, /* share mode */
0, /* pointer to security attributes */
dwCreationDistribution, /* how to create */
FILE_ATTRIBUTE_NORMAL, /* file attributes (could use FILE_FLAG_SEQUENTIAL_SCAN) */
NULL /* handle to file with attributes to copy */
) ;
if (handle == INVALID_HANDLE_VALUE)
return NULL ;
return handle ;
} /* psf_open_handle */
/* USE_WINDOWS_API */ static void
psf_log_syserr (SF_PRIVATE *psf, int error)
{ LPVOID lpMsgBuf ;
/* Only log an error if no error has been set yet. */
if (psf->error == 0)
{ psf->error = SFE_SYSTEM ;
FormatMessage (
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
NULL,
error,
MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR) &lpMsgBuf,
0,
NULL
) ;
snprintf (psf->syserr, sizeof (psf->syserr), "System error : %s", (char*) lpMsgBuf) ;
LocalFree (lpMsgBuf) ;
} ;
return ;
} /* psf_log_syserr */
/* USE_WINDOWS_API */ int
psf_close_rsrc (SF_PRIVATE *psf)
{ psf_close_handle (psf->rsrc.handle) ;
psf->rsrc.handle = NULL ;
return 0 ;
} /* psf_close_rsrc */
/* USE_WINDOWS_API */ int
psf_set_stdio (SF_PRIVATE *psf)
{ HANDLE handle = NULL ;
int error = 0 ;
switch (psf->file.mode)
{ case SFM_RDWR :
error = SFE_OPEN_PIPE_RDWR ;
break ;
case SFM_READ :
handle = GetStdHandle (STD_INPUT_HANDLE) ;
psf->file.do_not_close_descriptor = 1 ;
break ;
case SFM_WRITE :
handle = GetStdHandle (STD_OUTPUT_HANDLE) ;
psf->file.do_not_close_descriptor = 1 ;
break ;
default :
error = SFE_BAD_OPEN_MODE ;
break ;
} ;
psf->file.handle = handle ;
psf->filelength = 0 ;
return error ;
} /* psf_set_stdio */
/* USE_WINDOWS_API */ void
psf_set_file (SF_PRIVATE *psf, int fd)
{ HANDLE handle ;
intptr_t osfhandle ;
osfhandle = _get_osfhandle (fd) ;
handle = (HANDLE) osfhandle ;
psf->file.handle = handle ;
} /* psf_set_file */
/* USE_WINDOWS_API */ int
psf_file_valid (SF_PRIVATE *psf)
{ if (psf->file.handle == NULL)
return SF_FALSE ;
if (psf->file.handle == INVALID_HANDLE_VALUE)
return SF_FALSE ;
return SF_TRUE ;
} /* psf_file_valid */
/* USE_WINDOWS_API */ sf_count_t
psf_fseek (SF_PRIVATE *psf, sf_count_t offset, int whence)
{ sf_count_t new_position ;
LONG lDistanceToMove, lDistanceToMoveHigh ;
DWORD dwMoveMethod ;
DWORD dwResult, dwError ;
if (psf->virtual_io)
return psf->vio.seek (offset, whence, psf->vio_user_data) ;
switch (whence)
{ case SEEK_SET :
offset += psf->fileoffset ;
dwMoveMethod = FILE_BEGIN ;
break ;
case SEEK_END :
dwMoveMethod = FILE_END ;
break ;
default :
dwMoveMethod = FILE_CURRENT ;
break ;
} ;
lDistanceToMove = (DWORD) (offset & 0xFFFFFFFF) ;
lDistanceToMoveHigh = (DWORD) ((offset >> 32) & 0xFFFFFFFF) ;
dwResult = SetFilePointer (psf->file.handle, lDistanceToMove, &lDistanceToMoveHigh, dwMoveMethod) ;
if (dwResult == 0xFFFFFFFF)
dwError = GetLastError () ;
else
dwError = NO_ERROR ;
if (dwError != NO_ERROR)
{ psf_log_syserr (psf, dwError) ;
return -1 ;
} ;
new_position = (dwResult + ((__int64) lDistanceToMoveHigh << 32)) - psf->fileoffset ;
return new_position ;
} /* psf_fseek */
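/*
** A minimal sketch (an assumption, not code used by this file) : on newer
** Windows versions the same 64 bit seek can be written with
** SetFilePointerEx(), avoiding the manual low/high split and the
** 0xFFFFFFFF error ambiguity above.
*/
#if 0
static sf_count_t
seek64_example (HANDLE handle, sf_count_t offset, DWORD dwMoveMethod)
{	LARGE_INTEGER liDistanceToMove, liNewFilePointer ;
	liDistanceToMove.QuadPart = offset ;
	if (SetFilePointerEx (handle, liDistanceToMove, &liNewFilePointer, dwMoveMethod) == 0)
		return (sf_count_t) -1 ; /* Call GetLastError () for details. */
	return liNewFilePointer.QuadPart ;
} /* seek64_example */
#endif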
/* USE_WINDOWS_API */ sf_count_t
psf_fread (void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
DWORD dwNumberOfBytesRead ;
if (psf->virtual_io)
return psf->vio.read (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the reads down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ;
if (ReadFile (psf->file.handle, ((char*) ptr) + total, count, &dwNumberOfBytesRead, 0) == 0)
{ psf_log_syserr (psf, GetLastError ()) ;
break ;
}
else
count = dwNumberOfBytesRead ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fread */
/* USE_WINDOWS_API */ sf_count_t
psf_fwrite (const void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
DWORD dwNumberOfBytesWritten ;
if (psf->virtual_io)
return psf->vio.write (ptr, bytes * items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the writes down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ;
if (WriteFile (psf->file.handle, ((const char*) ptr) + total, count, &dwNumberOfBytesWritten, 0) == 0)
{ psf_log_syserr (psf, GetLastError ()) ;
break ;
}
else
count = dwNumberOfBytesWritten ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
if (psf->is_pipe)
psf->pipeoffset += total ;
return total / bytes ;
} /* psf_fwrite */
/* USE_WINDOWS_API */ sf_count_t
psf_ftell (SF_PRIVATE *psf)
{ sf_count_t pos ;
LONG lDistanceToMoveLow, lDistanceToMoveHigh ;
DWORD dwResult, dwError ;
if (psf->virtual_io)
return psf->vio.tell (psf->vio_user_data) ;
if (psf->is_pipe)
return psf->pipeoffset ;
lDistanceToMoveLow = 0 ;
lDistanceToMoveHigh = 0 ;
dwResult = SetFilePointer (psf->file.handle, lDistanceToMoveLow, &lDistanceToMoveHigh, FILE_CURRENT) ;
if (dwResult == 0xFFFFFFFF)
dwError = GetLastError () ;
else
dwError = NO_ERROR ;
if (dwError != NO_ERROR)
{ psf_log_syserr (psf, dwError) ;
return -1 ;
} ;
pos = (dwResult + ((__int64) lDistanceToMoveHigh << 32)) ;
return pos - psf->fileoffset ;
} /* psf_ftell */
/* USE_WINDOWS_API */ static int
psf_close_handle (HANDLE handle)
{ if (handle == NULL)
return 0 ;
if (CloseHandle (handle) == 0)
return -1 ;
return 0 ;
} /* psf_close_handle */
/* USE_WINDOWS_API */ sf_count_t
psf_fgets (char *buffer, sf_count_t bufsize, SF_PRIVATE *psf)
{ sf_count_t k = 0 ;
sf_count_t count ;
DWORD dwNumberOfBytesRead ;
while (k < bufsize - 1)
{ if (ReadFile (psf->file.handle, &(buffer [k]), 1, &dwNumberOfBytesRead, 0) == 0)
{ psf_log_syserr (psf, GetLastError ()) ;
break ;
}
else
{ count = dwNumberOfBytesRead ;
/* Note that we only check for '\n', not other line endings such as CRLF. */
if (count == 0 || buffer [k++] == '\n')
break ;
} ;
} ;
buffer [k] = 0 ;
return k ;
} /* psf_fgets */
/* USE_WINDOWS_API */ int
psf_is_pipe (SF_PRIVATE *psf)
{
if (psf->virtual_io)
return SF_FALSE ;
if (GetFileType (psf->file.handle) == FILE_TYPE_DISK)
return SF_FALSE ;
/* Default to maximum safety. */
return SF_TRUE ;
} /* psf_is_pipe */
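/*
** A minimal sketch (assumption, not used by this file) of the same test
** written to distinguish the common GetFileType() results explicitly.
*/
#if 0
static int
is_pipe_example (HANDLE handle)
{	switch (GetFileType (handle))
	{	case FILE_TYPE_DISK :	/* Regular file : seekable. */
			return SF_FALSE ;
		case FILE_TYPE_PIPE :	/* Anonymous or named pipe, or a socket. */
		case FILE_TYPE_CHAR :	/* Console or another character device. */
		default :
			return SF_TRUE ;
	} ;
} /* is_pipe_example */
#endif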
/* USE_WINDOWS_API */ sf_count_t
psf_get_filelen_handle (HANDLE handle)
{ sf_count_t filelen ;
DWORD dwFileSizeLow, dwFileSizeHigh, dwError = NO_ERROR ;
dwFileSizeLow = GetFileSize (handle, &dwFileSizeHigh) ;
if (dwFileSizeLow == 0xFFFFFFFF)
dwError = GetLastError () ;
if (dwError != NO_ERROR)
return (sf_count_t) -1 ;
filelen = dwFileSizeLow + ((__int64) dwFileSizeHigh << 32) ;
return filelen ;
} /* psf_get_filelen_handle */
/* USE_WINDOWS_API */ void
psf_fsync (SF_PRIVATE *psf)
{ FlushFileBuffers (psf->file.handle) ;
} /* psf_fsync */
/* USE_WINDOWS_API */ int
psf_ftruncate (SF_PRIVATE *psf, sf_count_t len)
{ int retval = 0 ;
LONG lDistanceToMoveLow, lDistanceToMoveHigh ;
DWORD dwResult, dwError = NO_ERROR ;
/* This implementation trashes the current file position.
** Should it save and restore it? What happens if the current position
** is past the new end of file?
*/
/* Returns 0 on success, non-zero on failure. */
if (len < 0)
return 1 ;
lDistanceToMoveLow = (DWORD) (len & 0xFFFFFFFF) ;
lDistanceToMoveHigh = (DWORD) ((len >> 32) & 0xFFFFFFFF) ;
dwResult = SetFilePointer (psf->file.handle, lDistanceToMoveLow, &lDistanceToMoveHigh, FILE_BEGIN) ;
if (dwResult == 0xFFFFFFFF)
dwError = GetLastError () ;
if (dwError != NO_ERROR)
{ retval = -1 ;
psf_log_syserr (psf, dwError) ;
}
else
{ /* Note: when SetEndOfFile is used to extend a file, the contents of the
** new portion of the file is undefined. This is unlike chsize(),
** which guarantees that the new portion of the file will be zeroed.
** Not sure if this is important or not.
*/
if (SetEndOfFile (psf->file.handle) == 0)
{ retval = -1 ;
psf_log_syserr (psf, GetLastError ()) ;
} ;
} ;
return retval ;
} /* psf_ftruncate */
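/*
** A hedged sketch answering the question raised in the comment above :
** saving and restoring the position around the truncate. Purely
** illustrative; the helper name is hypothetical and this wrapper is not
** used by libsndfile.
*/
#if 0
static int
truncate_keep_position_example (SF_PRIVATE *psf, sf_count_t len)
{	sf_count_t saved = psf_ftell (psf) ;
	int retval = psf_ftruncate (psf, len) ;
	/* Only restore if the old position still lies inside the file. */
	if (retval == 0 && saved >= 0 && saved <= len)
		psf_fseek (psf, saved, SEEK_SET) ;
	return retval ;
} /* truncate_keep_position_example */
#endif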
#else
/* Win32 file i/o functions implemented using Unix-style file i/o API */
/* Win32 has a 64 bit file offset seek function:
**
** __int64 _lseeki64 (int handle, __int64 offset, int origin) ;
**
** It also has a 64 bit fstat function:
**
** int _fstati64 (int handle, struct _stati64 *buffer) ;
**
** Unfortunately, the file size it returns is only valid until more data
** is written at the end of the file, which makes it useless here. See
** psf_get_filelen() below for the seek-based workaround.
*/
#include <io.h>
#include <direct.h>
/* Win32 */ int
psf_fopen (SF_PRIVATE *psf, const char *pathname, int open_mode)
{ int oflag, mode ;
switch (open_mode)
{ case SFM_READ :
oflag = O_RDONLY | O_BINARY ;
mode = 0 ;
break ;
case SFM_WRITE :
oflag = O_WRONLY | O_CREAT | O_TRUNC | O_BINARY ;
mode = S_IRUSR | S_IWUSR | S_IRGRP ;
break ;
case SFM_RDWR :
oflag = O_RDWR | O_CREAT | O_BINARY ;
mode = S_IRUSR | S_IWUSR | S_IRGRP ;
break ;
default :
psf->error = SFE_BAD_OPEN_MODE ;
return -1 ;
break ;
} ;
if (mode == 0)
psf->file.filedes = open (pathname, oflag) ;
else
psf->file.filedes = open (pathname, oflag, mode) ;
if (psf->file.filedes == -1)
psf_log_syserr (psf, errno) ;
return psf->file.filedes ;
} /* psf_fopen */
/* Win32 */ sf_count_t
psf_fseek (SF_PRIVATE *psf, sf_count_t offset, int whence)
{ sf_count_t new_position ;
if (psf->virtual_io)
return psf->vio.seek (offset, whence, psf->vio_user_data) ;
switch (whence)
{ case SEEK_SET :
offset += psf->fileoffset ;
break ;
case SEEK_END :
if (psf->file.mode == SFM_WRITE)
{ new_position = _lseeki64 (psf->file.filedes, offset, whence) ;
if (new_position < 0)
psf_log_syserr (psf, errno) ;
return new_position - psf->fileoffset ;
} ;
/* Transform SEEK_END into a SEEK_SET, i.e. find the file
** length and add the requested offset (which should be <= 0)
** to get the offset relative to the start of the file.
*/
whence = SEEK_SET ;
offset = _lseeki64 (psf->file.filedes, 0, SEEK_END) + offset ;
break ;
default :
/* No need to do anything about SEEK_CUR. */
break ;
} ;
/*
** Bypass a weird Win32-ism if necessary.
** _lseeki64() returns an "invalid parameter" error if called with
** offset == 0 and whence == SEEK_CUR, so use _telli64() instead.
*/
if (offset == 0 && whence == SEEK_CUR)
new_position = _telli64 (psf->file.filedes) ;
else
new_position = _lseeki64 (psf->file.filedes, offset, whence) ;
if (new_position < 0)
psf_log_syserr (psf, errno) ;
new_position -= psf->fileoffset ;
return new_position ;
} /* psf_fseek */
/* Win32 */ sf_count_t
psf_fread (void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (psf->virtual_io)
return psf->vio.read (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the reads down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ;
count = read (psf->file.filedes, ((char*) ptr) + total, (size_t) count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
return total / bytes ;
} /* psf_fread */
/* Win32 */ sf_count_t
psf_fwrite (const void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf)
{ sf_count_t total = 0 ;
ssize_t count ;
if (psf->virtual_io)
return psf->vio.write (ptr, bytes*items, psf->vio_user_data) / bytes ;
items *= bytes ;
/* Do this check after the multiplication above. */
if (items <= 0)
return 0 ;
while (items > 0)
{ /* Break the writes down to a sensible size. */
count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : items ;
count = write (psf->file.filedes, ((const char*) ptr) + total, count) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0)
break ;
total += count ;
items -= count ;
} ;
return total / bytes ;
} /* psf_fwrite */
/* Win32 */ sf_count_t
psf_ftell (SF_PRIVATE *psf)
{ sf_count_t pos ;
if (psf->virtual_io)
return psf->vio.tell (psf->vio_user_data) ;
pos = _telli64 (psf->file.filedes) ;
if (pos == ((sf_count_t) -1))
{ psf_log_syserr (psf, errno) ;
return -1 ;
} ;
return pos - psf->fileoffset ;
} /* psf_ftell */
/* Win32 */ int
psf_fclose (SF_PRIVATE *psf)
{ int retval ;
while ((retval = close (psf->file.filedes)) == -1 && errno == EINTR)
/* Do nothing. */ ;
if (retval == -1)
psf_log_syserr (psf, errno) ;
psf->file.filedes = -1 ;
return retval ;
} /* psf_fclose */
/* Win32 */ sf_count_t
psf_fgets (char *buffer, sf_count_t bufsize, SF_PRIVATE *psf)
{ sf_count_t k = 0 ;
sf_count_t count ;
while (k < bufsize - 1)
{ count = read (psf->file.filedes, &(buffer [k]), 1) ;
if (count == -1)
{ if (errno == EINTR)
continue ;
psf_log_syserr (psf, errno) ;
break ;
} ;
if (count == 0 || buffer [k++] == '\n')
break ;
} ;
buffer [k] = 0 ;
return k ;
} /* psf_fgets */
/* Win32 */ int
psf_is_pipe (SF_PRIVATE *psf)
{ struct stat statbuf ;
if (psf->virtual_io)
return SF_FALSE ;
/* Not sure if this works. */
if (fstat (psf->file.filedes, &statbuf) == -1)
{ psf_log_syserr (psf, errno) ;
/* Default to maximum safety. */
return SF_TRUE ;
} ;
/* These macros are defined in Win32/unistd.h. */
if (S_ISFIFO (statbuf.st_mode) || S_ISSOCK (statbuf.st_mode))
return SF_TRUE ;
return SF_FALSE ;
} /* psf_is_pipe */
/* Win32 */ sf_count_t
psf_get_filelen (SF_PRIVATE *psf)
{
#if 0
/*
** This code should work but doesn't; the file size returned by
** _fstati64() is unreliable (see the comment at the top of this
** section). The seek-based code below does work.
*/
struct _stati64 statbuf ;
if (_fstati64 (psf->file.filedes, &statbuf))
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
return statbuf.st_size ;
#else
sf_count_t current, filelen ;
if (psf->virtual_io)
return psf->vio.get_filelen (psf->vio_user_data) ;
if ((current = _telli64 (psf->file.filedes)) < 0)
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
/*
** For some reason, _lseeki64() has to be called TWICE to reliably
** reach the end of the file.
**
** This might have been avoided if Windows implemented the POSIX
** standard function fsync().
*/
_lseeki64 (psf->file.filedes, 0, SEEK_END) ;
if ((filelen = _lseeki64 (psf->file.filedes, 0, SEEK_END)) < 0)
{ psf_log_syserr (psf, errno) ;
return (sf_count_t) -1 ;
} ;
if (filelen > current)
_lseeki64 (psf->file.filedes, current, SEEK_SET) ;
switch (psf->file.mode)
{ case SFM_WRITE :
filelen = filelen - psf->fileoffset ;
break ;
case SFM_READ :
if (psf->fileoffset > 0 && psf->filelength > 0)
filelen = psf->filelength ;
break ;
case SFM_RDWR :
/*
** Embedded files cannot be opened SFM_RDWR, so there is no need to
** subtract psf->fileoffset. We already have the answer we need.
*/
break ;
default :
filelen = 0 ;
} ;
return filelen ;
#endif
} /* psf_get_filelen */
/* Win32 */ int
psf_ftruncate (SF_PRIVATE *psf, sf_count_t len)
{ int retval ;
/* Returns 0 on success, non-zero on failure. */
if (len < 0)
return 1 ;
/* Microsoft implemented nearly all of the required 64 bit file offset
** functions except for truncate.
**
** As a result this function is not 64 bit file offset clean; lengths
** are limited to 2 Gig. Someone needs to clean this up.
*/
if (len > 0x7FFFFFFF)
return -1 ;
retval = chsize (psf->file.filedes, len) ;
if (retval == -1)
psf_log_syserr (psf, errno) ;
return retval ;
} /* psf_ftruncate */
static void
psf_log_syserr (SF_PRIVATE *psf, int error)
{
/* Only log an error if no error has been set yet. */
if (psf->error == 0)
{ psf->error = SFE_SYSTEM ;
snprintf (psf->syserr, sizeof (psf->syserr), "System error : %s", strerror (error)) ;
} ;
return ;
} /* psf_log_syserr */
#endif
/*
* Routines for driver control interface
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/threads.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/info.h>
#include <sound/control.h>
/* max number of user-defined controls */
#define MAX_USER_CONTROLS 32
#define MAX_CONTROL_COUNT 1028
struct snd_kctl_ioctl {
struct list_head list; /* list of all ioctls */
snd_kctl_ioctl_func_t fioctl;
};
static DECLARE_RWSEM(snd_ioctl_rwsem);
static LIST_HEAD(snd_control_ioctls);
#ifdef CONFIG_COMPAT
static LIST_HEAD(snd_control_compat_ioctls);
#endif
static int snd_ctl_open(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
int err;
err = nonseekable_open(inode, file);
if (err < 0)
return err;
card = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_CONTROL);
if (!card) {
err = -ENODEV;
goto __error1;
}
err = snd_card_file_add(card, file);
if (err < 0) {
err = -ENODEV;
goto __error1;
}
if (!try_module_get(card->module)) {
err = -EFAULT;
goto __error2;
}
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (ctl == NULL) {
err = -ENOMEM;
goto __error;
}
INIT_LIST_HEAD(&ctl->events);
init_waitqueue_head(&ctl->change_sleep);
spin_lock_init(&ctl->read_lock);
ctl->card = card;
ctl->prefer_pcm_subdevice = -1;
ctl->prefer_rawmidi_subdevice = -1;
ctl->pid = get_pid(task_pid(current));
file->private_data = ctl;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_add_tail(&ctl->list, &card->ctl_files);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
snd_card_unref(card);
return 0;
__error:
module_put(card->module);
__error2:
snd_card_file_remove(card, file);
__error1:
if (card)
snd_card_unref(card);
return err;
}
static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl)
{
unsigned long flags;
struct snd_kctl_event *cread;
spin_lock_irqsave(&ctl->read_lock, flags);
while (!list_empty(&ctl->events)) {
cread = snd_kctl_event(ctl->events.next);
list_del(&cread->list);
kfree(cread);
}
spin_unlock_irqrestore(&ctl->read_lock, flags);
}
static int snd_ctl_release(struct inode *inode, struct file *file)
{
unsigned long flags;
struct snd_card *card;
struct snd_ctl_file *ctl;
struct snd_kcontrol *control;
unsigned int idx;
ctl = file->private_data;
file->private_data = NULL;
card = ctl->card;
write_lock_irqsave(&card->ctl_files_rwlock, flags);
list_del(&ctl->list);
write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
down_write(&card->controls_rwsem);
list_for_each_entry(control, &card->controls, list)
for (idx = 0; idx < control->count; idx++)
if (control->vd[idx].owner == ctl)
control->vd[idx].owner = NULL;
up_write(&card->controls_rwsem);
snd_ctl_empty_read_queue(ctl);
put_pid(ctl->pid);
kfree(ctl);
module_put(card->module);
snd_card_file_remove(card, file);
return 0;
}
void snd_ctl_notify(struct snd_card *card, unsigned int mask,
struct snd_ctl_elem_id *id)
{
unsigned long flags;
struct snd_ctl_file *ctl;
struct snd_kctl_event *ev;
if (snd_BUG_ON(!card || !id))
return;
read_lock(&card->ctl_files_rwlock);
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
card->mixer_oss_change_count++;
#endif
list_for_each_entry(ctl, &card->ctl_files, list) {
if (!ctl->subscribed)
continue;
spin_lock_irqsave(&ctl->read_lock, flags);
list_for_each_entry(ev, &ctl->events, list) {
if (ev->id.numid == id->numid) {
ev->mask |= mask;
goto _found;
}
}
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (ev) {
ev->id = *id;
ev->mask = mask;
list_add_tail(&ev->list, &ctl->events);
} else {
dev_err(card->dev, "No memory available to allocate event\n");
}
_found:
wake_up(&ctl->change_sleep);
spin_unlock_irqrestore(&ctl->read_lock, flags);
kill_fasync(&ctl->fasync, SIGIO, POLL_IN);
}
read_unlock(&card->ctl_files_rwlock);
}
EXPORT_SYMBOL(snd_ctl_notify);
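/*
 * Usage sketch (illustrative, not from this file): a driver that changes
 * a control value internally, e.g. from a workqueue handling a hardware
 * volume knob, reports the change like this so that subscribed
 * applications are woken up. All names here are hypothetical.
 */
#if 0
static void example_volume_changed(struct snd_card *card,
				   struct snd_kcontrol *kctl)
{
	snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
}
#endif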
/**
* snd_ctl_new - create a control instance from the template
* @control: the control template
* @access: the default control access
*
* Allocates a new struct snd_kcontrol instance and copies the given template
* to the new instance. It does not copy volatile data (access).
*
* Return: The pointer of the new instance, or %NULL on failure.
*/
static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
unsigned int access)
{
struct snd_kcontrol *kctl;
unsigned int idx;
if (snd_BUG_ON(!control || !control->count))
return NULL;
if (control->count > MAX_CONTROL_COUNT)
return NULL;
kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
if (kctl == NULL) {
pr_err("ALSA: Cannot allocate control instance\n");
return NULL;
}
*kctl = *control;
for (idx = 0; idx < kctl->count; idx++)
kctl->vd[idx].access = access;
return kctl;
}
/**
* snd_ctl_new1 - create a control instance from the template
* @ncontrol: the initialization record
* @private_data: the private data to set
*
* Allocates a new struct snd_kcontrol instance and initialize from the given
 * template. When the access field of ncontrol is 0, READWRITE access
 * is assumed. When the count field is 0, it is assumed to be one.
*
* Return: The pointer of the newly generated instance, or %NULL on failure.
*/
struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol,
void *private_data)
{
struct snd_kcontrol kctl;
unsigned int access;
if (snd_BUG_ON(!ncontrol || !ncontrol->info))
return NULL;
memset(&kctl, 0, sizeof(kctl));
kctl.id.iface = ncontrol->iface;
kctl.id.device = ncontrol->device;
kctl.id.subdevice = ncontrol->subdevice;
if (ncontrol->name) {
strlcpy(kctl.id.name, ncontrol->name, sizeof(kctl.id.name));
if (strcmp(ncontrol->name, kctl.id.name) != 0)
pr_warn("ALSA: Control name '%s' truncated to '%s'\n",
ncontrol->name, kctl.id.name);
}
kctl.id.index = ncontrol->index;
kctl.count = ncontrol->count ? ncontrol->count : 1;
access = ncontrol->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(ncontrol->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_VOLATILE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE|
SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND|
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK));
kctl.info = ncontrol->info;
kctl.get = ncontrol->get;
kctl.put = ncontrol->put;
kctl.tlv.p = ncontrol->tlv.p;
kctl.private_value = ncontrol->private_value;
kctl.private_data = private_data;
return snd_ctl_new(&kctl, access);
}
EXPORT_SYMBOL(snd_ctl_new1);
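/*
 * Illustrative sketch (not part of this file): the usual driver-side
 * pattern is to fill in a snd_kcontrol_new template and hand the result
 * of snd_ctl_new1() straight to snd_ctl_add(). All names below are
 * hypothetical.
 */
#if 0
static const struct snd_kcontrol_new example_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Example Playback Switch",
	.info = example_info,	/* hypothetical info/get/put callbacks */
	.get = example_get,
	.put = example_put,
};

static int example_build_controls(struct snd_card *card, void *chip)
{
	/* snd_ctl_add() frees the new control itself if it cannot be added. */
	return snd_ctl_add(card, snd_ctl_new1(&example_ctl, chip));
}
#endif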
/**
* snd_ctl_free_one - release the control instance
* @kcontrol: the control instance
*
* Releases the control instance created via snd_ctl_new()
* or snd_ctl_new1().
* Don't call this after the control was added to the card.
*/
void snd_ctl_free_one(struct snd_kcontrol *kcontrol)
{
if (kcontrol) {
if (kcontrol->private_free)
kcontrol->private_free(kcontrol);
kfree(kcontrol);
}
}
EXPORT_SYMBOL(snd_ctl_free_one);
static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
unsigned int count)
{
struct snd_kcontrol *kctl;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid < card->last_numid + 1 + count &&
kctl->id.numid + kctl->count > card->last_numid + 1) {
card->last_numid = kctl->id.numid + kctl->count - 1;
return true;
}
}
return false;
}
static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
{
unsigned int iter = 100000;
while (snd_ctl_remove_numid_conflict(card, count)) {
if (--iter == 0) {
/* this situation is very unlikely */
dev_err(card->dev, "unable to allocate new control numid\n");
return -ENOMEM;
}
}
return 0;
}
/**
* snd_ctl_add - add the control instance to the card
* @card: the card instance
* @kcontrol: the control instance to add
*
* Adds the control instance created via snd_ctl_new() or
 * snd_ctl_new1() to the given card. It also assigns a unique numid
 * used for fast lookups.
 *
 * The control is freed automatically if it cannot be added.
*
* Return: Zero if successful, or a negative error code on failure.
*
*/
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
unsigned int count;
int err = -EINVAL;
if (! kcontrol)
return err;
if (snd_BUG_ON(!card || !kcontrol->info))
goto error;
id = kcontrol->id;
down_write(&card->controls_rwsem);
if (snd_ctl_find_id(card, &id)) {
up_write(&card->controls_rwsem);
dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
id.iface,
id.device,
id.subdevice,
id.name,
id.index);
err = -EBUSY;
goto error;
}
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
err = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return err;
}
EXPORT_SYMBOL(snd_ctl_add);
/**
* snd_ctl_replace - replace the control instance of the card
* @card: the card instance
* @kcontrol: the control instance to replace
* @add_on_replace: add the control if not already added
*
* Replaces the given control. If the given control does not exist
* and the add_on_replace flag is set, the control is added. If the
* control exists, it is destroyed first.
*
 * The control is freed automatically if it cannot be added or replaced.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
bool add_on_replace)
{
struct snd_ctl_elem_id id;
unsigned int count;
unsigned int idx;
struct snd_kcontrol *old;
int ret;
if (!kcontrol)
return -EINVAL;
if (snd_BUG_ON(!card || !kcontrol->info)) {
ret = -EINVAL;
goto error;
}
id = kcontrol->id;
down_write(&card->controls_rwsem);
old = snd_ctl_find_id(card, &id);
if (!old) {
if (add_on_replace)
goto add;
up_write(&card->controls_rwsem);
ret = -EINVAL;
goto error;
}
ret = snd_ctl_remove(card, old);
if (ret < 0) {
up_write(&card->controls_rwsem);
goto error;
}
add:
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
ret = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
count = kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return ret;
}
EXPORT_SYMBOL(snd_ctl_replace);
/**
* snd_ctl_remove - remove the control from the card and release it
* @card: the card instance
* @kcontrol: the control instance to remove
*
* Removes the control from the card and then releases the instance.
* You don't need to call snd_ctl_free_one(). You must be in
* the write lock - down_write(&card->controls_rwsem).
*
* Return: 0 if successful, or a negative error code on failure.
*/
int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
if (snd_BUG_ON(!card || !kcontrol))
return -EINVAL;
list_del(&kcontrol->list);
card->controls_count -= kcontrol->count;
id = kcontrol->id;
for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_REMOVE, &id);
snd_ctl_free_one(kcontrol);
return 0;
}
EXPORT_SYMBOL(snd_ctl_remove);
/**
* snd_ctl_remove_id - remove the control of the given id and release it
* @card: the card instance
* @id: the control id to remove
*
* Finds the control instance with the given id, removes it from the
* card list and releases it.
*
* Return: 0 if successful, or a negative error code on failure.
*/
int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id)
{
struct snd_kcontrol *kctl;
int ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
up_write(&card->controls_rwsem);
return -ENOENT;
}
ret = snd_ctl_remove(card, kctl);
up_write(&card->controls_rwsem);
return ret;
}
EXPORT_SYMBOL(snd_ctl_remove_id);
/**
* snd_ctl_remove_user_ctl - remove and release the unlocked user control
* @file: active control handle
* @id: the control id to remove
*
* Finds the control instance with the given id, removes it from the
* card list and releases it.
*
* Return: 0 if successful, or a negative error code on failure.
*/
static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file,
struct snd_ctl_elem_id *id)
{
struct snd_card *card = file->card;
struct snd_kcontrol *kctl;
int idx, ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
ret = -ENOENT;
goto error;
}
if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) {
ret = -EINVAL;
goto error;
}
for (idx = 0; idx < kctl->count; idx++)
if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) {
ret = -EBUSY;
goto error;
}
ret = snd_ctl_remove(card, kctl);
if (ret < 0)
goto error;
card->user_ctl_count--;
error:
up_write(&card->controls_rwsem);
return ret;
}
/**
* snd_ctl_activate_id - activate/inactivate the control of the given id
* @card: the card instance
* @id: the control id to activate/inactivate
* @active: non-zero to activate
*
 * Finds the control instance with the given id, and activates or
 * inactivates it, sending a notification if the state changed.
*
* Return: 0 if unchanged, 1 if changed, or a negative error code on failure.
*/
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id,
int active)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int ret;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
ret = -ENOENT;
goto unlock;
}
index_offset = snd_ctl_get_ioff(kctl, &kctl->id);
vd = &kctl->vd[index_offset];
ret = 0;
if (active) {
if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE))
goto unlock;
vd->access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
} else {
if (vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE)
goto unlock;
vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
}
ret = 1;
unlock:
up_write(&card->controls_rwsem);
if (ret > 0)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, id);
return ret;
}
EXPORT_SYMBOL_GPL(snd_ctl_activate_id);
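/*
 * Minimal usage sketch (an assumption, not code from this file): a
 * driver graying out a mixer control by name. The control name is
 * hypothetical.
 */
#if 0
static void example_set_active(struct snd_card *card, int active)
{
	struct snd_ctl_elem_id id;

	memset(&id, 0, sizeof(id));
	id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	strlcpy(id.name, "Example Playback Switch", sizeof(id.name));
	/* Returns 1 and sends SNDRV_CTL_EVENT_MASK_INFO if the state changed. */
	snd_ctl_activate_id(card, &id, active);
}
#endif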
/**
* snd_ctl_rename_id - replace the id of a control on the card
* @card: the card instance
* @src_id: the old id
* @dst_id: the new id
*
* Finds the control with the old id from the card, and replaces the
* id with the new one.
*
* Return: Zero if successful, or a negative error code on failure.
*/
int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id,
struct snd_ctl_elem_id *dst_id)
{
struct snd_kcontrol *kctl;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, src_id);
if (kctl == NULL) {
up_write(&card->controls_rwsem);
return -ENOENT;
}
kctl->id = *dst_id;
kctl->id.numid = card->last_numid + 1;
card->last_numid += kctl->count;
up_write(&card->controls_rwsem);
return 0;
}
EXPORT_SYMBOL(snd_ctl_rename_id);
/**
* snd_ctl_find_numid - find the control instance with the given number-id
* @card: the card instance
* @numid: the number-id to search
*
* Finds the control instance with the given number-id from the card.
*
* The caller must down card->controls_rwsem before calling this function
* (if the race condition can happen).
*
* Return: The pointer of the instance if found, or %NULL if not.
*
*/
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid)
{
struct snd_kcontrol *kctl;
if (snd_BUG_ON(!card || !numid))
return NULL;
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid)
return kctl;
}
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_numid);
/**
* snd_ctl_find_id - find the control instance with the given id
* @card: the card instance
* @id: the id to search
*
* Finds the control instance with the given id from the card.
*
* The caller must down card->controls_rwsem before calling this function
* (if the race condition can happen).
*
* Return: The pointer of the instance if found, or %NULL if not.
*
*/
struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card,
struct snd_ctl_elem_id *id)
{
struct snd_kcontrol *kctl;
if (snd_BUG_ON(!card || !id))
return NULL;
if (id->numid != 0)
return snd_ctl_find_numid(card, id->numid);
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.iface != id->iface)
continue;
if (kctl->id.device != id->device)
continue;
if (kctl->id.subdevice != id->subdevice)
continue;
if (strncmp(kctl->id.name, id->name, sizeof(kctl->id.name)))
continue;
if (kctl->id.index > id->index)
continue;
if (kctl->id.index + kctl->count <= id->index)
continue;
return kctl;
}
return NULL;
}
EXPORT_SYMBOL(snd_ctl_find_id);
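/*
 * Sketch of the locking contract described above (illustrative only):
 * callers that can race with control addition/removal must hold
 * controls_rwsem around the lookup and any use of the returned pointer.
 */
#if 0
static int example_lookup(struct snd_card *card, struct snd_ctl_elem_id *id)
{
	struct snd_kcontrol *kctl;
	int found;

	down_read(&card->controls_rwsem);
	kctl = snd_ctl_find_id(card, id);
	found = kctl != NULL;	/* kctl must not be used after the unlock */
	up_read(&card->controls_rwsem);
	return found;
}
#endif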
static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl,
unsigned int cmd, void __user *arg)
{
struct snd_ctl_card_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (! info)
return -ENOMEM;
down_read(&snd_ioctl_rwsem);
info->card = card->number;
strlcpy(info->id, card->id, sizeof(info->id));
strlcpy(info->driver, card->driver, sizeof(info->driver));
strlcpy(info->name, card->shortname, sizeof(info->name));
strlcpy(info->longname, card->longname, sizeof(info->longname));
strlcpy(info->mixername, card->mixername, sizeof(info->mixername));
strlcpy(info->components, card->components, sizeof(info->components));
up_read(&snd_ioctl_rwsem);
if (copy_to_user(arg, info, sizeof(struct snd_ctl_card_info))) {
kfree(info);
return -EFAULT;
}
kfree(info);
return 0;
}
static int snd_ctl_elem_list(struct snd_card *card,
struct snd_ctl_elem_list __user *_list)
{
struct list_head *plist;
struct snd_ctl_elem_list list;
struct snd_kcontrol *kctl;
struct snd_ctl_elem_id *dst, *id;
unsigned int offset, space, jidx;
if (copy_from_user(&list, _list, sizeof(list)))
return -EFAULT;
offset = list.offset;
space = list.space;
/* try limit maximum space */
if (space > 16384)
return -ENOMEM;
if (space > 0) {
/* allocate temporary buffer for atomic operation */
dst = vmalloc(space * sizeof(struct snd_ctl_elem_id));
if (dst == NULL)
return -ENOMEM;
down_read(&card->controls_rwsem);
list.count = card->controls_count;
plist = card->controls.next;
while (plist != &card->controls) {
if (offset == 0)
break;
kctl = snd_kcontrol(plist);
if (offset < kctl->count)
break;
offset -= kctl->count;
plist = plist->next;
}
list.used = 0;
id = dst;
while (space > 0 && plist != &card->controls) {
kctl = snd_kcontrol(plist);
for (jidx = offset; space > 0 && jidx < kctl->count; jidx++) {
snd_ctl_build_ioff(id, kctl, jidx);
id++;
space--;
list.used++;
}
plist = plist->next;
offset = 0;
}
up_read(&card->controls_rwsem);
if (list.used > 0 &&
copy_to_user(list.pids, dst,
list.used * sizeof(struct snd_ctl_elem_id))) {
vfree(dst);
return -EFAULT;
}
vfree(dst);
} else {
down_read(&card->controls_rwsem);
list.count = card->controls_count;
up_read(&card->controls_rwsem);
}
if (copy_to_user(_list, &list, sizeof(list)))
return -EFAULT;
return 0;
}
static int snd_ctl_elem_info(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info *info)
{
struct snd_card *card = ctl->card;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &info->id);
if (kctl == NULL) {
up_read(&card->controls_rwsem);
return -ENOENT;
}
#ifdef CONFIG_SND_DEBUG
info->access = 0;
#endif
result = kctl->info(kctl, info);
if (result >= 0) {
snd_BUG_ON(info->access);
index_offset = snd_ctl_get_ioff(kctl, &info->id);
vd = &kctl->vd[index_offset];
snd_ctl_build_ioff(&info->id, kctl, index_offset);
info->access = vd->access;
if (vd->owner) {
info->access |= SNDRV_CTL_ELEM_ACCESS_LOCK;
if (vd->owner == ctl)
info->access |= SNDRV_CTL_ELEM_ACCESS_OWNER;
info->owner = pid_vnr(vd->owner->pid);
} else {
info->owner = -1;
}
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info __user *_info)
{
struct snd_ctl_elem_info info;
int result;
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
snd_power_lock(ctl->card);
result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_info(ctl, &info);
snd_power_unlock(ctl->card);
if (result >= 0)
if (copy_to_user(_info, &info, sizeof(info)))
return -EFAULT;
return result;
}
static int snd_ctl_elem_read(struct snd_card *card,
struct snd_ctl_elem_value *control)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
if ((vd->access & SNDRV_CTL_ELEM_ACCESS_READ) &&
kctl->get != NULL) {
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = kctl->get(kctl, control);
} else
result = -EPERM;
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_read_user(struct snd_card *card,
struct snd_ctl_elem_value __user *_control)
{
struct snd_ctl_elem_value *control;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
return PTR_ERR(control);
snd_power_lock(card);
result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_read(card, control);
snd_power_unlock(card);
if (result >= 0)
if (copy_to_user(_control, control, sizeof(*control)))
result = -EFAULT;
kfree(control);
return result;
}
static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
struct snd_ctl_elem_value *control)
{
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int index_offset;
int result;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &control->id);
if (kctl == NULL) {
result = -ENOENT;
} else {
index_offset = snd_ctl_get_ioff(kctl, &control->id);
vd = &kctl->vd[index_offset];
if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_WRITE) ||
kctl->put == NULL ||
(file && vd->owner && vd->owner != file)) {
result = -EPERM;
} else {
snd_ctl_build_ioff(&control->id, kctl, index_offset);
result = kctl->put(kctl, control);
}
if (result > 0) {
struct snd_ctl_elem_id id = control->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
return 0;
}
}
up_read(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_write_user(struct snd_ctl_file *file,
struct snd_ctl_elem_value __user *_control)
{
struct snd_ctl_elem_value *control;
struct snd_card *card;
int result;
control = memdup_user(_control, sizeof(*control));
if (IS_ERR(control))
return PTR_ERR(control);
card = file->card;
snd_power_lock(card);
result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
if (result >= 0)
result = snd_ctl_elem_write(card, file, control);
snd_power_unlock(card);
if (result >= 0)
if (copy_to_user(_control, control, sizeof(*control)))
result = -EFAULT;
kfree(control);
return result;
}
static int snd_ctl_elem_lock(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_card *card = file->card;
struct snd_ctl_elem_id id;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
int result;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &id);
if (kctl == NULL) {
result = -ENOENT;
} else {
vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
if (vd->owner != NULL)
result = -EBUSY;
else {
vd->owner = file;
result = 0;
}
}
up_write(&card->controls_rwsem);
return result;
}
static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_card *card = file->card;
struct snd_ctl_elem_id id;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
int result;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, &id);
if (kctl == NULL) {
result = -ENOENT;
} else {
vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
if (vd->owner == NULL)
result = -EINVAL;
else if (vd->owner != file)
result = -EPERM;
else {
vd->owner = NULL;
result = 0;
}
}
up_write(&card->controls_rwsem);
return result;
}
struct user_element {
struct snd_ctl_elem_info info;
struct snd_card *card;
void *elem_data; /* element data */
unsigned long elem_data_size; /* size of element data in bytes */
void *tlv_data; /* TLV data */
unsigned long tlv_data_size; /* TLV data size */
void *priv_data; /* private data (like strings for enumerated type) */
};
static int snd_ctl_elem_user_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
*uinfo = ue->info;
return 0;
}
static int snd_ctl_elem_user_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct user_element *ue = kcontrol->private_data;
const char *names;
unsigned int item;
item = uinfo->value.enumerated.item;
*uinfo = ue->info;
item = min(item, uinfo->value.enumerated.items - 1);
uinfo->value.enumerated.item = item;
names = ue->priv_data;
for (; item > 0; --item)
names += strlen(names) + 1;
strcpy(uinfo->value.enumerated.name, names);
return 0;
}
static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct user_element *ue = kcontrol->private_data;
mutex_lock(&ue->card->user_ctl_lock);
memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
mutex_unlock(&ue->card->user_ctl_lock);
return 0;
}
static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int change;
struct user_element *ue = kcontrol->private_data;
mutex_lock(&ue->card->user_ctl_lock);
change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
if (change)
memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
mutex_unlock(&ue->card->user_ctl_lock);
return change;
}
static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
int op_flag,
unsigned int size,
unsigned int __user *tlv)
{
struct user_element *ue = kcontrol->private_data;
int change = 0;
void *new_data;
if (op_flag > 0) {
if (size > 1024 * 128) /* sane value */
return -EINVAL;
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
mutex_unlock(&ue->card->user_ctl_lock);
} else {
int ret = 0;
mutex_lock(&ue->card->user_ctl_lock);
if (!ue->tlv_data_size || !ue->tlv_data) {
ret = -ENXIO;
goto err_unlock;
}
if (size < ue->tlv_data_size) {
ret = -ENOSPC;
goto err_unlock;
}
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
ret = -EFAULT;
err_unlock:
mutex_unlock(&ue->card->user_ctl_lock);
if (ret)
return ret;
}
return change;
}
static int snd_ctl_elem_init_enum_names(struct user_element *ue)
{
char *names, *p;
size_t buf_len, name_len;
unsigned int i;
const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr;
if (ue->info.value.enumerated.names_length > 64 * 1024)
return -EINVAL;
names = memdup_user((const void __user *)user_ptrval,
ue->info.value.enumerated.names_length);
if (IS_ERR(names))
return PTR_ERR(names);
/* check that there are enough valid names */
buf_len = ue->info.value.enumerated.names_length;
p = names;
for (i = 0; i < ue->info.value.enumerated.items; ++i) {
name_len = strnlen(p, buf_len);
if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
kfree(names);
return -EINVAL;
}
p += name_len + 1;
buf_len -= name_len + 1;
}
ue->priv_data = names;
ue->info.value.enumerated.names_ptr = 0;
return 0;
}
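/*
 * Layout sketch (an assumption about the userspace side, not code from
 * this file): the names buffer validated above is a sequence of
 * NUL-terminated strings, one per enumerated item. For a two item
 * control:
 */
#if 0
static const char example_enum_names[] = "Off\0On";
/*
 * info.value.enumerated.items = 2;
 * info.value.enumerated.names_ptr = (uintptr_t) example_enum_names;
 * info.value.enumerated.names_length = sizeof(example_enum_names);
 *   i.e. 7 bytes, trailing NUL included.
 */
#endif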
static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
{
struct user_element *ue = kcontrol->private_data;
kfree(ue->tlv_data);
kfree(ue->priv_data);
kfree(ue);
}
static int snd_ctl_elem_add(struct snd_ctl_file *file,
struct snd_ctl_elem_info *info, int replace)
{
struct snd_card *card = file->card;
struct snd_kcontrol kctl, *_kctl;
unsigned int access;
long private_size;
struct user_element *ue;
int idx, err;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
SNDRV_CTL_ELEM_ACCESS_INACTIVE|
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
if (replace) {
err = snd_ctl_remove_user_ctl(file, &info->id);
if (err)
return err;
}
if (card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED)
kctl.info = snd_ctl_elem_user_enum_info;
else
kctl.info = snd_ctl_elem_user_info;
if (access & SNDRV_CTL_ELEM_ACCESS_READ)
kctl.get = snd_ctl_elem_user_get;
if (access & SNDRV_CTL_ELEM_ACCESS_WRITE)
kctl.put = snd_ctl_elem_user_put;
if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) {
kctl.tlv.c = snd_ctl_elem_user_tlv;
access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
}
switch (info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
case SNDRV_CTL_ELEM_TYPE_INTEGER:
private_size = sizeof(long);
if (info->count > 128)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_INTEGER64:
private_size = sizeof(long long);
if (info->count > 64)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
private_size = sizeof(unsigned int);
if (info->count > 128 || info->value.enumerated.items == 0)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_BYTES:
private_size = sizeof(unsigned char);
if (info->count > 512)
return -EINVAL;
break;
case SNDRV_CTL_ELEM_TYPE_IEC958:
private_size = sizeof(struct snd_aes_iec958);
if (info->count != 1)
return -EINVAL;
break;
default:
return -EINVAL;
}
private_size *= info->count;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
ue->elem_data_size = private_size;
if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
err = snd_ctl_elem_init_enum_names(ue);
if (err < 0) {
kfree(ue);
return err;
}
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
kfree(ue->priv_data);
kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
if (err < 0)
return err;
down_write(&card->controls_rwsem);
card->user_ctl_count++;
up_write(&card->controls_rwsem);
return 0;
}
static int snd_ctl_elem_add_user(struct snd_ctl_file *file,
struct snd_ctl_elem_info __user *_info, int replace)
{
struct snd_ctl_elem_info info;
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
return snd_ctl_elem_add(file, &info, replace);
}
static int snd_ctl_elem_remove(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_ctl_elem_id id;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
return snd_ctl_remove_user_ctl(file, &id);
}
static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr)
{
int subscribe;
if (get_user(subscribe, ptr))
return -EFAULT;
if (subscribe < 0) {
subscribe = file->subscribed;
if (put_user(subscribe, ptr))
return -EFAULT;
return 0;
}
if (subscribe) {
file->subscribed = 1;
return 0;
} else if (file->subscribed) {
snd_ctl_empty_read_queue(file);
file->subscribed = 0;
}
return 0;
}
static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
struct snd_ctl_tlv __user *_tlv,
int op_flag)
{
struct snd_card *card = file->card;
struct snd_ctl_tlv tlv;
struct snd_kcontrol *kctl;
struct snd_kcontrol_volatile *vd;
unsigned int len;
int err = 0;
if (copy_from_user(&tlv, _tlv, sizeof(tlv)))
return -EFAULT;
if (tlv.length < sizeof(unsigned int) * 2)
return -EINVAL;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_numid(card, tlv.numid);
if (kctl == NULL) {
err = -ENOENT;
goto __kctl_end;
}
if (kctl->tlv.p == NULL) {
err = -ENXIO;
goto __kctl_end;
}
vd = &kctl->vd[tlv.numid - kctl->id.numid];
if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) ||
(op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) ||
(op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) {
err = -ENXIO;
goto __kctl_end;
}
if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
if (vd->owner != NULL && vd->owner != file) {
err = -EPERM;
goto __kctl_end;
}
err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
if (err > 0) {
struct snd_ctl_elem_id id = kctl->id;
up_read(&card->controls_rwsem);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
return 0;
}
} else {
if (op_flag) {
err = -ENXIO;
goto __kctl_end;
}
len = kctl->tlv.p[1] + 2 * sizeof(unsigned int);
if (tlv.length < len) {
err = -ENOMEM;
goto __kctl_end;
}
if (copy_to_user(_tlv->tlv, kctl->tlv.p, len))
err = -EFAULT;
}
__kctl_end:
up_read(&card->controls_rwsem);
return err;
}
static long snd_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct snd_ctl_file *ctl;
struct snd_card *card;
struct snd_kctl_ioctl *p;
void __user *argp = (void __user *)arg;
int __user *ip = argp;
int err;
ctl = file->private_data;
card = ctl->card;
if (snd_BUG_ON(!card))
return -ENXIO;
switch (cmd) {
case SNDRV_CTL_IOCTL_PVERSION:
return put_user(SNDRV_CTL_VERSION, ip) ? -EFAULT : 0;
case SNDRV_CTL_IOCTL_CARD_INFO:
return snd_ctl_card_info(card, ctl, cmd, argp);
case SNDRV_CTL_IOCTL_ELEM_LIST:
return snd_ctl_elem_list(card, argp);
case SNDRV_CTL_IOCTL_ELEM_INFO:
return snd_ctl_elem_info_user(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_READ:
return snd_ctl_elem_read_user(card, argp);
case SNDRV_CTL_IOCTL_ELEM_WRITE:
return snd_ctl_elem_write_user(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_LOCK:
return snd_ctl_elem_lock(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_UNLOCK:
return snd_ctl_elem_unlock(ctl, argp);
case SNDRV_CTL_IOCTL_ELEM_ADD:
return snd_ctl_elem_add_user(ctl, argp, 0);
case SNDRV_CTL_IOCTL_ELEM_REPLACE:
return snd_ctl_elem_add_user(ctl, argp, 1);
case SNDRV_CTL_IOCTL_ELEM_REMOVE:
return snd_ctl_elem_remove(ctl, argp);
case SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS:
return snd_ctl_subscribe_events(ctl, ip);
case SNDRV_CTL_IOCTL_TLV_READ:
return snd_ctl_tlv_ioctl(ctl, argp, 0);
case SNDRV_CTL_IOCTL_TLV_WRITE:
return snd_ctl_tlv_ioctl(ctl, argp, 1);
case SNDRV_CTL_IOCTL_TLV_COMMAND:
return snd_ctl_tlv_ioctl(ctl, argp, -1);
case SNDRV_CTL_IOCTL_POWER:
return -ENOPROTOOPT;
case SNDRV_CTL_IOCTL_POWER_STATE:
#ifdef CONFIG_PM
return put_user(card->power_state, ip) ? -EFAULT : 0;
#else
return put_user(SNDRV_CTL_POWER_D0, ip) ? -EFAULT : 0;
#endif
}
down_read(&snd_ioctl_rwsem);
list_for_each_entry(p, &snd_control_ioctls, list) {
err = p->fioctl(card, ctl, cmd, arg);
if (err != -ENOIOCTLCMD) {
up_read(&snd_ioctl_rwsem);
return err;
}
}
up_read(&snd_ioctl_rwsem);
dev_dbg(card->dev, "unknown ioctl = 0x%x\n", cmd);
return -ENOTTY;
}
static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
size_t count, loff_t * offset)
{
struct snd_ctl_file *ctl;
int err = 0;
ssize_t result = 0;
ctl = file->private_data;
if (snd_BUG_ON(!ctl || !ctl->card))
return -ENXIO;
if (!ctl->subscribed)
return -EBADFD;
if (count < sizeof(struct snd_ctl_event))
return -EINVAL;
spin_lock_irq(&ctl->read_lock);
while (count >= sizeof(struct snd_ctl_event)) {
struct snd_ctl_event ev;
struct snd_kctl_event *kev;
while (list_empty(&ctl->events)) {
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto __end_lock;
}
init_waitqueue_entry(&wait, current);
add_wait_queue(&ctl->change_sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&ctl->read_lock);
schedule();
remove_wait_queue(&ctl->change_sleep, &wait);
if (ctl->card->shutdown)
return -ENODEV;
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&ctl->read_lock);
}
kev = snd_kctl_event(ctl->events.next);
ev.type = SNDRV_CTL_EVENT_ELEM;
ev.data.elem.mask = kev->mask;
ev.data.elem.id = kev->id;
list_del(&kev->list);
spin_unlock_irq(&ctl->read_lock);
kfree(kev);
if (copy_to_user(buffer, &ev, sizeof(struct snd_ctl_event))) {
err = -EFAULT;
goto __end;
}
spin_lock_irq(&ctl->read_lock);
buffer += sizeof(struct snd_ctl_event);
count -= sizeof(struct snd_ctl_event);
result += sizeof(struct snd_ctl_event);
}
__end_lock:
spin_unlock_irq(&ctl->read_lock);
__end:
return result > 0 ? result : err;
}
static unsigned int snd_ctl_poll(struct file *file, poll_table * wait)
{
unsigned int mask;
struct snd_ctl_file *ctl;
ctl = file->private_data;
if (!ctl->subscribed)
return 0;
poll_wait(file, &ctl->change_sleep, wait);
mask = 0;
if (!list_empty(&ctl->events))
mask |= POLLIN | POLLRDNORM;
return mask;
}
/*
* register the device-specific control-ioctls.
* called from each device manager like pcm.c, hwdep.c, etc.
*/
static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists)
{
struct snd_kctl_ioctl *pn;
pn = kzalloc(sizeof(struct snd_kctl_ioctl), GFP_KERNEL);
if (pn == NULL)
return -ENOMEM;
pn->fioctl = fcn;
down_write(&snd_ioctl_rwsem);
list_add_tail(&pn->list, lists);
up_write(&snd_ioctl_rwsem);
return 0;
}
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_ioctls);
}
EXPORT_SYMBOL(snd_ctl_register_ioctl);
#ifdef CONFIG_COMPAT
int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_register_ioctl(fcn, &snd_control_compat_ioctls);
}
EXPORT_SYMBOL(snd_ctl_register_ioctl_compat);
#endif
/*
* de-register the device-specific control-ioctls.
*/
static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn,
struct list_head *lists)
{
struct snd_kctl_ioctl *p;
if (snd_BUG_ON(!fcn))
return -EINVAL;
down_write(&snd_ioctl_rwsem);
list_for_each_entry(p, lists, list) {
if (p->fioctl == fcn) {
list_del(&p->list);
up_write(&snd_ioctl_rwsem);
kfree(p);
return 0;
}
}
up_write(&snd_ioctl_rwsem);
snd_BUG();
return -EINVAL;
}
int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_ioctls);
}
EXPORT_SYMBOL(snd_ctl_unregister_ioctl);
#ifdef CONFIG_COMPAT
int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn)
{
return _snd_ctl_unregister_ioctl(fcn, &snd_control_compat_ioctls);
}
EXPORT_SYMBOL(snd_ctl_unregister_ioctl_compat);
#endif
static int snd_ctl_fasync(int fd, struct file * file, int on)
{
struct snd_ctl_file *ctl;
ctl = file->private_data;
return fasync_helper(fd, file, on, &ctl->fasync);
}
/*
* ioctl32 compat
*/
#ifdef CONFIG_COMPAT
#include "control_compat.c"
#else
#define snd_ctl_ioctl_compat NULL
#endif
/*
* INIT PART
*/
static const struct file_operations snd_ctl_f_ops =
{
.owner = THIS_MODULE,
.read = snd_ctl_read,
.open = snd_ctl_open,
.release = snd_ctl_release,
.llseek = no_llseek,
.poll = snd_ctl_poll,
.unlocked_ioctl = snd_ctl_ioctl,
.compat_ioctl = snd_ctl_ioctl_compat,
.fasync = snd_ctl_fasync,
};
/*
* registration of the control device
*/
static int snd_ctl_dev_register(struct snd_device *device)
{
struct snd_card *card = device->device_data;
int err, cardnum;
char name[16];
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
sprintf(name, "controlC%i", cardnum);
if ((err = snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1,
&snd_ctl_f_ops, card, name)) < 0)
return err;
return 0;
}
/*
* disconnection of the control device
*/
static int snd_ctl_dev_disconnect(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_ctl_file *ctl;
int err, cardnum;
if (snd_BUG_ON(!card))
return -ENXIO;
cardnum = card->number;
if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS))
return -ENXIO;
read_lock(&card->ctl_files_rwlock);
list_for_each_entry(ctl, &card->ctl_files, list) {
wake_up(&ctl->change_sleep);
kill_fasync(&ctl->fasync, SIGIO, POLL_ERR);
}
read_unlock(&card->ctl_files_rwlock);
if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL,
card, -1)) < 0)
return err;
return 0;
}
/*
* free all controls
*/
static int snd_ctl_dev_free(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_kcontrol *control;
down_write(&card->controls_rwsem);
while (!list_empty(&card->controls)) {
control = snd_kcontrol(card->controls.next);
snd_ctl_remove(card, control);
}
up_write(&card->controls_rwsem);
return 0;
}
/*
* create control core:
* called from init.c
*/
int snd_ctl_create(struct snd_card *card)
{
static struct snd_device_ops ops = {
.dev_free = snd_ctl_dev_free,
.dev_register = snd_ctl_dev_register,
.dev_disconnect = snd_ctl_dev_disconnect,
};
if (snd_BUG_ON(!card))
return -ENXIO;
return snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops);
}
/*
* Frequently used control callbacks/helpers
*/
int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
EXPORT_SYMBOL(snd_ctl_boolean_mono_info);
int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
/**
* snd_ctl_enum_info - fills the info structure for an enumerated control
* @info: the structure to be filled
* @channels: the number of the control's channels; often one
* @items: the number of control values; also the size of @names
* @names: an array containing the names of all control values
*
* Sets all required fields in @info to their appropriate values.
* If the control's accessibility is not the default (readable and writable),
* the caller has to fill @info->access.
*
* Return: Zero.
*/
int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
unsigned int items, const char *const names[])
{
info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
info->count = channels;
info->value.enumerated.items = items;
if (info->value.enumerated.item >= items)
info->value.enumerated.item = items - 1;
strlcpy(info->value.enumerated.name,
names[info->value.enumerated.item],
sizeof(info->value.enumerated.name));
return 0;
}
EXPORT_SYMBOL(snd_ctl_enum_info);
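/*
 * Editor's sketch: a minimal, hypothetical .info callback built on
 * snd_ctl_enum_info() above. The function name and item strings are
 * invented for illustration; they are not part of this file.
 */
#if 0
static int example_route_info(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_info *uinfo)
{
	/* names of the enumerated items: one channel, two items */
	static const char *const texts[] = { "Headphone", "Speaker" };

	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}
#endif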
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2219_0 |
crossvul-cpp_data_good_5755_1 | /*
* BRIEF MODULE DESCRIPTION
* Au1100 LCD Driver.
*
* Rewritten for 2.6 by Embedded Alley Solutions
* <source@embeddedalley.com>, based on submissions by
* Karl Lessard <klessard@sunrisetelecom.com>
* <c.pellegrin@exadron.com>
*
* PM support added by Rodolfo Giometti <giometti@linux.it>
* Cursor enable/disable by Rodolfo Giometti <giometti@linux.it>
*
* Copyright 2002 MontaVista Software
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
* Copyright 2002 Alchemy Semiconductor
* Author: Alchemy Semiconductor
*
* Based on:
* linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
* Created 28 Dec 1997 by Geert Uytterhoeven
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/mach-au1x00/au1000.h>
#define DEBUG 0
#include "au1100fb.h"
#define DRIVER_NAME "au1100fb"
#define DRIVER_DESC "LCD controller driver for AU1100 processors"
#define to_au1100fb_device(_info) \
	  (_info ? container_of(_info, struct au1100fb_device, info) : NULL)
/* Bitfield formats supported by the controller. Note that the order of formats
* SHOULD be the same as in the LCD_CONTROL_SBPPF field, so we can retrieve the
* right pixel format by doing rgb_bitfields[LCD_CONTROL_SBPPF_XXX >> LCD_CONTROL_SBPPF]
*/
struct fb_bitfield rgb_bitfields[][4] =
{
/* Red, Green, Blue, Transp */
{ { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
{ { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
{ { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } },
{ { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } },
{ { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } },
/* The last is used to describe 12bpp format */
{ { 8, 4, 0 }, { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } },
};
static struct fb_fix_screeninfo au1100fb_fix = {
.id = "AU1100 FB",
.xpanstep = 1,
.ypanstep = 1,
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
};
static struct fb_var_screeninfo au1100fb_var = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.vmode = FB_VMODE_NONINTERLACED,
};
/* fb_blank
* Blank the screen. Depending on the mode, the screen will be
 * activated with the backlight color, or deactivated
*/
static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
print_dbg("fb_blank %d %p", blank_mode, fbi);
switch (blank_mode) {
case VESA_NO_BLANKING:
/* Turn on panel */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
au_sync();
break;
case VESA_VSYNC_SUSPEND:
case VESA_HSYNC_SUSPEND:
case VESA_POWERDOWN:
/* Turn off panel */
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
au_sync();
break;
default:
break;
}
return 0;
}
/*
* Set hardware with var settings. This will enable the controller with a specific
* mode, normally validated with the fb_check_var method
*/
int au1100fb_setmode(struct au1100fb_device *fbdev)
{
struct fb_info *info = &fbdev->info;
u32 words;
int index;
if (!fbdev)
return -EINVAL;
/* Update var-dependent FB info */
if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) {
if (info->var.bits_per_pixel <= 8) {
/* palettized */
info->var.red.offset = 0;
info->var.red.length = info->var.bits_per_pixel;
info->var.red.msb_right = 0;
info->var.green.offset = 0;
info->var.green.length = info->var.bits_per_pixel;
info->var.green.msb_right = 0;
info->var.blue.offset = 0;
info->var.blue.length = info->var.bits_per_pixel;
info->var.blue.msb_right = 0;
info->var.transp.offset = 0;
info->var.transp.length = 0;
info->var.transp.msb_right = 0;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
info->fix.line_length = info->var.xres_virtual /
(8/info->var.bits_per_pixel);
} else {
/* non-palettized */
index = (fbdev->panel->control_base & LCD_CONTROL_SBPPF_MASK) >> LCD_CONTROL_SBPPF_BIT;
info->var.red = rgb_bitfields[index][0];
info->var.green = rgb_bitfields[index][1];
info->var.blue = rgb_bitfields[index][2];
info->var.transp = rgb_bitfields[index][3];
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */
}
} else {
/* mono */
info->fix.visual = FB_VISUAL_MONO10;
info->fix.line_length = info->var.xres_virtual / 8;
}
info->screen_size = info->fix.line_length * info->var.yres_virtual;
info->var.rotate = ((fbdev->panel->control_base&LCD_CONTROL_SM_MASK) \
>> LCD_CONTROL_SM_BIT) * 90;
/* Determine BPP mode and format */
fbdev->regs->lcd_control = fbdev->panel->control_base;
fbdev->regs->lcd_horztiming = fbdev->panel->horztiming;
fbdev->regs->lcd_verttiming = fbdev->panel->verttiming;
fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base;
fbdev->regs->lcd_intenable = 0;
fbdev->regs->lcd_intstatus = 0;
fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys);
if (panel_is_dual(fbdev->panel)) {
		/* The second panel displays the second half of the screen if
		 * possible, otherwise it displays the same as the first panel */
if (info->var.yres_virtual >= (info->var.yres << 1)) {
fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(fbdev->fb_phys +
(info->fix.line_length *
(info->var.yres_virtual >> 1)));
} else {
fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(fbdev->fb_phys);
}
}
words = info->fix.line_length / sizeof(u32);
if (!info->var.rotate || (info->var.rotate == 180)) {
words *= info->var.yres_virtual;
if (info->var.rotate /* 180 */) {
			words -= (words % 8); /* should be divisible by 8 */
}
}
fbdev->regs->lcd_words = LCD_WRD_WRDS_N(words);
fbdev->regs->lcd_pwmdiv = 0;
fbdev->regs->lcd_pwmhi = 0;
/* Resume controller */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
mdelay(10);
au1100fb_fb_blank(VESA_NO_BLANKING, info);
return 0;
}
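/*
 * Editor's note, a worked example for the words computation above (the
 * panel values are assumptions for illustration): a 320x240 panel at
 * 16 bpp has line_length = 320 * 2 = 640 bytes, i.e. 640 / 4 = 160
 * 32-bit words per line; unrotated, words *= yres_virtual gives
 * 160 * 240 = 38400 words passed to LCD_WRD_WRDS_N().
 */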
/* fb_setcolreg
* Set color in LCD palette.
*/
int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fbi)
{
struct au1100fb_device *fbdev;
u32 *palette;
u32 value;
fbdev = to_au1100fb_device(fbi);
palette = fbdev->regs->lcd_pallettebase;
if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;
if (fbi->var.grayscale) {
/* Convert color to grayscale */
red = green = blue =
(19595 * red + 38470 * green + 7471 * blue) >> 16;
}
if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
/* Place color in the pseudopalette */
if (regno > 16)
return -EINVAL;
palette = (u32*)fbi->pseudo_palette;
red >>= (16 - fbi->var.red.length);
green >>= (16 - fbi->var.green.length);
blue >>= (16 - fbi->var.blue.length);
value = (red << fbi->var.red.offset) |
(green << fbi->var.green.offset)|
(blue << fbi->var.blue.offset);
value &= 0xFFFF;
} else if (panel_is_active(fbdev->panel)) {
		/* COLOR TFT PALETTIZED (use RGB 565) */
value = (red & 0xF800)|((green >> 5) & 0x07E0)|((blue >> 11) & 0x001F);
value &= 0xFFFF;
} else if (panel_is_color(fbdev->panel)) {
/* COLOR STN MODE */
value = (((panel_swap_rgb(fbdev->panel) ? blue : red) >> 12) & 0x000F) |
((green >> 8) & 0x00F0) |
(((panel_swap_rgb(fbdev->panel) ? red : blue) >> 4) & 0x0F00);
value &= 0xFFF;
} else {
/* MONOCHROME MODE */
value = (green >> 12) & 0x000F;
value &= 0xF;
}
palette[regno] = value;
return 0;
}
/* fb_pan_display
* Pan display in x and/or y as specified
*/
int au1100fb_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi)
{
struct au1100fb_device *fbdev;
int dy;
fbdev = to_au1100fb_device(fbi);
print_dbg("fb_pan_display %p %p", var, fbi);
if (!var || !fbdev) {
return -EINVAL;
}
if (var->xoffset - fbi->var.xoffset) {
/* No support for X panning for now! */
return -EINVAL;
}
print_dbg("fb_pan_display 2 %p %p", var, fbi);
dy = var->yoffset - fbi->var.yoffset;
if (dy) {
u32 dmaaddr;
print_dbg("Panning screen of %d lines", dy);
dmaaddr = fbdev->regs->lcd_dmaaddr0;
dmaaddr += (fbi->fix.line_length * dy);
		/* TODO: Wait for current frame to finish */
fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(dmaaddr);
if (panel_is_dual(fbdev->panel)) {
dmaaddr = fbdev->regs->lcd_dmaaddr1;
dmaaddr += (fbi->fix.line_length * dy);
			/* the second panel pans via its own DMA address register */
			fbdev->regs->lcd_dmaaddr1 = LCD_DMA_SA_N(dmaaddr);
}
}
print_dbg("fb_pan_display 3 %p %p", var, fbi);
return 0;
}
/* fb_rotate
 * Rotate the display by this angle. This doesn't seem to be used by the core,
 * but our hardware supports it, so why not implement it...
*/
void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
print_dbg("fb_rotate %p %d", fbi, angle);
if (fbdev && (angle > 0) && !(angle % 90)) {
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
fbdev->regs->lcd_control &= ~(LCD_CONTROL_SM_MASK);
fbdev->regs->lcd_control |= ((angle/90) << LCD_CONTROL_SM_BIT);
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
}
}
/* fb_mmap
* Map video memory in user space. We don't use the generic fb_mmap method mainly
* to allow the use of the TLB streaming flag (CCA=6)
*/
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev;
fbdev = to_au1100fb_device(fbi);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}
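/*
 * Editor's sketch (userspace side, illustration only): once fb_mmap is
 * wired up, an application can map the framebuffer as below. The device
 * path and the headers are assumptions, not part of this driver.
 */
#if 0
/* userspace: needs <fcntl.h>, <sys/ioctl.h>, <sys/mman.h>, <linux/fb.h> */
static void *map_framebuffer(void)
{
	struct fb_fix_screeninfo fix;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0 || ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
		return NULL;
	/* MAP_SHARED so stores reach the (noncached) LCD memory directly */
	return mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
}
#endif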
static struct fb_ops au1100fb_ops =
{
.owner = THIS_MODULE,
.fb_setcolreg = au1100fb_fb_setcolreg,
.fb_blank = au1100fb_fb_blank,
.fb_pan_display = au1100fb_fb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_rotate = au1100fb_fb_rotate,
.fb_mmap = au1100fb_fb_mmap,
};
/*-------------------------------------------------------------------------*/
static int au1100fb_setup(struct au1100fb_device *fbdev)
{
char *this_opt, *options;
int num_panels = ARRAY_SIZE(known_lcd_panels);
if (num_panels <= 0) {
print_err("No LCD panels supported by driver!");
return -ENODEV;
}
if (fb_get_options(DRIVER_NAME, &options))
return -ENODEV;
if (!options)
return -ENODEV;
while ((this_opt = strsep(&options, ",")) != NULL) {
/* Panel option */
if (!strncmp(this_opt, "panel:", 6)) {
int i;
this_opt += 6;
for (i = 0; i < num_panels; i++) {
if (!strncmp(this_opt, known_lcd_panels[i].name,
strlen(this_opt))) {
fbdev->panel = &known_lcd_panels[i];
fbdev->panel_idx = i;
break;
}
}
if (i >= num_panels) {
print_warn("Panel '%s' not supported!", this_opt);
return -ENODEV;
}
}
/* Unsupported option */
else
print_warn("Unsupported option \"%s\"", this_opt);
}
print_info("Panel=%s", fbdev->panel->name);
return 0;
}
static int au1100fb_drv_probe(struct platform_device *dev)
{
struct au1100fb_device *fbdev = NULL;
struct resource *regs_res;
unsigned long page;
u32 sys_clksrc;
/* Allocate new device private */
fbdev = devm_kzalloc(&dev->dev, sizeof(struct au1100fb_device),
GFP_KERNEL);
if (!fbdev) {
print_err("fail to allocate device private record");
return -ENOMEM;
}
if (au1100fb_setup(fbdev))
goto failed;
platform_set_drvdata(dev, (void *)fbdev);
/* Allocate region for our registers and map them */
regs_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!regs_res) {
print_err("fail to retrieve registers resource");
return -EFAULT;
}
au1100fb_fix.mmio_start = regs_res->start;
au1100fb_fix.mmio_len = resource_size(regs_res);
if (!devm_request_mem_region(&dev->dev,
au1100fb_fix.mmio_start,
au1100fb_fix.mmio_len,
DRIVER_NAME)) {
print_err("fail to lock memory region at 0x%08lx",
au1100fb_fix.mmio_start);
return -EBUSY;
}
fbdev->regs = (struct au1100fb_regs*)KSEG1ADDR(au1100fb_fix.mmio_start);
print_dbg("Register memory map at %p", fbdev->regs);
print_dbg("phys=0x%08x, size=%d", fbdev->regs_phys, fbdev->regs_len);
/* Allocate the framebuffer to the maximum screen size * nbr of video buffers */
fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
fbdev->fb_mem = dmam_alloc_coherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
return -ENOMEM;
}
au1100fb_fix.smem_start = fbdev->fb_phys;
au1100fb_fix.smem_len = fbdev->fb_len;
/*
* Set page reserved so that mmap will work. This is necessary
* since we'll be remapping normal memory.
*/
for (page = (unsigned long)fbdev->fb_mem;
page < PAGE_ALIGN((unsigned long)fbdev->fb_mem + fbdev->fb_len);
page += PAGE_SIZE) {
#ifdef CONFIG_DMA_NONCOHERENT
SetPageReserved(virt_to_page(CAC_ADDR((void *)page)));
#else
SetPageReserved(virt_to_page(page));
#endif
}
print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Setup LCD clock to AUX (48 MHz) */
sys_clksrc = au_readl(SYS_CLKSRC) & ~(SYS_CS_ML_MASK | SYS_CS_DL | SYS_CS_CL);
au_writel((sys_clksrc | (1 << SYS_CS_ML_BIT)), SYS_CLKSRC);
/* load the panel info into the var struct */
au1100fb_var.bits_per_pixel = fbdev->panel->bpp;
au1100fb_var.xres = fbdev->panel->xres;
au1100fb_var.xres_virtual = au1100fb_var.xres;
au1100fb_var.yres = fbdev->panel->yres;
au1100fb_var.yres_virtual = au1100fb_var.yres;
fbdev->info.screen_base = fbdev->fb_mem;
fbdev->info.fbops = &au1100fb_ops;
fbdev->info.fix = au1100fb_fix;
fbdev->info.pseudo_palette =
devm_kzalloc(&dev->dev, sizeof(u32) * 16, GFP_KERNEL);
if (!fbdev->info.pseudo_palette)
return -ENOMEM;
if (fb_alloc_cmap(&fbdev->info.cmap, AU1100_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
print_err("Fail to allocate colormap (%d entries)",
AU1100_LCD_NBR_PALETTE_ENTRIES);
return -EFAULT;
}
fbdev->info.var = au1100fb_var;
/* Set h/w registers */
au1100fb_setmode(fbdev);
/* Register new framebuffer */
if (register_framebuffer(&fbdev->info) < 0) {
print_err("cannot register new framebuffer");
goto failed;
}
return 0;
failed:
	/*
	 * fb_mem was allocated with dmam_alloc_coherent(), so it is released
	 * automatically by the device-managed framework; freeing it here
	 * manually would mismatch the allocator and double-free it.
	 */
if (fbdev->info.cmap.len != 0) {
fb_dealloc_cmap(&fbdev->info.cmap);
}
return -ENODEV;
}
int au1100fb_drv_remove(struct platform_device *dev)
{
struct au1100fb_device *fbdev = NULL;
if (!dev)
return -ENODEV;
fbdev = (struct au1100fb_device *) platform_get_drvdata(dev);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
#endif
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
/* Clean up all probe data */
unregister_framebuffer(&fbdev->info);
fb_dealloc_cmap(&fbdev->info.cmap);
return 0;
}
#ifdef CONFIG_PM
static u32 sys_clksrc;
static struct au1100fb_regs fbregs;
int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state)
{
struct au1100fb_device *fbdev = platform_get_drvdata(dev);
if (!fbdev)
return 0;
/* Save the clock source state */
sys_clksrc = au_readl(SYS_CLKSRC);
/* Blank the LCD */
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
/* Stop LCD clocking */
au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC);
memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
return 0;
}
int au1100fb_drv_resume(struct platform_device *dev)
{
struct au1100fb_device *fbdev = platform_get_drvdata(dev);
if (!fbdev)
return 0;
memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs));
/* Restart LCD clocking */
au_writel(sys_clksrc, SYS_CLKSRC);
/* Unblank the LCD */
au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info);
return 0;
}
#else
#define au1100fb_drv_suspend NULL
#define au1100fb_drv_resume NULL
#endif
static struct platform_driver au1100fb_driver = {
.driver = {
.name = "au1100-lcd",
.owner = THIS_MODULE,
},
.probe = au1100fb_drv_probe,
.remove = au1100fb_drv_remove,
.suspend = au1100fb_drv_suspend,
.resume = au1100fb_drv_resume,
};
static int __init au1100fb_load(void)
{
return platform_driver_register(&au1100fb_driver);
}
static void __exit au1100fb_unload(void)
{
platform_driver_unregister(&au1100fb_driver);
}
module_init(au1100fb_load);
module_exit(au1100fb_unload);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5755_1 |
crossvul-cpp_data_bad_2020_4 | /*
* contrib/intarray/_int_bool.c
*/
#include "postgres.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "_int.h"
PG_FUNCTION_INFO_V1(bqarr_in);
PG_FUNCTION_INFO_V1(bqarr_out);
Datum bqarr_in(PG_FUNCTION_ARGS);
Datum bqarr_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(boolop);
Datum boolop(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(rboolop);
Datum rboolop(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(querytree);
Datum querytree(PG_FUNCTION_ARGS);
/* parser's states */
#define WAITOPERAND 1
#define WAITENDOPERAND 2
#define WAITOPERATOR 3
/*
* node of query tree, also used
* for storing polish notation in parser
*/
typedef struct NODE
{
int32 type;
int32 val;
struct NODE *next;
} NODE;
typedef struct
{
char *buf;
int32 state;
int32 count;
/* reverse polish notation in list (for temporary usage) */
NODE *str;
/* number in str */
int32 num;
} WORKSTATE;
/*
* get token from query string
*/
static int32
gettoken(WORKSTATE *state, int32 *val)
{
char nnn[16];
int innn;
*val = 0; /* default result */
innn = 0;
while (1)
{
if (innn >= sizeof(nnn))
return ERR; /* buffer overrun => syntax error */
switch (state->state)
{
case WAITOPERAND:
innn = 0;
if ((*(state->buf) >= '0' && *(state->buf) <= '9') ||
*(state->buf) == '-')
{
state->state = WAITENDOPERAND;
nnn[innn++] = *(state->buf);
}
else if (*(state->buf) == '!')
{
(state->buf)++;
*val = (int32) '!';
return OPR;
}
else if (*(state->buf) == '(')
{
state->count++;
(state->buf)++;
return OPEN;
}
else if (*(state->buf) != ' ')
return ERR;
break;
case WAITENDOPERAND:
if (*(state->buf) >= '0' && *(state->buf) <= '9')
{
nnn[innn++] = *(state->buf);
}
else
{
long lval;
nnn[innn] = '\0';
errno = 0;
lval = strtol(nnn, NULL, 0);
*val = (int32) lval;
if (errno != 0 || (long) *val != lval)
return ERR;
state->state = WAITOPERATOR;
return (state->count && *(state->buf) == '\0')
? ERR : VAL;
}
break;
case WAITOPERATOR:
if (*(state->buf) == '&' || *(state->buf) == '|')
{
state->state = WAITOPERAND;
*val = (int32) *(state->buf);
(state->buf)++;
return OPR;
}
else if (*(state->buf) == ')')
{
(state->buf)++;
state->count--;
return (state->count < 0) ? ERR : CLOSE;
}
else if (*(state->buf) == '\0')
return (state->count) ? ERR : END;
else if (*(state->buf) != ' ')
return ERR;
break;
default:
return ERR;
break;
}
(state->buf)++;
}
}
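/*
 * Editor's note: gettoken() is a three-state machine. For the input
 * "12 | !3" it returns, in order, VAL 12, OPR '|', OPR '!', VAL 3 and
 * END; digits are accumulated in nnn[] (at most 15 characters,
 * including an optional sign) and range-checked through strtol().
 */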
/*
* push new one in polish notation reverse view
*/
static void
pushquery(WORKSTATE *state, int32 type, int32 val)
{
NODE *tmp = (NODE *) palloc(sizeof(NODE));
tmp->type = type;
tmp->val = val;
tmp->next = state->str;
state->str = tmp;
state->num++;
}
#define STACKDEPTH 16
/*
* make polish notation of query
*/
static int32
makepol(WORKSTATE *state)
{
int32 val,
type;
int32 stack[STACKDEPTH];
int32 lenstack = 0;
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
while ((type = gettoken(state, &val)) != END)
{
switch (type)
{
case VAL:
pushquery(state, type, val);
while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
}
break;
case OPR:
if (lenstack && val == (int32) '|')
pushquery(state, OPR, val);
else
{
if (lenstack == STACKDEPTH)
ereport(ERROR,
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
errmsg("statement too complex")));
stack[lenstack] = val;
lenstack++;
}
break;
case OPEN:
if (makepol(state) == ERR)
return ERR;
while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
}
break;
case CLOSE:
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
};
return END;
break;
case ERR:
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error")));
return ERR;
}
}
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
};
return END;
}
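/*
 * Editor's note, a worked example of the notation makepol() produces:
 * for the query string "1&(2|3)" the nodes are emitted so that, after
 * bqarr_in() reverses the temporary list into the ITEM array, storage
 * order is the postfix sequence
 *
 *	VAL 1, VAL 2, VAL 3, OPR '|', OPR '&'
 *
 * with the root operator last; execute() and findoprnd() walk the
 * array from that final element backwards.
 */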
typedef struct
{
int32 *arrb;
int32 *arre;
} CHKVAL;
/*
 * is the value 'val' in the (sorted) array or not?
*/
static bool
checkcondition_arr(void *checkval, ITEM *item)
{
int32 *StopLow = ((CHKVAL *) checkval)->arrb;
int32 *StopHigh = ((CHKVAL *) checkval)->arre;
int32 *StopMiddle;
/* Loop invariant: StopLow <= val < StopHigh */
while (StopLow < StopHigh)
{
StopMiddle = StopLow + (StopHigh - StopLow) / 2;
if (*StopMiddle == item->val)
return (true);
else if (*StopMiddle < item->val)
StopLow = StopMiddle + 1;
else
StopHigh = StopMiddle;
}
return false;
}
static bool
checkcondition_bit(void *checkval, ITEM *item)
{
return GETBIT(checkval, HASHVAL(item->val));
}
/*
* evaluate boolean expression, using chkcond() to test the primitive cases
*/
static bool
execute(ITEM *curitem, void *checkval, bool calcnot,
bool (*chkcond) (void *checkval, ITEM *item))
{
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
if (curitem->type == VAL)
return (*chkcond) (checkval, curitem);
else if (curitem->val == (int32) '!')
{
return (calcnot) ?
((execute(curitem - 1, checkval, calcnot, chkcond)) ? false : true)
: true;
}
else if (curitem->val == (int32) '&')
{
if (execute(curitem + curitem->left, checkval, calcnot, chkcond))
return execute(curitem - 1, checkval, calcnot, chkcond);
else
return false;
}
else
{ /* |-operator */
if (execute(curitem + curitem->left, checkval, calcnot, chkcond))
return true;
else
return execute(curitem - 1, checkval, calcnot, chkcond);
}
}
/*
* signconsistent & execconsistent called by *_consistent
*/
bool
signconsistent(QUERYTYPE *query, BITVEC sign, bool calcnot)
{
return execute(GETQUERY(query) + query->size - 1,
(void *) sign, calcnot,
checkcondition_bit);
}
/* Array must be sorted! */
bool
execconsistent(QUERYTYPE *query, ArrayType *array, bool calcnot)
{
CHKVAL chkval;
CHECKARRVALID(array);
chkval.arrb = ARRPTR(array);
chkval.arre = chkval.arrb + ARRNELEMS(array);
return execute(GETQUERY(query) + query->size - 1,
(void *) &chkval, calcnot,
checkcondition_arr);
}
typedef struct
{
ITEM *first;
bool *mapped_check;
} GinChkVal;
static bool
checkcondition_gin(void *checkval, ITEM *item)
{
GinChkVal *gcv = (GinChkVal *) checkval;
return gcv->mapped_check[item - gcv->first];
}
bool
gin_bool_consistent(QUERYTYPE *query, bool *check)
{
GinChkVal gcv;
ITEM *items = GETQUERY(query);
int i,
j = 0;
if (query->size <= 0)
return FALSE;
/*
* Set up data for checkcondition_gin. This must agree with the query
* extraction code in ginint4_queryextract.
*/
gcv.first = items;
gcv.mapped_check = (bool *) palloc(sizeof(bool) * query->size);
for (i = 0; i < query->size; i++)
{
if (items[i].type == VAL)
gcv.mapped_check[i] = check[j++];
}
return execute(GETQUERY(query) + query->size - 1,
(void *) &gcv, true,
checkcondition_gin);
}
static bool
contains_required_value(ITEM *curitem)
{
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
if (curitem->type == VAL)
return true;
else if (curitem->val == (int32) '!')
{
/*
* Assume anything under a NOT is non-required. For some cases with
* nested NOTs, we could prove there's a required value, but it seems
* unlikely to be worth the trouble.
*/
return false;
}
else if (curitem->val == (int32) '&')
{
/* If either side has a required value, we're good */
if (contains_required_value(curitem + curitem->left))
return true;
else
return contains_required_value(curitem - 1);
}
else
{ /* |-operator */
/* Both sides must have required values */
if (contains_required_value(curitem + curitem->left))
return contains_required_value(curitem - 1);
else
return false;
}
}
bool
query_has_required_values(QUERYTYPE *query)
{
if (query->size <= 0)
return false;
return contains_required_value(GETQUERY(query) + query->size - 1);
}
/*
* boolean operations
*/
Datum
rboolop(PG_FUNCTION_ARGS)
{
/* just reverse the operands */
return DirectFunctionCall2(boolop,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0));
}
Datum
boolop(PG_FUNCTION_ARGS)
{
ArrayType *val = PG_GETARG_ARRAYTYPE_P_COPY(0);
QUERYTYPE *query = PG_GETARG_QUERYTYPE_P(1);
CHKVAL chkval;
bool result;
CHECKARRVALID(val);
PREPAREARR(val);
chkval.arrb = ARRPTR(val);
chkval.arre = chkval.arrb + ARRNELEMS(val);
result = execute(GETQUERY(query) + query->size - 1,
&chkval, true,
checkcondition_arr);
pfree(val);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(result);
}
static void
findoprnd(ITEM *ptr, int32 *pos)
{
#ifdef BS_DEBUG
elog(DEBUG3, (ptr[*pos].type == OPR) ?
"%d %c" : "%d %d", *pos, ptr[*pos].val);
#endif
if (ptr[*pos].type == VAL)
{
ptr[*pos].left = 0;
(*pos)--;
}
else if (ptr[*pos].val == (int32) '!')
{
ptr[*pos].left = -1;
(*pos)--;
findoprnd(ptr, pos);
}
else
{
ITEM *curitem = &ptr[*pos];
int32 tmp = *pos;
(*pos)--;
findoprnd(ptr, pos);
curitem->left = *pos - tmp;
findoprnd(ptr, pos);
}
}
/*
* input
*/
Datum
bqarr_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
WORKSTATE state;
int32 i;
QUERYTYPE *query;
int32 commonlen;
ITEM *ptr;
NODE *tmp;
int32 pos = 0;
#ifdef BS_DEBUG
StringInfoData pbuf;
#endif
state.buf = buf;
state.state = WAITOPERAND;
state.count = 0;
state.num = 0;
state.str = NULL;
/* make polish notation (postfix, but in reverse order) */
makepol(&state);
if (!state.num)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("empty query")));
commonlen = COMPUTESIZE(state.num);
query = (QUERYTYPE *) palloc(commonlen);
SET_VARSIZE(query, commonlen);
query->size = state.num;
ptr = GETQUERY(query);
for (i = state.num - 1; i >= 0; i--)
{
ptr[i].type = state.str->type;
ptr[i].val = state.str->val;
tmp = state.str->next;
pfree(state.str);
state.str = tmp;
}
pos = query->size - 1;
findoprnd(ptr, &pos);
#ifdef BS_DEBUG
initStringInfo(&pbuf);
for (i = 0; i < query->size; i++)
{
if (ptr[i].type == OPR)
appendStringInfo(&pbuf, "%c(%d) ", ptr[i].val, ptr[i].left);
else
appendStringInfo(&pbuf, "%d ", ptr[i].val);
}
elog(DEBUG3, "POR: %s", pbuf.data);
pfree(pbuf.data);
#endif
PG_RETURN_POINTER(query);
}
/*
* out function
*/
typedef struct
{
ITEM *curpol;
char *buf;
char *cur;
int32 buflen;
} INFIX;
#define RESIZEBUF(inf,addsize) while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) { \
int32 len = inf->cur - inf->buf; \
inf->buflen *= 2; \
inf->buf = (char*) repalloc( (void*)inf->buf, inf->buflen ); \
inf->cur = inf->buf + len; \
}
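/*
 * Editor's note: RESIZEBUF doubles the buffer until the pending write
 * fits, giving amortized O(n) growth. For example, appending 40 bytes
 * at offset 10 to a 16-byte buffer doubles it 16 -> 32 -> 64 before
 * the following sprintf runs; `cur` is re-based afterwards because
 * repalloc() may move the buffer.
 */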
static void
infix(INFIX *in, bool first)
{
if (in->curpol->type == VAL)
{
RESIZEBUF(in, 11);
sprintf(in->cur, "%d", in->curpol->val);
in->cur = strchr(in->cur, '\0');
in->curpol--;
}
else if (in->curpol->val == (int32) '!')
{
bool isopr = false;
RESIZEBUF(in, 1);
*(in->cur) = '!';
in->cur++;
*(in->cur) = '\0';
in->curpol--;
if (in->curpol->type == OPR)
{
isopr = true;
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
in->cur = strchr(in->cur, '\0');
}
infix(in, isopr);
if (isopr)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
in->cur = strchr(in->cur, '\0');
}
}
else
{
int32 op = in->curpol->val;
INFIX nrm;
in->curpol--;
if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
in->cur = strchr(in->cur, '\0');
}
nrm.curpol = in->curpol;
nrm.buflen = 16;
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
/* get right operand */
infix(&nrm, false);
/* get & print left operand */
in->curpol = nrm.curpol;
infix(in, false);
/* print operator & right operand */
RESIZEBUF(in, 3 + (nrm.cur - nrm.buf));
sprintf(in->cur, " %c %s", op, nrm.buf);
in->cur = strchr(in->cur, '\0');
pfree(nrm.buf);
if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
in->cur = strchr(in->cur, '\0');
}
}
}
Datum
bqarr_out(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = PG_GETARG_QUERYTYPE_P(0);
INFIX nrm;
if (query->size == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("empty query")));
nrm.curpol = GETQUERY(query) + query->size - 1;
nrm.buflen = 32;
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
*(nrm.cur) = '\0';
infix(&nrm, true);
PG_FREE_IF_COPY(query, 0);
PG_RETURN_POINTER(nrm.buf);
}
/* Useless old "debugging" function for a fundamentally wrong algorithm */
Datum
querytree(PG_FUNCTION_ARGS)
{
elog(ERROR, "querytree is no longer implemented");
PG_RETURN_NULL();
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2020_4 |
crossvul-cpp_data_good_3570_0 | /*
* Copyright (c) 2008, Christoph Hellwig
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
/*
* Locking scheme:
* - all ACL updates are protected by inode->i_mutex, which is taken before
* calling into this file.
*/
STATIC struct posix_acl *
xfs_acl_from_disk(struct xfs_acl *aclp)
{
struct posix_acl_entry *acl_e;
struct posix_acl *acl;
struct xfs_acl_entry *ace;
int count, i;
count = be32_to_cpu(aclp->acl_cnt);
if (count > XFS_ACL_MAX_ENTRIES)
return ERR_PTR(-EFSCORRUPTED);
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
acl_e = &acl->a_entries[i];
ace = &aclp->acl_entry[i];
/*
* The tag is 32 bits on disk and 16 bits in core.
*
* Because every access to it goes through the core
* format first this is not a problem.
*/
acl_e->e_tag = be32_to_cpu(ace->ae_tag);
acl_e->e_perm = be16_to_cpu(ace->ae_perm);
switch (acl_e->e_tag) {
case ACL_USER:
case ACL_GROUP:
acl_e->e_id = be32_to_cpu(ace->ae_id);
break;
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
acl_e->e_id = ACL_UNDEFINED_ID;
break;
default:
goto fail;
}
}
return acl;
fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
STATIC void
xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
{
const struct posix_acl_entry *acl_e;
struct xfs_acl_entry *ace;
int i;
aclp->acl_cnt = cpu_to_be32(acl->a_count);
for (i = 0; i < acl->a_count; i++) {
ace = &aclp->acl_entry[i];
acl_e = &acl->a_entries[i];
ace->ae_tag = cpu_to_be32(acl_e->e_tag);
ace->ae_id = cpu_to_be32(acl_e->e_id);
ace->ae_perm = cpu_to_be16(acl_e->e_perm);
}
}
struct posix_acl *
xfs_get_acl(struct inode *inode, int type)
{
struct xfs_inode *ip = XFS_I(inode);
struct posix_acl *acl;
struct xfs_acl *xfs_acl;
int len = sizeof(struct xfs_acl);
unsigned char *ea_name;
int error;
acl = get_cached_acl(inode, type);
if (acl != ACL_NOT_CACHED)
return acl;
trace_xfs_get_acl(ip);
switch (type) {
case ACL_TYPE_ACCESS:
ea_name = SGI_ACL_FILE;
break;
case ACL_TYPE_DEFAULT:
ea_name = SGI_ACL_DEFAULT;
break;
default:
BUG();
}
/*
	 * If we have a cached ACL value, just return it; no need to
	 * go out to the disk.
*/
xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
if (!xfs_acl)
return ERR_PTR(-ENOMEM);
error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl,
&len, ATTR_ROOT);
if (error) {
/*
* If the attribute doesn't exist make sure we have a negative
* cache entry, for any other error assume it is transient and
* leave the cache entry as ACL_NOT_CACHED.
*/
if (error == -ENOATTR) {
acl = NULL;
goto out_update_cache;
}
goto out;
}
acl = xfs_acl_from_disk(xfs_acl);
if (IS_ERR(acl))
goto out;
out_update_cache:
set_cached_acl(inode, type, acl);
out:
kfree(xfs_acl);
return acl;
}
STATIC int
xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
struct xfs_inode *ip = XFS_I(inode);
unsigned char *ea_name;
int error;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
switch (type) {
case ACL_TYPE_ACCESS:
ea_name = SGI_ACL_FILE;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
ea_name = SGI_ACL_DEFAULT;
break;
default:
return -EINVAL;
}
if (acl) {
struct xfs_acl *xfs_acl;
int len;
xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
if (!xfs_acl)
return -ENOMEM;
xfs_acl_to_disk(xfs_acl, acl);
len = sizeof(struct xfs_acl) -
(sizeof(struct xfs_acl_entry) *
(XFS_ACL_MAX_ENTRIES - acl->a_count));
error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
len, ATTR_ROOT);
kfree(xfs_acl);
} else {
/*
* A NULL ACL argument means we want to remove the ACL.
*/
error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT);
/*
* If the attribute didn't exist to start with that's fine.
*/
if (error == -ENOATTR)
error = 0;
}
if (!error)
set_cached_acl(inode, type, acl);
return error;
}
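/*
 * Editor's note, a worked example of the on-disk size trimmed above:
 *
 *	len = sizeof(struct xfs_acl)
 *	    - sizeof(struct xfs_acl_entry) * (XFS_ACL_MAX_ENTRIES - a_count)
 *
 * so an ACL with a_count == 4 stores the count word plus only four
 * xfs_acl_entry records instead of the full XFS_ACL_MAX_ENTRIES array.
 */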
static int
xfs_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
if (mode != inode->i_mode) {
struct iattr iattr;
iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
iattr.ia_mode = mode;
iattr.ia_ctime = current_fs_time(inode->i_sb);
error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}
return error;
}
static int
xfs_acl_exists(struct inode *inode, unsigned char *name)
{
int len = sizeof(struct xfs_acl);
return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
ATTR_ROOT|ATTR_KERNOVAL) == 0);
}
int
posix_acl_access_exists(struct inode *inode)
{
return xfs_acl_exists(inode, SGI_ACL_FILE);
}
int
posix_acl_default_exists(struct inode *inode)
{
if (!S_ISDIR(inode->i_mode))
return 0;
return xfs_acl_exists(inode, SGI_ACL_DEFAULT);
}
/*
* No need for i_mutex because the inode is not yet exposed to the VFS.
*/
int
xfs_inherit_acl(struct inode *inode, struct posix_acl *acl)
{
umode_t mode = inode->i_mode;
int error = 0, inherit = 0;
if (S_ISDIR(inode->i_mode)) {
error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, acl);
if (error)
goto out;
}
error = posix_acl_create(&acl, GFP_KERNEL, &mode);
if (error < 0)
return error;
/*
* If posix_acl_create returns a positive value we need to
* inherit a permission that can't be represented using the Unix
* mode bits and we actually need to set an ACL.
*/
if (error > 0)
inherit = 1;
error = xfs_set_mode(inode, mode);
if (error)
goto out;
if (inherit)
error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl);
out:
posix_acl_release(acl);
return error;
}
int
xfs_acl_chmod(struct inode *inode)
{
struct posix_acl *acl;
int error;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
acl = xfs_get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl) || !acl)
return PTR_ERR(acl);
error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (error)
return error;
error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl);
posix_acl_release(acl);
return error;
}
static int
xfs_xattr_acl_get(struct dentry *dentry, const char *name,
void *value, size_t size, int type)
{
struct posix_acl *acl;
int error;
acl = xfs_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
return -ENODATA;
error = posix_acl_to_xattr(acl, value, size);
posix_acl_release(acl);
return error;
}
static int
xfs_xattr_acl_set(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags, int type)
{
struct inode *inode = dentry->d_inode;
struct posix_acl *acl = NULL;
int error = 0;
if (flags & XATTR_CREATE)
return -EINVAL;
if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
return value ? -EACCES : 0;
if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
if (!value)
goto set_acl;
acl = posix_acl_from_xattr(value, size);
if (!acl) {
/*
* acl_set_file(3) may request that we set default ACLs with
* zero length -- defend (gracefully) against that here.
*/
goto out;
}
if (IS_ERR(acl)) {
error = PTR_ERR(acl);
goto out;
}
error = posix_acl_valid(acl);
if (error)
goto out_release;
error = -EINVAL;
if (acl->a_count > XFS_ACL_MAX_ENTRIES)
goto out_release;
if (type == ACL_TYPE_ACCESS) {
umode_t mode = inode->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
if (error <= 0) {
posix_acl_release(acl);
acl = NULL;
if (error < 0)
return error;
}
error = xfs_set_mode(inode, mode);
if (error)
goto out_release;
}
set_acl:
error = xfs_set_acl(inode, type, acl);
out_release:
posix_acl_release(acl);
out:
return error;
}
const struct xattr_handler xfs_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
.flags = ACL_TYPE_ACCESS,
.get = xfs_xattr_acl_get,
.set = xfs_xattr_acl_set,
};
const struct xattr_handler xfs_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
.flags = ACL_TYPE_DEFAULT,
.get = xfs_xattr_acl_get,
.set = xfs_xattr_acl_set,
};
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3570_0 |
crossvul-cpp_data_good_4822_0 | /*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2016 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Authors: Rasmus Lerdorf <rasmus@php.net> |
| Marcus Boerger <helly@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
/* ToDos
*
* See if example images from http://www.exif.org have illegal
* thumbnail sizes or if code is corrupt.
* Create/Update exif headers.
* Create/Remove/Update image thumbnails.
*/
/* Security
*
 * At the current time I do not see any security problems, but a potential
 * attacker could generate an image with recursive IFD pointers... (Marcus)
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "ext/standard/file.h"
#if HAVE_EXIF
/* When EXIF_DEBUG is defined the module generates a lot of debug messages
 * that help you understand what is going on. This can and should be used
 * while extending the module, as it shows whether you are at the right position.
 * You are always assumed to have a copy of the TIFF 6.0 and EXIF 2.10 standards.
*/
#undef EXIF_DEBUG
#ifdef EXIF_DEBUG
#define EXIFERR_DC , const char *_file, size_t _line TSRMLS_DC
#define EXIFERR_CC , __FILE__, __LINE__ TSRMLS_CC
#else
#define EXIFERR_DC TSRMLS_DC
#define EXIFERR_CC TSRMLS_CC
#endif
#undef EXIF_JPEG2000
#include "php_exif.h"
#include <math.h>
#include "php_ini.h"
#include "ext/standard/php_string.h"
#include "ext/standard/php_image.h"
#include "ext/standard/info.h"
/* needed for ssize_t definition */
#include <sys/types.h>
typedef unsigned char uchar;
#ifndef safe_emalloc
# define safe_emalloc(a,b,c) emalloc((a)*(b)+(c))
#endif
#ifndef safe_erealloc
# define safe_erealloc(p,a,b,c) erealloc(p, (a)*(b)+(c))
#endif
#ifndef TRUE
# define TRUE 1
# define FALSE 0
#endif
#ifndef max
# define max(a,b) ((a)>(b) ? (a) : (b))
#endif
#define EFREE_IF(ptr) if (ptr) efree(ptr)
#define MAX_IFD_NESTING_LEVEL 100
/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO(arginfo_exif_tagname, 0)
ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_exif_read_data, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(0, sections_needed)
ZEND_ARG_INFO(0, sub_arrays)
ZEND_ARG_INFO(0, read_thumbnail)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_exif_thumbnail, 0, 0, 1)
ZEND_ARG_INFO(0, filename)
ZEND_ARG_INFO(1, width)
ZEND_ARG_INFO(1, height)
ZEND_ARG_INFO(1, imagetype)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO(arginfo_exif_imagetype, 0)
ZEND_ARG_INFO(0, imagefile)
ZEND_END_ARG_INFO()
/* }}} */
/* {{{ exif_functions[]
*/
const zend_function_entry exif_functions[] = {
PHP_FE(exif_read_data, arginfo_exif_read_data)
PHP_FALIAS(read_exif_data, exif_read_data, arginfo_exif_read_data)
PHP_FE(exif_tagname, arginfo_exif_tagname)
PHP_FE(exif_thumbnail, arginfo_exif_thumbnail)
PHP_FE(exif_imagetype, arginfo_exif_imagetype)
PHP_FE_END
};
/* }}} */
#define EXIF_VERSION "1.4 $Id$"
/* {{{ PHP_MINFO_FUNCTION
*/
PHP_MINFO_FUNCTION(exif)
{
php_info_print_table_start();
php_info_print_table_row(2, "EXIF Support", "enabled");
php_info_print_table_row(2, "EXIF Version", EXIF_VERSION);
php_info_print_table_row(2, "Supported EXIF Version", "0220");
php_info_print_table_row(2, "Supported filetypes", "JPEG,TIFF");
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
/* }}} */
ZEND_BEGIN_MODULE_GLOBALS(exif)
char * encode_unicode;
char * decode_unicode_be;
char * decode_unicode_le;
char * encode_jis;
char * decode_jis_be;
char * decode_jis_le;
ZEND_END_MODULE_GLOBALS(exif)
ZEND_DECLARE_MODULE_GLOBALS(exif)
#ifdef ZTS
#define EXIF_G(v) TSRMG(exif_globals_id, zend_exif_globals *, v)
#else
#define EXIF_G(v) (exif_globals.v)
#endif
/* {{{ PHP_INI
*/
ZEND_INI_MH(OnUpdateEncode)
{
if (new_value && new_value_length) {
const zend_encoding **return_list;
size_t return_size;
if (FAILURE == zend_multibyte_parse_encoding_list(new_value, new_value_length,
&return_list, &return_size, 0 TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal encoding ignored: '%s'", new_value);
return FAILURE;
}
efree(return_list);
}
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
}
ZEND_INI_MH(OnUpdateDecode)
{
if (new_value) {
const zend_encoding **return_list;
size_t return_size;
if (FAILURE == zend_multibyte_parse_encoding_list(new_value, new_value_length,
&return_list, &return_size, 0 TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal encoding ignored: '%s'", new_value);
return FAILURE;
}
efree(return_list);
}
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
}
PHP_INI_BEGIN()
STD_PHP_INI_ENTRY("exif.encode_unicode", "ISO-8859-15", PHP_INI_ALL, OnUpdateEncode, encode_unicode, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_unicode_motorola", "UCS-2BE", PHP_INI_ALL, OnUpdateDecode, decode_unicode_be, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_unicode_intel", "UCS-2LE", PHP_INI_ALL, OnUpdateDecode, decode_unicode_le, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.encode_jis", "", PHP_INI_ALL, OnUpdateEncode, encode_jis, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_jis_motorola", "JIS", PHP_INI_ALL, OnUpdateDecode, decode_jis_be, zend_exif_globals, exif_globals)
STD_PHP_INI_ENTRY("exif.decode_jis_intel", "JIS", PHP_INI_ALL, OnUpdateDecode, decode_jis_le, zend_exif_globals, exif_globals)
PHP_INI_END()
/* }}} */
/* {{{ PHP_GINIT_FUNCTION
*/
static PHP_GINIT_FUNCTION(exif)
{
exif_globals->encode_unicode = NULL;
exif_globals->decode_unicode_be = NULL;
exif_globals->decode_unicode_le = NULL;
exif_globals->encode_jis = NULL;
exif_globals->decode_jis_be = NULL;
exif_globals->decode_jis_le = NULL;
}
/* }}} */
/* {{{ PHP_MINIT_FUNCTION(exif)
Get the size of an image as 4-element array */
PHP_MINIT_FUNCTION(exif)
{
REGISTER_INI_ENTRIES();
if (zend_hash_exists(&module_registry, "mbstring", sizeof("mbstring"))) {
REGISTER_LONG_CONSTANT("EXIF_USE_MBSTRING", 1, CONST_CS | CONST_PERSISTENT);
} else {
REGISTER_LONG_CONSTANT("EXIF_USE_MBSTRING", 0, CONST_CS | CONST_PERSISTENT);
}
return SUCCESS;
}
/* }}} */
/* {{{ PHP_MSHUTDOWN_FUNCTION
*/
PHP_MSHUTDOWN_FUNCTION(exif)
{
UNREGISTER_INI_ENTRIES();
return SUCCESS;
}
/* }}} */
/* {{{ exif dependencies */
static const zend_module_dep exif_module_deps[] = {
ZEND_MOD_REQUIRED("standard")
ZEND_MOD_OPTIONAL("mbstring")
ZEND_MOD_END
};
/* }}} */
/* {{{ exif_module_entry
*/
zend_module_entry exif_module_entry = {
STANDARD_MODULE_HEADER_EX, NULL,
exif_module_deps,
"exif",
exif_functions,
PHP_MINIT(exif),
PHP_MSHUTDOWN(exif),
NULL, NULL,
PHP_MINFO(exif),
#if ZEND_MODULE_API_NO >= 20010901
EXIF_VERSION,
#endif
#if ZEND_MODULE_API_NO >= 20060613
PHP_MODULE_GLOBALS(exif),
PHP_GINIT(exif),
NULL,
NULL,
STANDARD_MODULE_PROPERTIES_EX
#else
STANDARD_MODULE_PROPERTIES
#endif
};
/* }}} */
#ifdef COMPILE_DL_EXIF
ZEND_GET_MODULE(exif)
#endif
/* {{{ php_strnlen
 * get the length of the string if it is shorter than the buffer size, otherwise the buffer size */
static size_t php_strnlen(char* str, size_t maxlen) {
size_t len = 0;
if (str && maxlen && *str) {
do {
len++;
} while (--maxlen && *(++str));
}
return len;
}
/* }}} */
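/* {{{ editor's note
 * php_strnlen() is a bounded strlen. For example, with a non-terminated
 * 4-byte buffer {'E','x','i','f'}:
 *
 *	php_strnlen(buf, 4) == 4	(stops at maxlen)
 *	php_strnlen("Ok", 8) == 2	(stops at the NUL)
 *
 * which is why it is safe on raw, possibly unterminated tag data. */
/* }}} */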
/* {{{ error messages
*/
static const char * EXIF_ERROR_FILEEOF = "Unexpected end of file reached";
static const char * EXIF_ERROR_CORRUPT = "File structure corrupted";
static const char * EXIF_ERROR_THUMBEOF = "Thumbnail goes IFD boundary or end of file reached";
static const char * EXIF_ERROR_FSREALLOC = "Illegal reallocating of undefined file section";
#define EXIF_ERRLOG_FILEEOF(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_FILEEOF);
#define EXIF_ERRLOG_CORRUPT(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_CORRUPT);
#define EXIF_ERRLOG_THUMBEOF(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_THUMBEOF);
#define EXIF_ERRLOG_FSREALLOC(ImageInfo) exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s", EXIF_ERROR_FSREALLOC);
/* }}} */
/* {{{ format description defines
Describes format descriptor
*/
static int php_tiff_bytes_per_format[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8, 1};
#define NUM_FORMATS 13
#define TAG_FMT_BYTE 1
#define TAG_FMT_STRING 2
#define TAG_FMT_USHORT 3
#define TAG_FMT_ULONG 4
#define TAG_FMT_URATIONAL 5
#define TAG_FMT_SBYTE 6
#define TAG_FMT_UNDEFINED 7
#define TAG_FMT_SSHORT 8
#define TAG_FMT_SLONG 9
#define TAG_FMT_SRATIONAL 10
#define TAG_FMT_SINGLE 11
#define TAG_FMT_DOUBLE 12
#define TAG_FMT_IFD 13
#ifdef EXIF_DEBUG
static char *exif_get_tagformat(int format)
{
switch(format) {
case TAG_FMT_BYTE: return "BYTE";
case TAG_FMT_STRING: return "STRING";
case TAG_FMT_USHORT: return "USHORT";
case TAG_FMT_ULONG: return "ULONG";
case TAG_FMT_URATIONAL: return "URATIONAL";
case TAG_FMT_SBYTE: return "SBYTE";
case TAG_FMT_UNDEFINED: return "UNDEFINED";
case TAG_FMT_SSHORT: return "SSHORT";
case TAG_FMT_SLONG: return "SLONG";
case TAG_FMT_SRATIONAL: return "SRATIONAL";
case TAG_FMT_SINGLE: return "SINGLE";
case TAG_FMT_DOUBLE: return "DOUBLE";
case TAG_FMT_IFD: return "IFD";
}
return "*Illegal";
}
#endif
/* Describes tag values */
#define TAG_GPS_VERSION_ID 0x0000
#define TAG_GPS_LATITUDE_REF 0x0001
#define TAG_GPS_LATITUDE 0x0002
#define TAG_GPS_LONGITUDE_REF 0x0003
#define TAG_GPS_LONGITUDE 0x0004
#define TAG_GPS_ALTITUDE_REF 0x0005
#define TAG_GPS_ALTITUDE 0x0006
#define TAG_GPS_TIME_STAMP 0x0007
#define TAG_GPS_SATELLITES 0x0008
#define TAG_GPS_STATUS 0x0009
#define TAG_GPS_MEASURE_MODE 0x000A
#define TAG_GPS_DOP 0x000B
#define TAG_GPS_SPEED_REF 0x000C
#define TAG_GPS_SPEED 0x000D
#define TAG_GPS_TRACK_REF 0x000E
#define TAG_GPS_TRACK 0x000F
#define TAG_GPS_IMG_DIRECTION_REF 0x0010
#define TAG_GPS_IMG_DIRECTION 0x0011
#define TAG_GPS_MAP_DATUM 0x0012
#define TAG_GPS_DEST_LATITUDE_REF 0x0013
#define TAG_GPS_DEST_LATITUDE 0x0014
#define TAG_GPS_DEST_LONGITUDE_REF 0x0015
#define TAG_GPS_DEST_LONGITUDE 0x0016
#define TAG_GPS_DEST_BEARING_REF 0x0017
#define TAG_GPS_DEST_BEARING 0x0018
#define TAG_GPS_DEST_DISTANCE_REF 0x0019
#define TAG_GPS_DEST_DISTANCE 0x001A
#define TAG_GPS_PROCESSING_METHOD 0x001B
#define TAG_GPS_AREA_INFORMATION 0x001C
#define TAG_GPS_DATE_STAMP 0x001D
#define TAG_GPS_DIFFERENTIAL 0x001E
#define TAG_TIFF_COMMENT 0x00FE /* SHOULDN'T HAPPEN */
#define TAG_NEW_SUBFILE 0x00FE /* New version of subfile tag */
#define TAG_SUBFILE_TYPE 0x00FF /* Old version of subfile tag */
#define TAG_IMAGEWIDTH 0x0100
#define TAG_IMAGEHEIGHT 0x0101
#define TAG_BITS_PER_SAMPLE 0x0102
#define TAG_COMPRESSION 0x0103
#define TAG_PHOTOMETRIC_INTERPRETATION 0x0106
#define TAG_TRESHHOLDING 0x0107
#define TAG_CELL_WIDTH 0x0108
#define TAG_CELL_HEIGHT 0x0109
#define TAG_FILL_ORDER 0x010A
#define TAG_DOCUMENT_NAME 0x010D
#define TAG_IMAGE_DESCRIPTION 0x010E
#define TAG_MAKE 0x010F
#define TAG_MODEL 0x0110
#define TAG_STRIP_OFFSETS 0x0111
#define TAG_ORIENTATION 0x0112
#define TAG_SAMPLES_PER_PIXEL 0x0115
#define TAG_ROWS_PER_STRIP 0x0116
#define TAG_STRIP_BYTE_COUNTS 0x0117
#define TAG_MIN_SAMPPLE_VALUE 0x0118
#define TAG_MAX_SAMPLE_VALUE 0x0119
#define TAG_X_RESOLUTION 0x011A
#define TAG_Y_RESOLUTION 0x011B
#define TAG_PLANAR_CONFIGURATION 0x011C
#define TAG_PAGE_NAME 0x011D
#define TAG_X_POSITION 0x011E
#define TAG_Y_POSITION 0x011F
#define TAG_FREE_OFFSETS 0x0120
#define TAG_FREE_BYTE_COUNTS 0x0121
#define TAG_GRAY_RESPONSE_UNIT 0x0122
#define TAG_GRAY_RESPONSE_CURVE 0x0123
#define TAG_RESOLUTION_UNIT 0x0128
#define TAG_PAGE_NUMBER 0x0129
#define TAG_TRANSFER_FUNCTION 0x012D
#define TAG_SOFTWARE 0x0131
#define TAG_DATETIME 0x0132
#define TAG_ARTIST 0x013B
#define TAG_HOST_COMPUTER 0x013C
#define TAG_PREDICTOR 0x013D
#define TAG_WHITE_POINT 0x013E
#define TAG_PRIMARY_CHROMATICITIES 0x013F
#define TAG_COLOR_MAP 0x0140
#define TAG_HALFTONE_HINTS 0x0141
#define TAG_TILE_WIDTH 0x0142
#define TAG_TILE_LENGTH 0x0143
#define TAG_TILE_OFFSETS 0x0144
#define TAG_TILE_BYTE_COUNTS 0x0145
#define TAG_SUB_IFD 0x014A
#define TAG_INK_SETMPUTER 0x014C
#define TAG_INK_NAMES 0x014D
#define TAG_NUMBER_OF_INKS 0x014E
#define TAG_DOT_RANGE 0x0150
#define TAG_TARGET_PRINTER 0x0151
#define TAG_EXTRA_SAMPLE 0x0152
#define TAG_SAMPLE_FORMAT 0x0153
#define TAG_S_MIN_SAMPLE_VALUE 0x0154
#define TAG_S_MAX_SAMPLE_VALUE 0x0155
#define TAG_TRANSFER_RANGE 0x0156
#define TAG_JPEG_TABLES 0x015B
#define TAG_JPEG_PROC 0x0200
#define TAG_JPEG_INTERCHANGE_FORMAT 0x0201
#define TAG_JPEG_INTERCHANGE_FORMAT_LEN 0x0202
#define TAG_JPEG_RESTART_INTERVAL 0x0203
#define TAG_JPEG_LOSSLESS_PREDICTOR 0x0205
#define TAG_JPEG_POINT_TRANSFORMS 0x0206
#define TAG_JPEG_Q_TABLES 0x0207
#define TAG_JPEG_DC_TABLES 0x0208
#define TAG_JPEG_AC_TABLES 0x0209
#define TAG_YCC_COEFFICIENTS 0x0211
#define TAG_YCC_SUB_SAMPLING 0x0212
#define TAG_YCC_POSITIONING 0x0213
#define TAG_REFERENCE_BLACK_WHITE 0x0214
/* 0x0301 - 0x0302 */
/* 0x0320 */
/* 0x0343 */
/* 0x5001 - 0x501B */
/* 0x5021 - 0x503B */
/* 0x5090 - 0x5091 */
/* 0x5100 - 0x5101 */
/* 0x5110 - 0x5113 */
/* 0x80E3 - 0x80E6 */
/* 0x828d - 0x828F */
#define TAG_COPYRIGHT 0x8298
#define TAG_EXPOSURETIME 0x829A
#define TAG_FNUMBER 0x829D
#define TAG_EXIF_IFD_POINTER 0x8769
#define TAG_ICC_PROFILE 0x8773
#define TAG_EXPOSURE_PROGRAM 0x8822
#define TAG_SPECTRAL_SENSITY 0x8824
#define TAG_GPS_IFD_POINTER 0x8825
#define TAG_ISOSPEED 0x8827
#define TAG_OPTOELECTRIC_CONVERSION_F 0x8828
/* 0x8829 - 0x882b */
#define TAG_EXIFVERSION 0x9000
#define TAG_DATE_TIME_ORIGINAL 0x9003
#define TAG_DATE_TIME_DIGITIZED 0x9004
#define TAG_COMPONENT_CONFIG 0x9101
#define TAG_COMPRESSED_BITS_PER_PIXEL 0x9102
#define TAG_SHUTTERSPEED 0x9201
#define TAG_APERTURE 0x9202
#define TAG_BRIGHTNESS_VALUE 0x9203
#define TAG_EXPOSURE_BIAS_VALUE 0x9204
#define TAG_MAX_APERTURE 0x9205
#define TAG_SUBJECT_DISTANCE 0x9206
#define TAG_METRIC_MODULE 0x9207
#define TAG_LIGHT_SOURCE 0x9208
#define TAG_FLASH 0x9209
#define TAG_FOCAL_LENGTH 0x920A
/* 0x920B - 0x920D */
/* 0x9211 - 0x9216 */
#define TAG_SUBJECT_AREA 0x9214
#define TAG_MAKER_NOTE 0x927C
#define TAG_USERCOMMENT 0x9286
#define TAG_SUB_SEC_TIME 0x9290
#define TAG_SUB_SEC_TIME_ORIGINAL 0x9291
#define TAG_SUB_SEC_TIME_DIGITIZED 0x9292
/* 0x923F */
/* 0x935C */
#define TAG_XP_TITLE 0x9C9B
#define TAG_XP_COMMENTS 0x9C9C
#define TAG_XP_AUTHOR 0x9C9D
#define TAG_XP_KEYWORDS 0x9C9E
#define TAG_XP_SUBJECT 0x9C9F
#define TAG_FLASH_PIX_VERSION 0xA000
#define TAG_COLOR_SPACE 0xA001
#define TAG_COMP_IMAGE_WIDTH 0xA002 /* compressed images only */
#define TAG_COMP_IMAGE_HEIGHT 0xA003
#define TAG_RELATED_SOUND_FILE 0xA004
#define TAG_INTEROP_IFD_POINTER 0xA005 /* IFD pointer */
#define TAG_FLASH_ENERGY 0xA20B
#define TAG_SPATIAL_FREQUENCY_RESPONSE 0xA20C
#define TAG_FOCALPLANE_X_RES 0xA20E
#define TAG_FOCALPLANE_Y_RES 0xA20F
#define TAG_FOCALPLANE_RESOLUTION_UNIT 0xA210
#define TAG_SUBJECT_LOCATION 0xA214
#define TAG_EXPOSURE_INDEX 0xA215
#define TAG_SENSING_METHOD 0xA217
#define TAG_FILE_SOURCE 0xA300
#define TAG_SCENE_TYPE 0xA301
#define TAG_CFA_PATTERN 0xA302
#define TAG_CUSTOM_RENDERED 0xA401
#define TAG_EXPOSURE_MODE 0xA402
#define TAG_WHITE_BALANCE 0xA403
#define TAG_DIGITAL_ZOOM_RATIO 0xA404
#define TAG_FOCAL_LENGTH_IN_35_MM_FILM 0xA405
#define TAG_SCENE_CAPTURE_TYPE 0xA406
#define TAG_GAIN_CONTROL 0xA407
#define TAG_CONTRAST 0xA408
#define TAG_SATURATION 0xA409
#define TAG_SHARPNESS 0xA40A
#define TAG_DEVICE_SETTING_DESCRIPTION 0xA40B
#define TAG_SUBJECT_DISTANCE_RANGE 0xA40C
#define TAG_IMAGE_UNIQUE_ID 0xA420
/* Olympus specific tags */
#define TAG_OLYMPUS_SPECIALMODE 0x0200
#define TAG_OLYMPUS_JPEGQUAL 0x0201
#define TAG_OLYMPUS_MACRO 0x0202
#define TAG_OLYMPUS_DIGIZOOM 0x0204
#define TAG_OLYMPUS_SOFTWARERELEASE 0x0207
#define TAG_OLYMPUS_PICTINFO 0x0208
#define TAG_OLYMPUS_CAMERAID 0x0209
/* end Olympus specific tags */
/* Internal */
#define TAG_NONE               -1 /* note that -1 != 0xFFFF */
#define TAG_COMPUTED_VALUE -2
#define TAG_END_OF_LIST 0xFFFD
/* Values for TAG_PHOTOMETRIC_INTERPRETATION */
#define PMI_BLACK_IS_ZERO 0
#define PMI_WHITE_IS_ZERO 1
#define PMI_RGB 2
#define PMI_PALETTE_COLOR 3
#define PMI_TRANSPARENCY_MASK 4
#define PMI_SEPARATED 5
#define PMI_YCBCR 6
#define PMI_CIELAB 8
/* }}} */
/* {{{ TabTable[]
*/
typedef const struct {
unsigned short Tag;
char *Desc;
} tag_info_type;
typedef tag_info_type tag_info_array[];
typedef tag_info_type *tag_table_type;
#define TAG_TABLE_END \
{TAG_NONE, "No tag value"},\
{TAG_COMPUTED_VALUE, "Computed value"},\
{TAG_END_OF_LIST, ""} /* sentinel for exif_get_tagname(): only a non-empty Desc counts as a valid (non-false) result */
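/* Worked example against the tables below: looking up 0x010F in
 * tag_table_IFD yields "Make", while an unknown tag runs into the
 * TAG_END_OF_LIST sentinel and exif_get_tagname() falls back to the
 * "UndefinedTag:0x%04X" form. */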
static tag_info_array tag_table_IFD = {
{ 0x000B, "ACDComment"},
{ 0x00FE, "NewSubFile"}, /* better name it 'ImageType' ? */
{ 0x00FF, "SubFile"},
{ 0x0100, "ImageWidth"},
{ 0x0101, "ImageLength"},
{ 0x0102, "BitsPerSample"},
{ 0x0103, "Compression"},
{ 0x0106, "PhotometricInterpretation"},
{ 0x010A, "FillOrder"},
{ 0x010D, "DocumentName"},
{ 0x010E, "ImageDescription"},
{ 0x010F, "Make"},
{ 0x0110, "Model"},
{ 0x0111, "StripOffsets"},
{ 0x0112, "Orientation"},
{ 0x0115, "SamplesPerPixel"},
{ 0x0116, "RowsPerStrip"},
{ 0x0117, "StripByteCounts"},
{ 0x0118, "MinSampleValue"},
{ 0x0119, "MaxSampleValue"},
{ 0x011A, "XResolution"},
{ 0x011B, "YResolution"},
{ 0x011C, "PlanarConfiguration"},
{ 0x011D, "PageName"},
{ 0x011E, "XPosition"},
{ 0x011F, "YPosition"},
{ 0x0120, "FreeOffsets"},
{ 0x0121, "FreeByteCounts"},
{ 0x0122, "GrayResponseUnit"},
{ 0x0123, "GrayResponseCurve"},
{ 0x0124, "T4Options"},
{ 0x0125, "T6Options"},
{ 0x0128, "ResolutionUnit"},
{ 0x0129, "PageNumber"},
{ 0x012D, "TransferFunction"},
{ 0x0131, "Software"},
{ 0x0132, "DateTime"},
{ 0x013B, "Artist"},
{ 0x013C, "HostComputer"},
{ 0x013D, "Predictor"},
{ 0x013E, "WhitePoint"},
{ 0x013F, "PrimaryChromaticities"},
{ 0x0140, "ColorMap"},
{ 0x0141, "HalfToneHints"},
{ 0x0142, "TileWidth"},
{ 0x0143, "TileLength"},
{ 0x0144, "TileOffsets"},
{ 0x0145, "TileByteCounts"},
{ 0x014A, "SubIFD"},
{ 0x014C, "InkSet"},
{ 0x014D, "InkNames"},
{ 0x014E, "NumberOfInks"},
{ 0x0150, "DotRange"},
{ 0x0151, "TargetPrinter"},
{ 0x0152, "ExtraSample"},
{ 0x0153, "SampleFormat"},
{ 0x0154, "SMinSampleValue"},
{ 0x0155, "SMaxSampleValue"},
{ 0x0156, "TransferRange"},
{ 0x0157, "ClipPath"},
{ 0x0158, "XClipPathUnits"},
{ 0x0159, "YClipPathUnits"},
{ 0x015A, "Indexed"},
{ 0x015B, "JPEGTables"},
{ 0x015F, "OPIProxy"},
{ 0x0200, "JPEGProc"},
{ 0x0201, "JPEGInterchangeFormat"},
{ 0x0202, "JPEGInterchangeFormatLength"},
{ 0x0203, "JPEGRestartInterval"},
{ 0x0205, "JPEGLosslessPredictors"},
{ 0x0206, "JPEGPointTransforms"},
{ 0x0207, "JPEGQTables"},
{ 0x0208, "JPEGDCTables"},
{ 0x0209, "JPEGACTables"},
{ 0x0211, "YCbCrCoefficients"},
{ 0x0212, "YCbCrSubSampling"},
{ 0x0213, "YCbCrPositioning"},
{ 0x0214, "ReferenceBlackWhite"},
{ 0x02BC, "ExtensibleMetadataPlatform"}, /* XAP: Extensible Authoring Publishing, obsoleted by XMP: Extensible Metadata Platform */
{ 0x0301, "Gamma"},
{ 0x0302, "ICCProfileDescriptor"},
{ 0x0303, "SRGBRenderingIntent"},
{ 0x0320, "ImageTitle"},
{ 0x5001, "ResolutionXUnit"},
{ 0x5002, "ResolutionYUnit"},
{ 0x5003, "ResolutionXLengthUnit"},
{ 0x5004, "ResolutionYLengthUnit"},
{ 0x5005, "PrintFlags"},
{ 0x5006, "PrintFlagsVersion"},
{ 0x5007, "PrintFlagsCrop"},
{ 0x5008, "PrintFlagsBleedWidth"},
{ 0x5009, "PrintFlagsBleedWidthScale"},
{ 0x500A, "HalftoneLPI"},
{ 0x500B, "HalftoneLPIUnit"},
{ 0x500C, "HalftoneDegree"},
{ 0x500D, "HalftoneShape"},
{ 0x500E, "HalftoneMisc"},
{ 0x500F, "HalftoneScreen"},
{ 0x5010, "JPEGQuality"},
{ 0x5011, "GridSize"},
{ 0x5012, "ThumbnailFormat"},
{ 0x5013, "ThumbnailWidth"},
{ 0x5014, "ThumbnailHeight"},
{ 0x5015, "ThumbnailColorDepth"},
{ 0x5016, "ThumbnailPlanes"},
{ 0x5017, "ThumbnailRawBytes"},
{ 0x5018, "ThumbnailSize"},
{ 0x5019, "ThumbnailCompressedSize"},
{ 0x501A, "ColorTransferFunction"},
{ 0x501B, "ThumbnailData"},
{ 0x5020, "ThumbnailImageWidth"},
{ 0x5021, "ThumbnailImageHeight"},
{ 0x5022, "ThumbnailBitsPerSample"},
{ 0x5023, "ThumbnailCompression"},
{ 0x5024, "ThumbnailPhotometricInterp"},
{ 0x5025, "ThumbnailImageDescription"},
{ 0x5026, "ThumbnailEquipMake"},
{ 0x5027, "ThumbnailEquipModel"},
{ 0x5028, "ThumbnailStripOffsets"},
{ 0x5029, "ThumbnailOrientation"},
{ 0x502A, "ThumbnailSamplesPerPixel"},
{ 0x502B, "ThumbnailRowsPerStrip"},
{ 0x502C, "ThumbnailStripBytesCount"},
{ 0x502D, "ThumbnailResolutionX"},
{ 0x502E, "ThumbnailResolutionY"},
{ 0x502F, "ThumbnailPlanarConfig"},
{ 0x5030, "ThumbnailResolutionUnit"},
{ 0x5031, "ThumbnailTransferFunction"},
{ 0x5032, "ThumbnailSoftwareUsed"},
{ 0x5033, "ThumbnailDateTime"},
{ 0x5034, "ThumbnailArtist"},
{ 0x5035, "ThumbnailWhitePoint"},
{ 0x5036, "ThumbnailPrimaryChromaticities"},
{ 0x5037, "ThumbnailYCbCrCoefficients"},
{ 0x5038, "ThumbnailYCbCrSubsampling"},
{ 0x5039, "ThumbnailYCbCrPositioning"},
{ 0x503A, "ThumbnailRefBlackWhite"},
{ 0x503B, "ThumbnailCopyRight"},
{ 0x5090, "LuminanceTable"},
{ 0x5091, "ChrominanceTable"},
{ 0x5100, "FrameDelay"},
{ 0x5101, "LoopCount"},
{ 0x5110, "PixelUnit"},
{ 0x5111, "PixelPerUnitX"},
{ 0x5112, "PixelPerUnitY"},
{ 0x5113, "PaletteHistogram"},
{ 0x1000, "RelatedImageFileFormat"},
{ 0x800D, "ImageID"},
{ 0x80E3, "Matteing"}, /* obsoleted by ExtraSamples */
{ 0x80E4, "DataType"}, /* obsoleted by SampleFormat */
{ 0x80E5, "ImageDepth"},
{ 0x80E6, "TileDepth"},
{ 0x828D, "CFARepeatPatternDim"},
{ 0x828E, "CFAPattern"},
{ 0x828F, "BatteryLevel"},
{ 0x8298, "Copyright"},
{ 0x829A, "ExposureTime"},
{ 0x829D, "FNumber"},
{ 0x83BB, "IPTC/NAA"},
{ 0x84E3, "IT8RasterPadding"},
{ 0x84E5, "IT8ColorTable"},
{ 0x8649, "ImageResourceInformation"}, /* PhotoShop */
{ 0x8769, "Exif_IFD_Pointer"},
{ 0x8773, "ICC_Profile"},
{ 0x8822, "ExposureProgram"},
{ 0x8824, "SpectralSensity"},
{ 0x8828, "OECF"},
{ 0x8825, "GPS_IFD_Pointer"},
{ 0x8827, "ISOSpeedRatings"},
{ 0x8828, "OECF"},
{ 0x9000, "ExifVersion"},
{ 0x9003, "DateTimeOriginal"},
{ 0x9004, "DateTimeDigitized"},
{ 0x9101, "ComponentsConfiguration"},
{ 0x9102, "CompressedBitsPerPixel"},
{ 0x9201, "ShutterSpeedValue"},
{ 0x9202, "ApertureValue"},
{ 0x9203, "BrightnessValue"},
{ 0x9204, "ExposureBiasValue"},
{ 0x9205, "MaxApertureValue"},
{ 0x9206, "SubjectDistance"},
{ 0x9207, "MeteringMode"},
{ 0x9208, "LightSource"},
{ 0x9209, "Flash"},
{ 0x920A, "FocalLength"},
{ 0x920B, "FlashEnergy"}, /* 0xA20B in JPEG */
{ 0x920C, "SpatialFrequencyResponse"}, /* 0xA20C - - */
{ 0x920D, "Noise"},
{ 0x920E, "FocalPlaneXResolution"}, /* 0xA20E - - */
{ 0x920F, "FocalPlaneYResolution"}, /* 0xA20F - - */
{ 0x9210, "FocalPlaneResolutionUnit"}, /* 0xA210 - - */
{ 0x9211, "ImageNumber"},
{ 0x9212, "SecurityClassification"},
{ 0x9213, "ImageHistory"},
{ 0x9214, "SubjectLocation"}, /* 0xA214 - - */
{ 0x9215, "ExposureIndex"}, /* 0xA215 - - */
{ 0x9216, "TIFF/EPStandardID"},
{ 0x9217, "SensingMethod"}, /* 0xA217 - - */
{ 0x923F, "StoNits"},
{ 0x927C, "MakerNote"},
{ 0x9286, "UserComment"},
{ 0x9290, "SubSecTime"},
{ 0x9291, "SubSecTimeOriginal"},
{ 0x9292, "SubSecTimeDigitized"},
{ 0x935C, "ImageSourceData"}, /* "Adobe Photoshop Document Data Block": 8BIM... */
{ 0x9c9b, "Title" }, /* Win XP specific, Unicode */
{ 0x9c9c, "Comments" }, /* Win XP specific, Unicode */
{ 0x9c9d, "Author" }, /* Win XP specific, Unicode */
{ 0x9c9e, "Keywords" }, /* Win XP specific, Unicode */
{ 0x9c9f, "Subject" }, /* Win XP specific, Unicode, not to be confused with SubjectDistance and SubjectLocation */
{ 0xA000, "FlashPixVersion"},
{ 0xA001, "ColorSpace"},
{ 0xA002, "ExifImageWidth"},
{ 0xA003, "ExifImageLength"},
{ 0xA004, "RelatedSoundFile"},
{ 0xA005, "InteroperabilityOffset"},
{ 0xA20B, "FlashEnergy"}, /* 0x920B in TIFF/EP */
{ 0xA20C, "SpatialFrequencyResponse"}, /* 0x920C - - */
{ 0xA20D, "Noise"},
{ 0xA20E, "FocalPlaneXResolution"}, /* 0x920E - - */
{ 0xA20F, "FocalPlaneYResolution"}, /* 0x920F - - */
{ 0xA210, "FocalPlaneResolutionUnit"}, /* 0x9210 - - */
{ 0xA211, "ImageNumber"},
{ 0xA212, "SecurityClassification"},
{ 0xA213, "ImageHistory"},
{ 0xA214, "SubjectLocation"}, /* 0x9214 - - */
{ 0xA215, "ExposureIndex"}, /* 0x9215 - - */
{ 0xA216, "TIFF/EPStandardID"},
{ 0xA217, "SensingMethod"}, /* 0x9217 - - */
{ 0xA300, "FileSource"},
{ 0xA301, "SceneType"},
{ 0xA302, "CFAPattern"},
{ 0xA401, "CustomRendered"},
{ 0xA402, "ExposureMode"},
{ 0xA403, "WhiteBalance"},
{ 0xA404, "DigitalZoomRatio"},
{ 0xA405, "FocalLengthIn35mmFilm"},
{ 0xA406, "SceneCaptureType"},
{ 0xA407, "GainControl"},
{ 0xA408, "Contrast"},
{ 0xA409, "Saturation"},
{ 0xA40A, "Sharpness"},
{ 0xA40B, "DeviceSettingDescription"},
{ 0xA40C, "SubjectDistanceRange"},
{ 0xA420, "ImageUniqueID"},
TAG_TABLE_END
} ;
static tag_info_array tag_table_GPS = {
{ 0x0000, "GPSVersion"},
{ 0x0001, "GPSLatitudeRef"},
{ 0x0002, "GPSLatitude"},
{ 0x0003, "GPSLongitudeRef"},
{ 0x0004, "GPSLongitude"},
{ 0x0005, "GPSAltitudeRef"},
{ 0x0006, "GPSAltitude"},
{ 0x0007, "GPSTimeStamp"},
{ 0x0008, "GPSSatellites"},
{ 0x0009, "GPSStatus"},
{ 0x000A, "GPSMeasureMode"},
{ 0x000B, "GPSDOP"},
{ 0x000C, "GPSSpeedRef"},
{ 0x000D, "GPSSpeed"},
{ 0x000E, "GPSTrackRef"},
{ 0x000F, "GPSTrack"},
{ 0x0010, "GPSImgDirectionRef"},
{ 0x0011, "GPSImgDirection"},
{ 0x0012, "GPSMapDatum"},
{ 0x0013, "GPSDestLatitudeRef"},
{ 0x0014, "GPSDestLatitude"},
{ 0x0015, "GPSDestLongitudeRef"},
{ 0x0016, "GPSDestLongitude"},
{ 0x0017, "GPSDestBearingRef"},
{ 0x0018, "GPSDestBearing"},
{ 0x0019, "GPSDestDistanceRef"},
{ 0x001A, "GPSDestDistance"},
{ 0x001B, "GPSProcessingMode"},
{ 0x001C, "GPSAreaInformation"},
{ 0x001D, "GPSDateStamp"},
{ 0x001E, "GPSDifferential"},
TAG_TABLE_END
};
static tag_info_array tag_table_IOP = {
{ 0x0001, "InterOperabilityIndex"}, /* should be 'R98' or 'THM' */
{ 0x0002, "InterOperabilityVersion"},
{ 0x1000, "RelatedFileFormat"},
{ 0x1001, "RelatedImageWidth"},
{ 0x1002, "RelatedImageHeight"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_CANON = {
{ 0x0001, "ModeArray"}, /* guess */
{ 0x0004, "ImageInfo"}, /* guess */
{ 0x0006, "ImageType"},
{ 0x0007, "FirmwareVersion"},
{ 0x0008, "ImageNumber"},
{ 0x0009, "OwnerName"},
{ 0x000C, "Camera"},
{ 0x000F, "CustomFunctions"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_CASIO = {
{ 0x0001, "RecordingMode"},
{ 0x0002, "Quality"},
{ 0x0003, "FocusingMode"},
{ 0x0004, "FlashMode"},
{ 0x0005, "FlashIntensity"},
{ 0x0006, "ObjectDistance"},
{ 0x0007, "WhiteBalance"},
{ 0x000A, "DigitalZoom"},
{ 0x000B, "Sharpness"},
{ 0x000C, "Contrast"},
{ 0x000D, "Saturation"},
{ 0x0014, "CCDSensitivity"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_FUJI = {
{ 0x0000, "Version"},
{ 0x1000, "Quality"},
{ 0x1001, "Sharpness"},
{ 0x1002, "WhiteBalance"},
{ 0x1003, "Color"},
{ 0x1004, "Tone"},
{ 0x1010, "FlashMode"},
{ 0x1011, "FlashStrength"},
{ 0x1020, "Macro"},
{ 0x1021, "FocusMode"},
{ 0x1030, "SlowSync"},
{ 0x1031, "PictureMode"},
{ 0x1100, "ContTake"},
{ 0x1300, "BlurWarning"},
{ 0x1301, "FocusWarning"},
{ 0x1302, "AEWarning "},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_NIKON = {
{ 0x0003, "Quality"},
{ 0x0004, "ColorMode"},
{ 0x0005, "ImageAdjustment"},
{ 0x0006, "CCDSensitivity"},
{ 0x0007, "WhiteBalance"},
{ 0x0008, "Focus"},
{ 0x000a, "DigitalZoom"},
{ 0x000b, "Converter"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_NIKON_990 = {
{ 0x0001, "Version"},
{ 0x0002, "ISOSetting"},
{ 0x0003, "ColorMode"},
{ 0x0004, "Quality"},
{ 0x0005, "WhiteBalance"},
{ 0x0006, "ImageSharpening"},
{ 0x0007, "FocusMode"},
{ 0x0008, "FlashSetting"},
{ 0x000F, "ISOSelection"},
{ 0x0080, "ImageAdjustment"},
{ 0x0082, "AuxiliaryLens"},
{ 0x0085, "ManualFocusDistance"},
{ 0x0086, "DigitalZoom"},
{ 0x0088, "AFFocusPosition"},
{ 0x0010, "DataDump"},
TAG_TABLE_END
};
static tag_info_array tag_table_VND_OLYMPUS = {
{ 0x0200, "SpecialMode"},
{ 0x0201, "JPEGQuality"},
{ 0x0202, "Macro"},
{ 0x0204, "DigitalZoom"},
{ 0x0207, "SoftwareRelease"},
{ 0x0208, "PictureInfo"},
{ 0x0209, "CameraId"},
{ 0x0F00, "DataDump"},
TAG_TABLE_END
};
typedef enum mn_byte_order_t {
MN_ORDER_INTEL = 0,
MN_ORDER_MOTOROLA = 1,
MN_ORDER_NORMAL
} mn_byte_order_t;
typedef enum mn_offset_mode_t {
MN_OFFSET_NORMAL,
MN_OFFSET_MAKER,
MN_OFFSET_GUESS
} mn_offset_mode_t;
typedef struct {
tag_table_type tag_table;
char * make;
char * model;
char * id_string;
int id_string_len;
int offset;
mn_byte_order_t byte_order;
mn_offset_mode_t offset_mode;
} maker_note_type;
static const maker_note_type maker_note_array[] = {
{ tag_table_VND_CANON, "Canon", NULL, NULL, 0, 0, MN_ORDER_INTEL, MN_OFFSET_GUESS},
/* { tag_table_VND_CANON, "Canon", NULL, NULL, 0, 0, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},*/
{ tag_table_VND_CASIO, "CASIO", NULL, NULL, 0, 0, MN_ORDER_MOTOROLA, MN_OFFSET_NORMAL},
{ tag_table_VND_FUJI, "FUJIFILM", NULL, "FUJIFILM\x0C\x00\x00\x00", 12, 12, MN_ORDER_INTEL, MN_OFFSET_MAKER},
{ tag_table_VND_NIKON, "NIKON", NULL, "Nikon\x00\x01\x00", 8, 8, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
{ tag_table_VND_NIKON_990, "NIKON", NULL, NULL, 0, 0, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
{ tag_table_VND_OLYMPUS, "OLYMPUS OPTICAL CO.,LTD", NULL, "OLYMP\x00\x01\x00", 8, 8, MN_ORDER_NORMAL, MN_OFFSET_NORMAL},
};
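/* Sketch of how this table is meant to be used (inferred from the fields;
 * the matcher itself lives further down in this file): entries are tried
 * in order against the image's Make/Model strings and, where id_string is
 * non-NULL, against the first id_string_len bytes of the maker note data.
 * The matching entry then supplies the vendor tag table plus the byte
 * order and offset mode used to parse the note, presumably when the
 * parser encounters TAG_MAKER_NOTE. */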
/* }}} */
/* {{{ exif_get_tagname
   Get headername for tag_num, or "" if not defined */
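/* Note on the len parameter below: a positive len is a plain strlcpy()
 * limit, while a negative len requests a left-justified field padded with
 * spaces to a width of exactly (-len - 1) characters. */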
static char * exif_get_tagname(int tag_num, char *ret, int len, tag_table_type tag_table TSRMLS_DC)
{
int i, t;
char tmp[32];
for (i = 0; (t = tag_table[i].Tag) != TAG_END_OF_LIST; i++) {
if (t == tag_num) {
if (ret && len) {
strlcpy(ret, tag_table[i].Desc, abs(len));
if (len < 0) {
memset(ret + strlen(ret), ' ', -len - strlen(ret) - 1);
ret[-len - 1] = '\0';
}
return ret;
}
return tag_table[i].Desc;
}
}
if (ret && len) {
snprintf(tmp, sizeof(tmp), "UndefinedTag:0x%04X", tag_num);
strlcpy(ret, tmp, abs(len));
if (len < 0) {
memset(ret + strlen(ret), ' ', -len - strlen(ret) - 1);
ret[-len - 1] = '\0';
}
return ret;
}
return "";
}
/* }}} */
/* {{{ exif_char_dump
* Do not use! This is a debug function... */
#ifdef EXIF_DEBUG
static unsigned char* exif_char_dump(unsigned char * addr, int len, int offset)
{
static unsigned char buf[4096+1];
static unsigned char tmp[20];
int c, i, p=0, n = 5+31;
p += slprintf(buf+p, sizeof(buf)-p, "\nDump Len: %08X (%d)", len, len);
if (len) {
for(i=0; i<len+15 && p+n<=sizeof(buf); i++) {
if (i%16==0) {
p += slprintf(buf+p, sizeof(buf)-p, "\n%08X: ", i+offset);
}
if (i<len) {
c = *addr++;
p += slprintf(buf+p, sizeof(buf)-p, "%02X ", c);
tmp[i%16] = c>=32 ? c : '.';
tmp[(i%16)+1] = '\0';
} else {
p += slprintf(buf+p, sizeof(buf)-p, " ");
}
if (i%16==15) {
p += slprintf(buf+p, sizeof(buf)-p, " %s", tmp);
if (i>=len) {
break;
}
}
}
}
buf[sizeof(buf)-1] = '\0';
return buf;
}
#endif
/* }}} */
/* {{{ php_jpg_get16
   Get 16 bits in Motorola (big endian) order, as always used in the JPEG header.
*/
static int php_jpg_get16(void *value)
{
return (((uchar *)value)[0] << 8) | ((uchar *)value)[1];
}
/* }}} */
/* {{{ php_ifd_get16u
* Convert a 16 bit unsigned value from file's native byte order */
static int php_ifd_get16u(void *value, int motorola_intel)
{
if (motorola_intel) {
return (((uchar *)value)[0] << 8) | ((uchar *)value)[1];
} else {
return (((uchar *)value)[1] << 8) | ((uchar *)value)[0];
}
}
/* }}} */
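/* Example: the byte pair {0x12, 0x34} decodes as 0x1234 when
 * motorola_intel is non-zero (big endian) and as 0x3412 otherwise. */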
/* {{{ php_ifd_get16s
* Convert a 16 bit signed value from file's native byte order */
static signed short php_ifd_get16s(void *value, int motorola_intel)
{
return (signed short)php_ifd_get16u(value, motorola_intel);
}
/* }}} */
/* {{{ php_ifd_get32s
* Convert a 32 bit signed value from file's native byte order */
static int php_ifd_get32s(void *value, int motorola_intel)
{
if (motorola_intel) {
return (((char *)value)[0] << 24)
| (((uchar *)value)[1] << 16)
| (((uchar *)value)[2] << 8 )
| (((uchar *)value)[3] );
} else {
return (((char *)value)[3] << 24)
| (((uchar *)value)[2] << 16)
| (((uchar *)value)[1] << 8 )
| (((uchar *)value)[0] );
}
}
/* }}} */
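/* Note: the most significant byte above is deliberately read through a
 * plain (char *) cast so its sign bit propagates into the upper bits of
 * the int result; the remaining bytes go through (uchar *). */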
/* {{{ php_ifd_get32u
 * Convert a 32 bit unsigned value from file's native byte order */
static unsigned php_ifd_get32u(void *value, int motorola_intel)
{
return (unsigned)php_ifd_get32s(value, motorola_intel) & 0xffffffff;
}
/* }}} */
/* {{{ php_ifd_set16u
* Write 16 bit unsigned value to data */
static void php_ifd_set16u(char *data, unsigned int value, int motorola_intel)
{
if (motorola_intel) {
data[0] = (value & 0xFF00) >> 8;
data[1] = (value & 0x00FF);
} else {
data[1] = (value & 0xFF00) >> 8;
data[0] = (value & 0x00FF);
}
}
/* }}} */
/* {{{ php_ifd_set32u
 * Write a 32 bit unsigned value to data */
static void php_ifd_set32u(char *data, size_t value, int motorola_intel)
{
if (motorola_intel) {
data[0] = (value & 0xFF000000) >> 24;
data[1] = (value & 0x00FF0000) >> 16;
data[2] = (value & 0x0000FF00) >> 8;
data[3] = (value & 0x000000FF);
} else {
data[3] = (value & 0xFF000000) >> 24;
data[2] = (value & 0x00FF0000) >> 16;
data[1] = (value & 0x0000FF00) >> 8;
data[0] = (value & 0x000000FF);
}
}
/* }}} */
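/* The get/set helpers are inverses for a given byte order: for any value
 * that fits in 32 bits, php_ifd_get32u(buf, mi) returns v again after
 * php_ifd_set32u(buf, v, mi). */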
#ifdef EXIF_DEBUG
char * exif_dump_data(int *dump_free, int format, int components, int length, int motorola_intel, char *value_ptr TSRMLS_DC) /* {{{ */
{
char *dump;
int len;
*dump_free = 0;
if (format == TAG_FMT_STRING) {
return value_ptr ? value_ptr : "<no data>";
}
if (format == TAG_FMT_UNDEFINED) {
return "<undefined>\n";
}
if (format == TAG_FMT_IFD) {
return "";
}
if (format == TAG_FMT_SINGLE || format == TAG_FMT_DOUBLE) {
return "<not implemented>";
}
*dump_free = 1;
if (components > 1) {
len = spprintf(&dump, 0, "(%d,%d) {", components, length);
} else {
len = spprintf(&dump, 0, "{");
}
while(components > 0) {
switch(format) {
case TAG_FMT_BYTE:
case TAG_FMT_UNDEFINED:
case TAG_FMT_STRING:
case TAG_FMT_SBYTE:
dump = erealloc(dump, len + 4 + 1);
snprintf(dump + len, 4 + 1, "0x%02X", *value_ptr);
len += 4;
value_ptr++;
break;
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
dump = erealloc(dump, len + 6 + 1);
snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get16s(value_ptr, motorola_intel));
len += 6;
value_ptr += 2;
break;
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
dump = erealloc(dump, len + 6 + 1);
snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get32s(value_ptr, motorola_intel));
len += 6;
value_ptr += 4;
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
dump = erealloc(dump, len + 13 + 1);
snprintf(dump + len, 13 + 1, "0x%04X/0x%04X", php_ifd_get32s(value_ptr, motorola_intel), php_ifd_get32s(value_ptr+4, motorola_intel));
len += 13;
value_ptr += 8;
break;
}
		components--;
		if (components > 0) {
			/* separator only between values, not after the last one */
			dump = erealloc(dump, len + 2 + 1);
			snprintf(dump + len, 2 + 1, ", ");
			len += 2;
		}
}
dump = erealloc(dump, len + 1 + 1);
snprintf(dump + len, 1 + 1, "}");
return dump;
}
/* }}} */
#endif
/* {{{ exif_convert_any_format
* Evaluate number, be it int, rational, or float from directory. */
static double exif_convert_any_format(void *value, int format, int motorola_intel TSRMLS_DC)
{
int s_den;
unsigned u_den;
switch(format) {
case TAG_FMT_SBYTE: return *(signed char *)value;
case TAG_FMT_BYTE: return *(uchar *)value;
case TAG_FMT_USHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_ULONG: return php_ifd_get32u(value, motorola_intel);
case TAG_FMT_URATIONAL:
u_den = php_ifd_get32u(4+(char *)value, motorola_intel);
if (u_den == 0) {
return 0;
} else {
return (double)php_ifd_get32u(value, motorola_intel) / u_den;
}
case TAG_FMT_SRATIONAL:
s_den = php_ifd_get32s(4+(char *)value, motorola_intel);
if (s_den == 0) {
return 0;
} else {
return (double)php_ifd_get32s(value, motorola_intel) / s_den;
}
case TAG_FMT_SSHORT: return (signed short)php_ifd_get16u(value, motorola_intel);
case TAG_FMT_SLONG: return php_ifd_get32s(value, motorola_intel);
/* Not sure if this is correct (never seen float used in Exif format) */
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type single");
#endif
return (double)*(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type double");
#endif
return *(double *)value;
}
return 0;
}
/* }}} */
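/* Worked example: an SRATIONAL with num = -1 and den = 3 converts to
 * roughly -0.333, while a zero denominator yields 0 rather than dividing
 * by zero. */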
/* {{{ exif_convert_any_to_int
 * Evaluate number, be it int, rational, or float from directory, and convert it to an integer. */
static size_t exif_convert_any_to_int(void *value, int format, int motorola_intel TSRMLS_DC)
{
int s_den;
unsigned u_den;
switch(format) {
case TAG_FMT_SBYTE: return *(signed char *)value;
case TAG_FMT_BYTE: return *(uchar *)value;
case TAG_FMT_USHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_ULONG: return php_ifd_get32u(value, motorola_intel);
case TAG_FMT_URATIONAL:
u_den = php_ifd_get32u(4+(char *)value, motorola_intel);
if (u_den == 0) {
return 0;
} else {
return php_ifd_get32u(value, motorola_intel) / u_den;
}
case TAG_FMT_SRATIONAL:
s_den = php_ifd_get32s(4+(char *)value, motorola_intel);
if (s_den == 0) {
return 0;
} else {
return (size_t)((double)php_ifd_get32s(value, motorola_intel) / s_den);
}
case TAG_FMT_SSHORT: return php_ifd_get16u(value, motorola_intel);
case TAG_FMT_SLONG: return php_ifd_get32s(value, motorola_intel);
/* Not sure if this is correct (never seen float used in Exif format) */
case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type single");
#endif
return (size_t)*(float *)value;
case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Found value of type double");
#endif
return (size_t)*(double *)value;
}
return 0;
}
/* }}} */
/* {{{ struct image_info_value, image_info_list
*/
#ifndef WORD
#define WORD unsigned short
#endif
#ifndef DWORD
#define DWORD unsigned int
#endif
typedef struct {
int num;
int den;
} signed_rational;
typedef struct {
unsigned int num;
unsigned int den;
} unsigned_rational;
typedef union _image_info_value {
char *s;
unsigned u;
int i;
float f;
double d;
signed_rational sr;
unsigned_rational ur;
union _image_info_value *list;
} image_info_value;
typedef struct {
WORD tag;
WORD format;
DWORD length;
DWORD dummy; /* value ptr of tiff directory entry */
char *name;
image_info_value value;
} image_info_data;
typedef struct {
int count;
image_info_data *list;
} image_info_list;
/* }}} */
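/* Storage convention used throughout: a single scalar lives directly in
 * image_info_data.value, while length > 1 switches value.list to a
 * separately allocated array of image_info_value (see exif_iif_add_value()
 * below); strings and UNDEFINED data always use value.s. */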
/* {{{ exif_get_sectionname
Returns the name of a section
*/
#define SECTION_FILE 0
#define SECTION_COMPUTED 1
#define SECTION_ANY_TAG 2
#define SECTION_IFD0 3
#define SECTION_THUMBNAIL 4
#define SECTION_COMMENT 5
#define SECTION_APP0 6
#define SECTION_EXIF 7
#define SECTION_FPIX 8
#define SECTION_GPS 9
#define SECTION_INTEROP 10
#define SECTION_APP12 11
#define SECTION_WINXP 12
#define SECTION_MAKERNOTE 13
#define SECTION_COUNT 14
#define FOUND_FILE (1<<SECTION_FILE)
#define FOUND_COMPUTED (1<<SECTION_COMPUTED)
#define FOUND_ANY_TAG (1<<SECTION_ANY_TAG)
#define FOUND_IFD0 (1<<SECTION_IFD0)
#define FOUND_THUMBNAIL (1<<SECTION_THUMBNAIL)
#define FOUND_COMMENT (1<<SECTION_COMMENT)
#define FOUND_APP0 (1<<SECTION_APP0)
#define FOUND_EXIF (1<<SECTION_EXIF)
#define FOUND_FPIX (1<<SECTION_FPIX)
#define FOUND_GPS (1<<SECTION_GPS)
#define FOUND_INTEROP (1<<SECTION_INTEROP)
#define FOUND_APP12 (1<<SECTION_APP12)
#define FOUND_WINXP (1<<SECTION_WINXP)
#define FOUND_MAKERNOTE (1<<SECTION_MAKERNOTE)
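/* sections_found is a bitmask over these FOUND_* flags; for example
 * (FOUND_IFD0 | FOUND_EXIF) records that both an IFD0 and an EXIF section
 * were seen. */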
static char *exif_get_sectionname(int section)
{
switch(section) {
case SECTION_FILE: return "FILE";
case SECTION_COMPUTED: return "COMPUTED";
case SECTION_ANY_TAG: return "ANY_TAG";
case SECTION_IFD0: return "IFD0";
case SECTION_THUMBNAIL: return "THUMBNAIL";
case SECTION_COMMENT: return "COMMENT";
case SECTION_APP0: return "APP0";
case SECTION_EXIF: return "EXIF";
case SECTION_FPIX: return "FPIX";
case SECTION_GPS: return "GPS";
case SECTION_INTEROP: return "INTEROP";
case SECTION_APP12: return "APP12";
case SECTION_WINXP: return "WINXP";
case SECTION_MAKERNOTE: return "MAKERNOTE";
}
return "";
}
static tag_table_type exif_get_tag_table(int section)
{
switch(section) {
case SECTION_FILE: return &tag_table_IFD[0];
case SECTION_COMPUTED: return &tag_table_IFD[0];
case SECTION_ANY_TAG: return &tag_table_IFD[0];
case SECTION_IFD0: return &tag_table_IFD[0];
case SECTION_THUMBNAIL: return &tag_table_IFD[0];
case SECTION_COMMENT: return &tag_table_IFD[0];
case SECTION_APP0: return &tag_table_IFD[0];
case SECTION_EXIF: return &tag_table_IFD[0];
case SECTION_FPIX: return &tag_table_IFD[0];
case SECTION_GPS: return &tag_table_GPS[0];
case SECTION_INTEROP: return &tag_table_IOP[0];
case SECTION_APP12: return &tag_table_IFD[0];
case SECTION_WINXP: return &tag_table_IFD[0];
}
return &tag_table_IFD[0];
}
/* }}} */
/* {{{ exif_get_sectionlist
   Return a list of section names specified by sectionlist. The return value must be freed.
*/
static char *exif_get_sectionlist(int sectionlist TSRMLS_DC)
{
int i, len, ml = 0;
char *sections;
for(i=0; i<SECTION_COUNT; i++) {
ml += strlen(exif_get_sectionname(i))+2;
}
sections = safe_emalloc(ml, 1, 1);
sections[0] = '\0';
len = 0;
for(i=0; i<SECTION_COUNT; i++) {
if (sectionlist&(1<<i)) {
snprintf(sections+len, ml-len, "%s, ", exif_get_sectionname(i));
len = strlen(sections);
}
}
if (len>2)
sections[len-2] = '\0';
return sections;
}
/* }}} */
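/* Usage example: exif_get_sectionlist(FOUND_IFD0 | FOUND_GPS TSRMLS_CC)
 * yields the string "IFD0, GPS"; the buffer comes from safe_emalloc() and
 * the caller must efree() it. */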
/* {{{ struct image_info_type
   This structure stores Exif header image elements in a simple manner.
   It is used to store camera data as extracted from the various ways
   that it can be stored in an Exif header.
*/
typedef struct {
int type;
size_t size;
uchar *data;
} file_section;
typedef struct {
int count;
file_section *list;
} file_section_list;
typedef struct {
image_filetype filetype;
size_t width, height;
size_t size;
size_t offset;
char *data;
} thumbnail_data;
typedef struct {
char *value;
size_t size;
int tag;
} xp_field_type;
typedef struct {
int count;
xp_field_type *list;
} xp_field_list;
/* This structure gathers everything extracted from one image, plus the parser state. */
typedef struct {
php_stream *infile;
char *FileName;
time_t FileDateTime;
size_t FileSize;
image_filetype FileType;
int Height, Width;
int IsColor;
char *make;
char *model;
float ApertureFNumber;
float ExposureTime;
double FocalplaneUnits;
float CCDWidth;
double FocalplaneXRes;
size_t ExifImageWidth;
float FocalLength;
float Distance;
int motorola_intel; /* 1 Motorola; 0 Intel */
char *UserComment;
int UserCommentLength;
char *UserCommentEncoding;
char *encode_unicode;
char *decode_unicode_be;
char *decode_unicode_le;
char *encode_jis;
char *decode_jis_be;
char *decode_jis_le;
char *Copyright;/* EXIF standard defines Copyright as "<Photographer> [ '\0' <Editor> ] ['\0']" */
char *CopyrightPhotographer;
char *CopyrightEditor;
xp_field_list xp_fields;
thumbnail_data Thumbnail;
/* other */
int sections_found; /* FOUND_<marker> */
image_info_list info_list[SECTION_COUNT];
/* for parsing */
int read_thumbnail;
int read_all;
int ifd_nesting_level;
/* internal */
file_section_list file;
} image_info_type;
/* }}} */
/* {{{ exif_error_docref */
static void exif_error_docref(const char *docref EXIFERR_DC, const image_info_type *ImageInfo, int type, const char *format, ...)
{
va_list args;
va_start(args, format);
#ifdef EXIF_DEBUG
{
char *buf;
spprintf(&buf, 0, "%s(%d): %s", _file, _line, format);
php_verror(docref, ImageInfo->FileName?ImageInfo->FileName:"", type, buf, args TSRMLS_CC);
efree(buf);
}
#else
php_verror(docref, ImageInfo->FileName?ImageInfo->FileName:"", type, format, args TSRMLS_CC);
#endif
va_end(args);
}
/* }}} */
/* {{{ jpeg_sof_info
*/
typedef struct {
int bits_per_sample;
size_t width;
size_t height;
int num_components;
} jpeg_sof_info;
/* }}} */
/* {{{ exif_file_sections_add
   Add a file_section to image_info.
   Returns the index of the added section. If size > 0 and data == NULL, a buffer of the given size is allocated.
*/
static int exif_file_sections_add(image_info_type *ImageInfo, int type, size_t size, uchar *data)
{
file_section *tmp;
int count = ImageInfo->file.count;
tmp = safe_erealloc(ImageInfo->file.list, (count+1), sizeof(file_section), 0);
ImageInfo->file.list = tmp;
ImageInfo->file.list[count].type = 0xFFFF;
ImageInfo->file.list[count].data = NULL;
ImageInfo->file.list[count].size = 0;
ImageInfo->file.count = count+1;
if (!size) {
data = NULL;
} else if (data == NULL) {
data = safe_emalloc(size, 1, 0);
}
ImageInfo->file.list[count].type = type;
ImageInfo->file.list[count].data = data;
ImageInfo->file.list[count].size = size;
return count;
}
/* }}} */
/* {{{ exif_file_sections_realloc
   Reallocate a file section. Returns 0 on success and -1 on failure.
*/
static int exif_file_sections_realloc(image_info_type *ImageInfo, int section_index, size_t size TSRMLS_DC)
{
void *tmp;
/* This is not a malloc/realloc check. It is a plausibility check for the
* function parameters (requirements engineering).
*/
if (section_index >= ImageInfo->file.count) {
EXIF_ERRLOG_FSREALLOC(ImageInfo)
return -1;
}
tmp = safe_erealloc(ImageInfo->file.list[section_index].data, 1, size, 0);
ImageInfo->file.list[section_index].data = tmp;
ImageInfo->file.list[section_index].size = size;
return 0;
}
/* }}} */
/* {{{ exif_file_sections_free
Discard all file_sections in ImageInfo
*/
static int exif_file_sections_free(image_info_type *ImageInfo)
{
int i;
if (ImageInfo->file.count) {
for (i=0; i<ImageInfo->file.count; i++) {
EFREE_IF(ImageInfo->file.list[i].data);
}
}
EFREE_IF(ImageInfo->file.list);
ImageInfo->file.count = 0;
return TRUE;
}
/* }}} */
/* {{{ exif_iif_add_value
Add a value to image_info
*/
static void exif_iif_add_value(image_info_type *image_info, int section_index, char *name, int tag, int format, int length, void* value, int motorola_intel TSRMLS_DC)
{
size_t idex;
void *vptr;
image_info_value *info_value;
image_info_data *info_data;
image_info_data *list;
if (length < 0) {
return;
}
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
memset(info_data, 0, sizeof(image_info_data));
info_data->tag = tag;
info_data->format = format;
info_data->length = length;
info_data->name = estrdup(name);
info_value = &info_data->value;
switch (format) {
case TAG_FMT_STRING:
if (value) {
length = php_strnlen(value, length);
info_value->s = estrndup(value, length);
info_data->length = length;
} else {
info_data->length = 0;
info_value->s = estrdup("");
}
break;
default:
			/* The standard defines more types, but we do not interpret them.
			 * Instead of returning early, store the raw data with type
			 * UNDEFINED so users who know the layout can still handle it.
			 */
info_data->tag = TAG_FMT_UNDEFINED;/* otherwise not freed from memory */
case TAG_FMT_SBYTE:
case TAG_FMT_BYTE:
/* in contrast to strings bytes do not need to allocate buffer for NULL if length==0 */
if (!length)
break;
case TAG_FMT_UNDEFINED:
if (value) {
if (tag == TAG_MAKER_NOTE) {
length = MIN(length, strlen(value));
}
/* do not recompute length here */
info_value->s = estrndup(value, length);
info_data->length = length;
} else {
info_data->length = 0;
info_value->s = estrdup("");
}
break;
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
case TAG_FMT_URATIONAL:
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
case TAG_FMT_SRATIONAL:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
if (length==0) {
break;
} else
if (length>1) {
info_value->list = safe_emalloc(length, sizeof(image_info_value), 0);
} else {
info_value = &info_data->value;
}
for (idex=0,vptr=value; idex<(size_t)length; idex++,vptr=(char *) vptr + php_tiff_bytes_per_format[format]) {
if (length>1) {
info_value = &info_data->value.list[idex];
}
switch (format) {
case TAG_FMT_USHORT:
info_value->u = php_ifd_get16u(vptr, motorola_intel);
break;
case TAG_FMT_ULONG:
info_value->u = php_ifd_get32u(vptr, motorola_intel);
break;
case TAG_FMT_URATIONAL:
info_value->ur.num = php_ifd_get32u(vptr, motorola_intel);
info_value->ur.den = php_ifd_get32u(4+(char *)vptr, motorola_intel);
break;
case TAG_FMT_SSHORT:
info_value->i = php_ifd_get16s(vptr, motorola_intel);
break;
case TAG_FMT_SLONG:
info_value->i = php_ifd_get32s(vptr, motorola_intel);
break;
case TAG_FMT_SRATIONAL:
info_value->sr.num = php_ifd_get32u(vptr, motorola_intel);
info_value->sr.den = php_ifd_get32u(4+(char *)vptr, motorola_intel);
break;
				case TAG_FMT_SINGLE:
#ifdef EXIF_DEBUG
					php_error_docref(NULL TSRMLS_CC, E_WARNING, "Found value of type single");
#endif
					/* read the current element (vptr), not the start of the
					 * buffer, and do not fall through into the double case */
					info_value->f = *(float *)vptr;
					break;
				case TAG_FMT_DOUBLE:
#ifdef EXIF_DEBUG
					php_error_docref(NULL TSRMLS_CC, E_WARNING, "Found value of type double");
#endif
					info_value->d = *(double *)vptr;
break;
}
}
}
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
/* }}} */
/* {{{ exif_iif_add_tag
Add a tag from IFD to image_info
*/
static void exif_iif_add_tag(image_info_type *image_info, int section_index, char *name, int tag, int format, size_t length, void* value TSRMLS_DC)
{
exif_iif_add_value(image_info, section_index, name, tag, format, (int)length, value, image_info->motorola_intel TSRMLS_CC);
}
/* }}} */
/* {{{ exif_iif_add_int
Add an int value to image_info
*/
static void exif_iif_add_int(image_info_type *image_info, int section_index, char *name, int value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_SLONG;
info_data->length = 1;
info_data->name = estrdup(name);
info_data->value.i = value;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
/* }}} */
/* {{{ exif_iif_add_str
Add a string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_str(image_info_type *image_info, int section_index, char *name, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_STRING;
info_data->length = 1;
info_data->name = estrdup(name);
info_data->value.s = estrdup(value);
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
}
/* }}} */
/* {{{ exif_iif_add_fmt
Add a format string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_fmt(image_info_type *image_info, int section_index, char *name TSRMLS_DC, char *value, ...)
{
char *tmp;
va_list arglist;
va_start(arglist, value);
if (value) {
vspprintf(&tmp, 0, value, arglist);
exif_iif_add_str(image_info, section_index, name, tmp TSRMLS_CC);
efree(tmp);
}
va_end(arglist);
}
/* }}} */
/* {{{ exif_iif_add_str
Add a string value to image_info MUST BE NUL TERMINATED
*/
static void exif_iif_add_buffer(image_info_type *image_info, int section_index, char *name, int length, char *value TSRMLS_DC)
{
image_info_data *info_data;
image_info_data *list;
if (value) {
list = safe_erealloc(image_info->info_list[section_index].list, (image_info->info_list[section_index].count+1), sizeof(image_info_data), 0);
image_info->info_list[section_index].list = list;
info_data = &image_info->info_list[section_index].list[image_info->info_list[section_index].count];
info_data->tag = TAG_NONE;
info_data->format = TAG_FMT_UNDEFINED;
info_data->length = length;
info_data->name = estrdup(name);
info_data->value.s = safe_emalloc(length, 1, 1);
memcpy(info_data->value.s, value, length);
info_data->value.s[length] = 0;
image_info->sections_found |= 1<<section_index;
image_info->info_list[section_index].count++;
}
}
/* }}} */
/* {{{ exif_iif_free
Free memory allocated for image_info
*/
static void exif_iif_free(image_info_type *image_info, int section_index) {
int i;
void *f; /* faster */
if (image_info->info_list[section_index].count) {
for (i=0; i < image_info->info_list[section_index].count; i++) {
if ((f=image_info->info_list[section_index].list[i].name) != NULL) {
efree(f);
}
switch(image_info->info_list[section_index].list[i].format) {
case TAG_FMT_SBYTE:
case TAG_FMT_BYTE:
/* in contrast to strings bytes do not need to allocate buffer for NULL if length==0 */
if (image_info->info_list[section_index].list[i].length<1)
break;
default:
case TAG_FMT_UNDEFINED:
case TAG_FMT_STRING:
if ((f=image_info->info_list[section_index].list[i].value.s) != NULL) {
efree(f);
}
break;
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
case TAG_FMT_URATIONAL:
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
case TAG_FMT_SRATIONAL:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
/* nothing to do here */
if (image_info->info_list[section_index].list[i].length > 1) {
if ((f=image_info->info_list[section_index].list[i].value.list) != NULL) {
efree(f);
}
}
break;
}
}
}
EFREE_IF(image_info->info_list[section_index].list);
}
/* }}} */
/* {{{ add_assoc_image_info
* Add image_info to associative array value. */
static void add_assoc_image_info(zval *value, int sub_array, image_info_type *image_info, int section_index TSRMLS_DC)
{
char buffer[64], *val, *name, uname[64];
int i, ap, l, b, idx=0, unknown=0;
#ifdef EXIF_DEBUG
int info_tag;
#endif
image_info_value *info_value;
image_info_data *info_data;
zval *tmpi, *array = NULL;
#ifdef EXIF_DEBUG
/* php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Adding %d infos from section %s", image_info->info_list[section_index].count, exif_get_sectionname(section_index));*/
#endif
if (image_info->info_list[section_index].count) {
if (sub_array) {
MAKE_STD_ZVAL(tmpi);
array_init(tmpi);
} else {
tmpi = value;
}
for(i=0; i<image_info->info_list[section_index].count; i++) {
info_data = &image_info->info_list[section_index].list[i];
#ifdef EXIF_DEBUG
info_tag = info_data->tag; /* conversion */
#endif
info_value = &info_data->value;
if (!(name = info_data->name)) {
snprintf(uname, sizeof(uname), "%d", unknown++);
name = uname;
}
#ifdef EXIF_DEBUG
/* php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Adding infos: tag(0x%04X,%12s,L=0x%04X): %s", info_tag, exif_get_tagname(info_tag, buffer, -12, exif_get_tag_table(section_index) TSRMLS_CC), info_data->length, info_data->format==TAG_FMT_STRING?(info_value&&info_value->s?info_value->s:"<no data>"):exif_get_tagformat(info_data->format));*/
#endif
if (info_data->length==0) {
add_assoc_null(tmpi, name);
} else {
switch (info_data->format) {
default:
					/* The standard defines more types, but we do not interpret
					 * them; treat unknown formats like UNDEFINED so users who
					 * know the layout can still handle the data.
					 */
case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:
case TAG_FMT_UNDEFINED:
if (!info_value->s) {
add_assoc_stringl(tmpi, name, "", 0, 1);
} else {
add_assoc_stringl(tmpi, name, info_value->s, info_data->length, 1);
}
break;
case TAG_FMT_STRING:
if (!(val = info_value->s)) {
val = "";
}
if (section_index==SECTION_COMMENT) {
add_index_string(tmpi, idx++, val, 1);
} else {
add_assoc_string(tmpi, name, val, 1);
}
break;
case TAG_FMT_URATIONAL:
case TAG_FMT_SRATIONAL:
/*case TAG_FMT_BYTE:
case TAG_FMT_SBYTE:*/
case TAG_FMT_USHORT:
case TAG_FMT_SSHORT:
case TAG_FMT_SINGLE:
case TAG_FMT_DOUBLE:
case TAG_FMT_ULONG:
case TAG_FMT_SLONG:
/* now the rest, first see if it becomes an array */
if ((l = info_data->length) > 1) {
array = NULL;
MAKE_STD_ZVAL(array);
array_init(array);
}
for(ap=0; ap<l; ap++) {
if (l>1) {
info_value = &info_data->value.list[ap];
}
switch (info_data->format) {
case TAG_FMT_BYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
add_index_long(array, b, (int)(info_value->s[b]));
}
break;
}
case TAG_FMT_USHORT:
case TAG_FMT_ULONG:
if (l==1) {
add_assoc_long(tmpi, name, (int)info_value->u);
} else {
add_index_long(array, ap, (int)info_value->u);
}
break;
case TAG_FMT_URATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->ur.num, info_value->ur.den);
if (l==1) {
add_assoc_string(tmpi, name, buffer, 1);
} else {
add_index_string(array, ap, buffer, 1);
}
break;
case TAG_FMT_SBYTE:
if (l>1) {
info_value = &info_data->value;
for (b=0;b<l;b++) {
										add_index_long(array, b, (int)info_value->s[b]);
}
break;
}
case TAG_FMT_SSHORT:
case TAG_FMT_SLONG:
if (l==1) {
add_assoc_long(tmpi, name, info_value->i);
} else {
add_index_long(array, ap, info_value->i);
}
break;
case TAG_FMT_SRATIONAL:
snprintf(buffer, sizeof(buffer), "%i/%i", info_value->sr.num, info_value->sr.den);
if (l==1) {
add_assoc_string(tmpi, name, buffer, 1);
} else {
add_index_string(array, ap, buffer, 1);
}
break;
case TAG_FMT_SINGLE:
if (l==1) {
add_assoc_double(tmpi, name, info_value->f);
} else {
add_index_double(array, ap, info_value->f);
}
break;
case TAG_FMT_DOUBLE:
if (l==1) {
add_assoc_double(tmpi, name, info_value->d);
} else {
add_index_double(array, ap, info_value->d);
}
break;
}
info_value = &info_data->value.list[ap];
}
if (l>1) {
add_assoc_zval(tmpi, name, array);
}
break;
}
}
}
if (sub_array) {
add_assoc_zval(value, exif_get_sectionname(section_index), tmpi);
}
}
}
/* }}} */
/* {{{ Markers
JPEG markers consist of one or more 0xFF bytes, followed by a marker
code byte (which is not an FF). Here are the marker codes of interest
in this program. (See jdmarker.c for a more complete list.)
*/
#define M_TEM 0x01 /* temp for arithmetic coding */
#define M_RES 0x02 /* reserved */
#define M_SOF0 0xC0 /* Start Of Frame N */
#define M_SOF1 0xC1 /* N indicates which compression process */
#define M_SOF2 0xC2 /* Only SOF0-SOF2 are now in common use */
#define M_SOF3 0xC3
#define M_DHT 0xC4
#define M_SOF5 0xC5 /* NB: codes C4 and CC are NOT SOF markers */
#define M_SOF6 0xC6
#define M_SOF7 0xC7
#define M_JPEG 0x08 /* reserved for extensions */
#define M_SOF9 0xC9
#define M_SOF10 0xCA
#define M_SOF11 0xCB
#define M_DAC 0xCC /* arithmetic table */
#define M_SOF13 0xCD
#define M_SOF14 0xCE
#define M_SOF15 0xCF
#define M_RST0 0xD0 /* restart segment */
#define M_RST1 0xD1
#define M_RST2 0xD2
#define M_RST3 0xD3
#define M_RST4 0xD4
#define M_RST5 0xD5
#define M_RST6 0xD6
#define M_RST7 0xD7
#define M_SOI 0xD8 /* Start Of Image (beginning of datastream) */
#define M_EOI 0xD9 /* End Of Image (end of datastream) */
#define M_SOS 0xDA /* Start Of Scan (begins compressed data) */
#define M_DQT 0xDB
#define M_DNL 0xDC
#define M_DRI 0xDD
#define M_DHP 0xDE
#define M_EXP 0xDF
#define M_APP0   0xE0 /* JPEG: 'JFIF' (and additional 'JFXX') */
#define M_EXIF 0xE1 /* Exif Attribute Information */
#define M_APP2 0xE2 /* Flash Pix Extension Data? */
#define M_APP3 0xE3
#define M_APP4 0xE4
#define M_APP5 0xE5
#define M_APP6 0xE6
#define M_APP7 0xE7
#define M_APP8 0xE8
#define M_APP9 0xE9
#define M_APP10 0xEA
#define M_APP11 0xEB
#define M_APP12 0xEC
#define M_APP13 0xED /* IPTC International Press Telecommunications Council */
#define M_APP14 0xEE /* Software, Copyright? */
#define M_APP15 0xEF
#define M_JPG0 0xF0
#define M_JPG1 0xF1
#define M_JPG2 0xF2
#define M_JPG3 0xF3
#define M_JPG4 0xF4
#define M_JPG5 0xF5
#define M_JPG6 0xF6
#define M_JPG7 0xF7
#define M_JPG8 0xF8
#define M_JPG9 0xF9
#define M_JPG10 0xFA
#define M_JPG11 0xFB
#define M_JPG12 0xFC
#define M_JPG13 0xFD
#define M_COM 0xFE /* COMment */
#define M_PSEUDO 0x123 /* Extra value. */
/* }}} */
/* {{{ jpeg2000 markers
*/
/* Markers x30 - x3F do not have a segment */
/* Markers x00, x01, xFE, xC0 - xDF ISO/IEC 10918-1 -> M_<xx> */
/* Markers xF0 - xF7 ISO/IEC 10918-3 */
/* Markers xF7 - xF8 ISO/IEC 14495-1 */
/* XY=Main/Tile-header:(R:required, N:not_allowed, O:optional, L:last_marker) */
#define JC_SOC 0x4F /* NN, Start of codestream */
#define JC_SIZ 0x51 /* RN, Image and tile size */
#define JC_COD  0x52 /* RO, Coding style default */
#define JC_COC 0x53 /* OO, Coding style component */
#define JC_TLM 0x55 /* ON, Tile part length main header */
#define JC_PLM 0x57 /* ON, Packet length main header */
#define JC_PLT 0x58 /* NO, Packet length tile part header */
#define JC_QCD 0x5C /* RO, Quantization default */
#define JC_QCC 0x5D /* OO, Quantization component */
#define JC_RGN 0x5E /* OO, Region of interest */
#define JC_POD 0x5F /* OO, Progression order default */
#define JC_PPM 0x60 /* ON, Packed packet headers main header */
#define JC_PPT  0x61 /* NO, Packed packet headers, tile part header */
#define JC_CME 0x64 /* OO, Comment: "LL E <text>" E=0:binary, E=1:ascii */
#define JC_SOT 0x90 /* NR, Start of tile */
#define JC_SOP  0x91 /* NO, Start of packet */
#define JC_EPH 0x92 /* NO, End of packet header */
#define JC_SOD 0x93 /* NL, Start of data */
#define JC_EOC 0xD9 /* NN, End of codestream */
/* }}} */
/* {{{ exif_process_COM
Process a COM marker.
We want to print out the marker contents as legible text;
we must guard against random junk and varying newline representations.
*/
static void exif_process_COM (image_info_type *image_info, char *value, size_t length TSRMLS_DC)
{
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_STRING, length-2, value+2 TSRMLS_CC);
}
/* }}} */
/* {{{ exif_process_CME
Process a CME marker.
We want to print out the marker contents as legible text;
we must guard against random junk and varying newline representations.
*/
#ifdef EXIF_JPEG2000
static void exif_process_CME (image_info_type *image_info, char *value, size_t length TSRMLS_DC)
{
if (length>3) {
switch(value[2]) {
case 0:
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_UNDEFINED, length, value TSRMLS_CC);
break;
case 1:
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_STRING, length, value);
break;
default:
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Undefined JPEG2000 comment encoding");
break;
}
} else {
exif_iif_add_tag(image_info, SECTION_COMMENT, "Comment", TAG_COMPUTED_VALUE, TAG_FMT_UNDEFINED, 0, NULL);
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "JPEG2000 comment section too small");
}
}
#endif
/* }}} */
/* {{{ exif_process_SOFn
* Process a SOFn marker. This is useful for the image dimensions */
static void exif_process_SOFn (uchar *Data, int marker, jpeg_sof_info *result)
{
	/* 0xFF SOFn SectLen(2) Bits(1) Height(2) Width(2) Channels(1) 3*Channels (1) */
result->bits_per_sample = Data[2];
result->height = php_jpg_get16(Data+3);
result->width = php_jpg_get16(Data+5);
result->num_components = Data[7];
/* switch (marker) {
case M_SOF0: process = "Baseline"; break;
case M_SOF1: process = "Extended sequential"; break;
case M_SOF2: process = "Progressive"; break;
case M_SOF3: process = "Lossless"; break;
case M_SOF5: process = "Differential sequential"; break;
case M_SOF6: process = "Differential progressive"; break;
case M_SOF7: process = "Differential lossless"; break;
case M_SOF9: process = "Extended sequential, arithmetic coding"; break;
case M_SOF10: process = "Progressive, arithmetic coding"; break;
case M_SOF11: process = "Lossless, arithmetic coding"; break;
case M_SOF13: process = "Differential sequential, arithmetic coding"; break;
case M_SOF14: process = "Differential progressive, arithmetic coding"; break;
case M_SOF15: process = "Differential lossless, arithmetic coding"; break;
default: process = "Unknown"; break;
}*/
}
/* }}} */
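/* Worked example: for a SOFn segment whose data (starting at the 2 byte
 * section length) is 00 11 08 04 38 05 A0 03 ..., the fields decode to
 * bits_per_sample = 8, height = 0x0438 = 1080, width = 0x05A0 = 1440 and
 * num_components = 3. */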
/* forward declarations */
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC);
static int exif_process_IFD_TAG( image_info_type *ImageInfo, char *dir_entry, char *offset_base, size_t IFDlength, size_t displacement, int section_index, int ReadNextIFD, tag_table_type tag_table TSRMLS_DC);
/* {{{ exif_get_markername
Get name of marker */
#ifdef EXIF_DEBUG
static char * exif_get_markername(int marker)
{
switch(marker) {
case 0xC0: return "SOF0";
case 0xC1: return "SOF1";
case 0xC2: return "SOF2";
case 0xC3: return "SOF3";
case 0xC4: return "DHT";
case 0xC5: return "SOF5";
case 0xC6: return "SOF6";
case 0xC7: return "SOF7";
case 0xC9: return "SOF9";
case 0xCA: return "SOF10";
case 0xCB: return "SOF11";
case 0xCD: return "SOF13";
case 0xCE: return "SOF14";
case 0xCF: return "SOF15";
case 0xD8: return "SOI";
case 0xD9: return "EOI";
case 0xDA: return "SOS";
case 0xDB: return "DQT";
case 0xDC: return "DNL";
case 0xDD: return "DRI";
case 0xDE: return "DHP";
case 0xDF: return "EXP";
case 0xE0: return "APP0";
case 0xE1: return "EXIF";
case 0xE2: return "FPIX";
case 0xE3: return "APP3";
case 0xE4: return "APP4";
case 0xE5: return "APP5";
case 0xE6: return "APP6";
case 0xE7: return "APP7";
case 0xE8: return "APP8";
case 0xE9: return "APP9";
case 0xEA: return "APP10";
case 0xEB: return "APP11";
case 0xEC: return "APP12";
case 0xED: return "APP13";
case 0xEE: return "APP14";
case 0xEF: return "APP15";
case 0xF0: return "JPG0";
case 0xFD: return "JPG13";
case 0xFE: return "COM";
case 0x01: return "TEM";
}
return "Unknown";
}
#endif
/* }}} */
/* {{{ proto string exif_tagname(index)
Get headername for index or false if not defined */
PHP_FUNCTION(exif_tagname)
{
long tag;
char *szTemp;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &tag) == FAILURE) {
return;
}
szTemp = exif_get_tagname(tag, NULL, 0, tag_table_IFD TSRMLS_CC);
if (tag < 0 || !szTemp || !szTemp[0]) {
RETURN_FALSE;
}
RETURN_STRING(szTemp, 1)
}
/* }}} */
/* {{{ exif_ifd_make_value
* Create a value for an ifd from an info_data pointer */
static void* exif_ifd_make_value(image_info_data *info_data, int motorola_intel TSRMLS_DC) {
size_t byte_count;
char *value_ptr, *data_ptr;
size_t i;
image_info_value *info_value;
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
value_ptr = safe_emalloc(max(byte_count, 4), 1, 0);
memset(value_ptr, 0, 4);
if (!info_data->length) {
return value_ptr;
}
if (info_data->format == TAG_FMT_UNDEFINED || info_data->format == TAG_FMT_STRING
|| (byte_count>1 && (info_data->format == TAG_FMT_BYTE || info_data->format == TAG_FMT_SBYTE))
) {
memmove(value_ptr, info_data->value.s, byte_count);
return value_ptr;
} else if (info_data->format == TAG_FMT_BYTE) {
*value_ptr = info_data->value.u;
return value_ptr;
} else if (info_data->format == TAG_FMT_SBYTE) {
*value_ptr = info_data->value.i;
return value_ptr;
} else {
data_ptr = value_ptr;
for(i=0; i<info_data->length; i++) {
if (info_data->length==1) {
info_value = &info_data->value;
} else {
info_value = &info_data->value.list[i];
}
switch(info_data->format) {
case TAG_FMT_USHORT:
php_ifd_set16u(data_ptr, info_value->u, motorola_intel);
data_ptr += 2;
break;
case TAG_FMT_ULONG:
php_ifd_set32u(data_ptr, info_value->u, motorola_intel);
data_ptr += 4;
break;
case TAG_FMT_SSHORT:
php_ifd_set16u(data_ptr, info_value->i, motorola_intel);
data_ptr += 2;
break;
case TAG_FMT_SLONG:
php_ifd_set32u(data_ptr, info_value->i, motorola_intel);
data_ptr += 4;
break;
				case TAG_FMT_URATIONAL:
					php_ifd_set32u(data_ptr, info_value->ur.num, motorola_intel);
					php_ifd_set32u(data_ptr+4, info_value->ur.den, motorola_intel);
					data_ptr += 8;
					break;
				case TAG_FMT_SRATIONAL:
					php_ifd_set32u(data_ptr, info_value->sr.num, motorola_intel);
					php_ifd_set32u(data_ptr+4, info_value->sr.den, motorola_intel);
data_ptr += 8;
break;
case TAG_FMT_SINGLE:
memmove(data_ptr, &info_value->f, 4);
data_ptr += 4;
break;
case TAG_FMT_DOUBLE:
memmove(data_ptr, &info_value->d, 8);
data_ptr += 8;
break;
}
}
}
return value_ptr;
}
/* }}} */
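/* Contract of exif_ifd_make_value(): the returned buffer is always at
 * least 4 bytes and zero padded, so values of up to 4 bytes can be copied
 * straight into an IFD entry; the caller frees it with efree(), as
 * exif_thumbnail_build() does below. */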
/* {{{ exif_thumbnail_build
* Check and build thumbnail */
static void exif_thumbnail_build(image_info_type *ImageInfo TSRMLS_DC) {
size_t new_size, new_move, new_value;
char *new_data;
void *value_ptr;
int i, byte_count;
image_info_list *info_list;
image_info_data *info_data;
#ifdef EXIF_DEBUG
char tagname[64];
#endif
if (!ImageInfo->read_thumbnail || !ImageInfo->Thumbnail.offset || !ImageInfo->Thumbnail.size) {
return; /* ignore this call */
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: filetype = %d", ImageInfo->Thumbnail.filetype);
#endif
switch(ImageInfo->Thumbnail.filetype) {
default:
case IMAGE_FILETYPE_JPEG:
/* done */
break;
case IMAGE_FILETYPE_TIFF_II:
case IMAGE_FILETYPE_TIFF_MM:
info_list = &ImageInfo->info_list[SECTION_THUMBNAIL];
new_size = 8 + 2 + info_list->count * 12 + 4;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size of signature + directory(%d): 0x%02X", info_list->count, new_size);
#endif
new_value= new_size; /* offset for ifd values outside ifd directory */
for (i=0; i<info_list->count; i++) {
info_data = &info_list->list[i];
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
if (byte_count > 4) {
new_size += byte_count;
}
}
new_move = new_size;
new_data = safe_erealloc(ImageInfo->Thumbnail.data, 1, ImageInfo->Thumbnail.size, new_size);
ImageInfo->Thumbnail.data = new_data;
memmove(ImageInfo->Thumbnail.data + new_move, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
ImageInfo->Thumbnail.size += new_size;
/* fill in data */
if (ImageInfo->motorola_intel) {
memmove(new_data, "MM\x00\x2a\x00\x00\x00\x08", 8);
} else {
memmove(new_data, "II\x2a\x00\x08\x00\x00\x00", 8);
}
new_data += 8;
php_ifd_set16u(new_data, info_list->count, ImageInfo->motorola_intel);
new_data += 2;
for (i=0; i<info_list->count; i++) {
info_data = &info_list->list[i];
byte_count = php_tiff_bytes_per_format[info_data->format] * info_data->length;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process tag(x%04X=%s): %s%s (%d bytes)", info_data->tag, exif_get_tagname(info_data->tag, tagname, -12, tag_table_IFD TSRMLS_CC), (info_data->length>1)&&info_data->format!=TAG_FMT_UNDEFINED&&info_data->format!=TAG_FMT_STRING?"ARRAY OF ":"", exif_get_tagformat(info_data->format), byte_count);
#endif
if (info_data->tag==TAG_STRIP_OFFSETS || info_data->tag==TAG_JPEG_INTERCHANGE_FORMAT) {
php_ifd_set16u(new_data + 0, info_data->tag, ImageInfo->motorola_intel);
php_ifd_set16u(new_data + 2, TAG_FMT_ULONG, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 4, 1, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 8, new_move, ImageInfo->motorola_intel);
} else {
php_ifd_set16u(new_data + 0, info_data->tag, ImageInfo->motorola_intel);
php_ifd_set16u(new_data + 2, info_data->format, ImageInfo->motorola_intel);
php_ifd_set32u(new_data + 4, info_data->length, ImageInfo->motorola_intel);
value_ptr = exif_ifd_make_value(info_data, ImageInfo->motorola_intel TSRMLS_CC);
if (byte_count <= 4) {
memmove(new_data+8, value_ptr, 4);
} else {
php_ifd_set32u(new_data+8, new_value, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: writing with value offset: 0x%04X + 0x%02X", new_value, byte_count);
#endif
memmove(ImageInfo->Thumbnail.data+new_value, value_ptr, byte_count);
new_value += byte_count;
}
efree(value_ptr);
}
new_data += 12;
}
memset(new_data, 0, 4); /* next ifd pointer */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: created");
#endif
break;
}
}
/* }}} */
/* {{{ exif_thumbnail_extract
* Grab the thumbnail, corrected */
static void exif_thumbnail_extract(image_info_type *ImageInfo, char *offset, size_t length TSRMLS_DC) {
if (ImageInfo->Thumbnail.data) {
exif_error_docref("exif_read_data#error_mult_thumb" EXIFERR_CC, ImageInfo, E_WARNING, "Multiple possible thumbnails");
return; /* Should not happen */
}
if (!ImageInfo->read_thumbnail) {
return; /* ignore this call */
}
/* according to EXIF 2.1, the thumbnail is not supposed to be greater than 64K */
if (ImageInfo->Thumbnail.size >= 65536
|| ImageInfo->Thumbnail.size <= 0
|| ImageInfo->Thumbnail.offset <= 0
) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Illegal thumbnail size/offset");
return;
}
/* Check to make sure we are not going to go past the ExifLength */
if ((ImageInfo->Thumbnail.offset + ImageInfo->Thumbnail.size) > length) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
return;
}
ImageInfo->Thumbnail.data = estrndup(offset + ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
/* }}} */
/* {{{ exif_process_undefined
* Copy a string/buffer in Exif header to a character string and return length of allocated buffer if any. */
static int exif_process_undefined(char **result, char *value, size_t byte_count TSRMLS_DC) {
/* we cannot use strlcpy - here the problem is that we have to copy NUL
* chars up to byte_count, we also have to add a single NUL character to
* force end of string.
* estrndup does not return length
*/
if (byte_count) {
(*result) = estrndup(value, byte_count); /* NULL @ byte_count!!! */
return byte_count+1;
}
return 0;
}
/* }}} */
/* {{{ exif_process_string_raw
* Copy a string in Exif header to a character string and return length of allocated buffer if any. */
static int exif_process_string_raw(char **result, char *value, size_t byte_count) {
/* we cannot use strlcpy - here the problem is that we have to copy NUL
* chars up to byte_count, we also have to add a single NUL character to
* force end of string.
*/
if (byte_count) {
(*result) = safe_emalloc(byte_count, 1, 1);
memcpy(*result, value, byte_count);
(*result)[byte_count] = '\0';
return byte_count+1;
}
return 0;
}
/* }}} */
/* {{{ exif_process_string
* Copy a string in Exif header to a character string and return length of allocated buffer if any.
* In contrast to exif_process_undefined this function always returns a string buffer */
static int exif_process_string(char **result, char *value, size_t byte_count TSRMLS_DC) {
/* we cannot use strlcpy - here the problem is that we cannot use strlen to
* determine the length of the string and we cannot use strlcpy with len=byte_count+1
* because then we might get into an EXCEPTION if we exceed an allocated
* memory page...so we use php_strnlen in conjunction with memcpy and add the NUL
* char.
* estrdup would sometimes allocate more memory and does not return length
*/
if ((byte_count=php_strnlen(value, byte_count)) > 0) {
return exif_process_undefined(result, value, byte_count TSRMLS_CC);
}
(*result) = estrndup("", 1); /* force empty string */
return byte_count+1;
}
/* }}} */
/* {{{ exif_process_user_comment
* Process UserComment in IFD. */
static int exif_process_user_comment(image_info_type *ImageInfo, char **pszInfoPtr, char **pszEncoding, char *szValuePtr, int ByteCount TSRMLS_DC)
{
int a;
char *decode;
size_t len;
*pszEncoding = NULL;
/* Copy the comment */
if (ByteCount>=8) {
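/* Per the EXIF spec the first 8 bytes of UserComment designate the
 * character code: "ASCII\0\0\0", "JIS\0\0\0\0\0", "UNICODE\0", or
 * eight NUL bytes for "undefined". */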
const zend_encoding *from, *to;
if (!memcmp(szValuePtr, "UNICODE\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* First try to detect BOM: ZERO WIDTH NOBREAK SPACE (FEFF 16)
* since we have no encoding support for the BOM yet we skip that.
*/
if (!memcmp(szValuePtr, "\xFE\xFF", 2)) {
decode = "UCS-2BE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (ByteCount >= 2 && !memcmp(szValuePtr, "\xFF\xFE", 2)) {
decode = "UCS-2LE";
szValuePtr = szValuePtr+2;
ByteCount -= 2;
} else if (ImageInfo->motorola_intel) {
decode = ImageInfo->decode_unicode_be;
} else {
decode = ImageInfo->decode_unicode_le;
}
to = zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC);
from = zend_multibyte_fetch_encoding(decode TSRMLS_CC);
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from
TSRMLS_CC) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "ASCII\0\0\0", 8)) {
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
} else if (!memcmp(szValuePtr, "JIS\0\0\0\0\0", 8)) {
/* JIS should be translated to MB or we leave it to the user - leave it to the user */
*pszEncoding = estrdup((const char*)szValuePtr);
szValuePtr = szValuePtr+8;
ByteCount -= 8;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
to = zend_multibyte_fetch_encoding(ImageInfo->encode_jis TSRMLS_CC);
from = zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_jis_be : ImageInfo->decode_jis_le TSRMLS_CC);
if (!to || !from || zend_multibyte_encoding_converter(
(unsigned char**)pszInfoPtr,
&len,
(unsigned char*)szValuePtr,
ByteCount,
to,
from
TSRMLS_CC) == (size_t)-1) {
len = exif_process_string_raw(pszInfoPtr, szValuePtr, ByteCount);
}
return len;
} else if (!memcmp(szValuePtr, "\0\0\0\0\0\0\0\0", 8)) {
/* 8 NULL means undefined and should be ASCII... */
*pszEncoding = estrdup("UNDEFINED");
szValuePtr = szValuePtr+8;
ByteCount -= 8;
}
}
/* Olympus has this padded with trailing spaces. Remove these first. */
if (ByteCount>0) {
for (a=ByteCount-1;a && szValuePtr[a]==' ';a--) {
(szValuePtr)[a] = '\0';
}
}
/* normal text without encoding */
exif_process_string(pszInfoPtr, szValuePtr, ByteCount TSRMLS_CC);
return strlen(*pszInfoPtr);
}
/* }}} */
/* {{{ exif_process_unicode
* Process unicode field in IFD. */
static int exif_process_unicode(image_info_type *ImageInfo, xp_field_type *xp_field, int tag, char *szValuePtr, int ByteCount TSRMLS_DC)
{
xp_field->tag = tag;
xp_field->value = NULL;
/* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */
if (zend_multibyte_encoding_converter(
(unsigned char**)&xp_field->value,
&xp_field->size,
(unsigned char*)szValuePtr,
ByteCount,
zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC),
zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_unicode_be : ImageInfo->decode_unicode_le TSRMLS_CC)
TSRMLS_CC) == (size_t)-1) {
xp_field->size = exif_process_string_raw(&xp_field->value, szValuePtr, ByteCount);
}
return xp_field->size;
}
/* }}} */
/* {{{ exif_process_IFD_in_MAKERNOTE
* Process nested IFDs directories in Maker Note. */
static int exif_process_IFD_in_MAKERNOTE(image_info_type *ImageInfo, char * value_ptr, int value_len, char *offset_base, size_t IFDlength, size_t displacement TSRMLS_DC)
{
int de, i=0, section_index = SECTION_MAKERNOTE;
int NumDirEntries, old_motorola_intel, offset_diff;
const maker_note_type *maker_note;
char *dir_start;
for (i=0; i<=sizeof(maker_note_array)/sizeof(maker_note_type); i++) {
if (i==sizeof(maker_note_array)/sizeof(maker_note_type)) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "No maker note data found. Detected maker: %s (length = %d)", ImageInfo->make, strlen(ImageInfo->make));
#endif
/* unknown manufacturer, not an error, use it as a string */
return TRUE;
}
maker_note = maker_note_array+i;
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "check (%s,%s)", maker_note->make?maker_note->make:"", maker_note->model?maker_note->model:"");*/
if (maker_note->make && (!ImageInfo->make || strcmp(maker_note->make, ImageInfo->make)))
continue;
if (maker_note->model && (!ImageInfo->model || strcmp(maker_note->model, ImageInfo->model)))
continue;
if (maker_note->id_string && strncmp(maker_note->id_string, value_ptr, maker_note->id_string_len))
continue;
break;
}
if (maker_note->offset >= value_len) {
/* Do not go past the value end */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data too short: 0x%04X offset 0x%04X", value_len, maker_note->offset);
return FALSE;
}
dir_start = value_ptr + maker_note->offset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s @x%04X + 0x%04X=%d: %s", exif_get_sectionname(section_index), (int)dir_start-(int)offset_base+maker_note->offset+displacement, value_len, value_len, exif_char_dump(value_ptr, value_len, (int)dir_start-(int)offset_base+maker_note->offset+displacement));
#endif
ImageInfo->sections_found |= FOUND_MAKERNOTE;
old_motorola_intel = ImageInfo->motorola_intel;
switch (maker_note->byte_order) {
case MN_ORDER_INTEL:
ImageInfo->motorola_intel = 0;
break;
case MN_ORDER_MOTOROLA:
ImageInfo->motorola_intel = 1;
break;
default:
case MN_ORDER_NORMAL:
break;
}
NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel);
switch (maker_note->offset_mode) {
case MN_OFFSET_MAKER:
offset_base = value_ptr;
break;
case MN_OFFSET_GUESS:
if (maker_note->offset + 10 + 4 >= value_len) {
/* Can not read dir_start+10 since it's beyond value end */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data too short: 0x%04X", value_len);
return FALSE;
}
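/* Derive the correction: the first entry's value-offset field (at
 * dir_start+10) should point just past the directory, i.e. to
 * 2 + 12*entries + 4; any difference is the offset error to undo. */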
offset_diff = 2 + NumDirEntries*12 + 4 - php_ifd_get32u(dir_start+10, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Using automatic offset correction: 0x%04X", ((int)dir_start-(int)offset_base+maker_note->offset+displacement) + offset_diff);
#endif
if (offset_diff < 0 || offset_diff >= value_len ) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "IFD data bad offset: 0x%04X length 0x%04X", offset_diff, value_len);
return FALSE;
}
offset_base = value_ptr + offset_diff;
break;
default:
case MN_OFFSET_NORMAL:
break;
}
if ((2+NumDirEntries*12) > value_len) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size: 2 + 0x%04X*12 = 0x%04X > 0x%04X", NumDirEntries, 2+NumDirEntries*12, value_len);
return FALSE;
}
for (de=0;de<NumDirEntries;de++) {
if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de,
offset_base, IFDlength, displacement, section_index, 0, maker_note->tag_table TSRMLS_CC)) {
return FALSE;
}
}
ImageInfo->motorola_intel = old_motorola_intel;
/* NextDirOffset (must be NULL) = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel);*/
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Subsection %s done", exif_get_sectionname(SECTION_MAKERNOTE));
#endif
return TRUE;
}
/* }}} */
/* {{{ exif_process_IFD_TAG
* Process one of the nested IFDs directories. */
static int exif_process_IFD_TAG(image_info_type *ImageInfo, char *dir_entry, char *offset_base, size_t IFDlength, size_t displacement, int section_index, int ReadNextIFD, tag_table_type tag_table TSRMLS_DC)
{
size_t length;
int tag, format, components;
char *value_ptr, tagname[64], cbuf[32], *outside=NULL;
size_t byte_count, offset_val, fpos, fgot;
int64_t byte_count_signed;
xp_field_type *tmp_xp;
#ifdef EXIF_DEBUG
char *dump_data;
int dump_free;
#endif /* EXIF_DEBUG */
/* Protect against corrupt headers */
if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "corrupt EXIF header: maximum directory nesting level reached");
return FALSE;
}
ImageInfo->ifd_nesting_level++;
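/* A 12-byte IFD entry: bytes 0-1 tag, 2-3 format, 4-7 component count,
 * 8-11 the value itself (if it fits in 4 bytes) or an offset to it. */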
tag = php_ifd_get16u(dir_entry, ImageInfo->motorola_intel);
format = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
components = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);
if (!format || format > NUM_FORMATS) {
/* zero is caught explicitly; any other value above NUM_FORMATS is out of range */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal format code 0x%04X, suppose BYTE", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), format);
format = TAG_FMT_BYTE;
/*return TRUE;*/
}
if (components < 0) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal components(%ld)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), components);
return FALSE;
}
byte_count_signed = (int64_t)components * php_tiff_bytes_per_format[format];
if (byte_count_signed < 0 || (byte_count_signed > INT32_MAX)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal byte_count", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC));
return FALSE;
}
byte_count = (size_t)byte_count_signed;
if (byte_count > 4) {
offset_val = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
/* If its bigger than 4 bytes, the dir entry contains an offset. */
value_ptr = offset_base+offset_val;
/*
dir_entry is ImageInfo->file.list[sn].data+2+i*12
offset_base is ImageInfo->file.list[sn].data-dir_offset
dir_entry - offset_base is dir_offset+2+i*12
*/
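/* Reject entries whose value range [offset_val, offset_val+byte_count)
 * leaves the cached IFD, wraps around, or points before this entry. */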
if (byte_count > IFDlength || offset_val > IFDlength-byte_count || value_ptr < dir_entry || offset_val < (size_t)(dir_entry-offset_base)) {
/* It is important to check for IMAGE_FILETYPE_TIFF
* JPEG does not use absolute pointers instead its pointers are
* relative to the start of the TIFF header in APP1 section. */
if (byte_count > ImageInfo->FileSize || offset_val>ImageInfo->FileSize-byte_count || (ImageInfo->FileType!=IMAGE_FILETYPE_TIFF_II && ImageInfo->FileType!=IMAGE_FILETYPE_TIFF_MM && ImageInfo->FileType!=IMAGE_FILETYPE_JPEG)) {
if (value_ptr < dir_entry) {
/* we can read this if offset_val > 0 */
/* some files have their values in other parts of the file */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal pointer offset(x%04X < x%04X)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val, dir_entry);
} else {
/* this is for sure not allowed */
/* exception are IFD pointers */
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Process tag(x%04X=%s): Illegal pointer offset(x%04X + x%04X = x%04X > x%04X)", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val, byte_count, offset_val+byte_count, IFDlength);
}
return FALSE;
}
if (byte_count>sizeof(cbuf)) {
/* mark as outside range and get buffer */
value_ptr = safe_emalloc(byte_count, 1, 0);
outside = value_ptr;
} else {
/* In most cases we only access a small range so
* it is faster to use a static buffer here.
* It also offers the possibility to have
* pointers read without the need to free them
* explicitly before returning. */
memset(&cbuf, 0, sizeof(cbuf));
value_ptr = cbuf;
}
fpos = php_stream_tell(ImageInfo->infile);
php_stream_seek(ImageInfo->infile, displacement+offset_val, SEEK_SET);
fgot = php_stream_tell(ImageInfo->infile);
if (fgot!=displacement+offset_val) {
EFREE_IF(outside);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Wrong file pointer: 0x%08X != 0x%08X", fgot, displacement+offset_val);
return FALSE;
}
fgot = php_stream_read(ImageInfo->infile, value_ptr, byte_count);
php_stream_seek(ImageInfo->infile, fpos, SEEK_SET);
if (fgot<byte_count) {
EFREE_IF(outside);
EXIF_ERRLOG_FILEEOF(ImageInfo)
return FALSE;
}
}
} else {
/* 4 bytes or less and value is in the dir entry itself */
value_ptr = dir_entry+8;
offset_val= value_ptr-offset_base;
}
ImageInfo->sections_found |= FOUND_ANY_TAG;
#ifdef EXIF_DEBUG
dump_data = exif_dump_data(&dump_free, format, components, byte_count, ImageInfo->motorola_intel, value_ptr TSRMLS_CC);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process tag(x%04X=%s,@x%04X + x%04X(=%d)): %s%s %s", tag, exif_get_tagname(tag, tagname, -12, tag_table TSRMLS_CC), offset_val+displacement, byte_count, byte_count, (components>1)&&format!=TAG_FMT_UNDEFINED&&format!=TAG_FMT_STRING?"ARRAY OF ":"", exif_get_tagformat(format), dump_data);
if (dump_free) {
efree(dump_data);
}
#endif
if (section_index==SECTION_THUMBNAIL) {
if (!ImageInfo->Thumbnail.data) {
switch(tag) {
case TAG_IMAGEWIDTH:
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->Thumbnail.width = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_IMAGEHEIGHT:
case TAG_COMP_IMAGE_HEIGHT:
ImageInfo->Thumbnail.height = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_STRIP_OFFSETS:
case TAG_JPEG_INTERCHANGE_FORMAT:
/* accept both formats */
ImageInfo->Thumbnail.offset = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_STRIP_BYTE_COUNTS:
if (ImageInfo->FileType == IMAGE_FILETYPE_TIFF_II || ImageInfo->FileType == IMAGE_FILETYPE_TIFF_MM) {
ImageInfo->Thumbnail.filetype = ImageInfo->FileType;
} else {
/* motorola is easier to read */
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_TIFF_MM;
}
ImageInfo->Thumbnail.size = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_JPEG_INTERCHANGE_FORMAT_LEN:
if (ImageInfo->Thumbnail.filetype == IMAGE_FILETYPE_UNKNOWN) {
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_JPEG;
ImageInfo->Thumbnail.size = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
}
break;
}
}
} else {
if (section_index==SECTION_IFD0 || section_index==SECTION_EXIF)
switch(tag) {
case TAG_COPYRIGHT:
/* check for "<photographer> NUL <editor> NUL" */
if (byte_count>1 && (length=php_strnlen(value_ptr, byte_count)) > 0) {
if (length<byte_count-1) {
/* When there are any characters after the first NUL */
ImageInfo->CopyrightPhotographer = estrdup(value_ptr);
ImageInfo->CopyrightEditor = estrndup(value_ptr+length+1, byte_count-length-1);
spprintf(&ImageInfo->Copyright, 0, "%s, %s", ImageInfo->CopyrightPhotographer, ImageInfo->CopyrightEditor);
/* format = TAG_FMT_UNDEFINED; this mustn't be ASCII */
/* but we are not supposed to change this */
/* keep in mind that image_info does not store editor value */
} else {
ImageInfo->Copyright = estrndup(value_ptr, byte_count);
}
}
break;
case TAG_USERCOMMENT:
ImageInfo->UserCommentLength = exif_process_user_comment(ImageInfo, &(ImageInfo->UserComment), &(ImageInfo->UserCommentEncoding), value_ptr, byte_count TSRMLS_CC);
break;
case TAG_XP_TITLE:
case TAG_XP_COMMENTS:
case TAG_XP_AUTHOR:
case TAG_XP_KEYWORDS:
case TAG_XP_SUBJECT:
tmp_xp = (xp_field_type*)safe_erealloc(ImageInfo->xp_fields.list, (ImageInfo->xp_fields.count+1), sizeof(xp_field_type), 0);
ImageInfo->sections_found |= FOUND_WINXP;
ImageInfo->xp_fields.list = tmp_xp;
ImageInfo->xp_fields.count++;
exif_process_unicode(ImageInfo, &(ImageInfo->xp_fields.list[ImageInfo->xp_fields.count-1]), tag, value_ptr, byte_count TSRMLS_CC);
break;
case TAG_FNUMBER:
/* Simplest way of expressing aperture, so I trust it the most.
(overwrite previously computed value if there is one) */
ImageInfo->ApertureFNumber = (float)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_APERTURE:
case TAG_MAX_APERTURE:
/* More relevant info always comes earlier, so only use this field if we don't
have appropriate aperture information yet. */
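/* APEX conversion: FNumber = 2^(Av/2), i.e. exp(Av*log(2)*0.5). */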
if (ImageInfo->ApertureFNumber == 0) {
ImageInfo->ApertureFNumber
= (float)exp(exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)*log(2)*0.5);
}
break;
case TAG_SHUTTERSPEED:
/* More complicated way of expressing exposure time, so only use
this value if we don't already have it from somewhere else.
SHUTTERSPEED comes after EXPOSURE TIME
*/
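/* APEX conversion: ExposureTime = 2^(-Tv), i.e. 1/exp(Tv*log(2)). */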
if (ImageInfo->ExposureTime == 0) {
ImageInfo->ExposureTime
= (float)(1/exp(exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)*log(2)));
}
break;
case TAG_EXPOSURETIME:
ImageInfo->ExposureTime = -1;
break;
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->ExifImageWidth = exif_convert_any_to_int(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_FOCALPLANE_X_RES:
ImageInfo->FocalplaneXRes = exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_SUBJECT_DISTANCE:
/* Indicates the distance the autofocus camera is focused to.
Tends to be less accurate as distance increases. */
ImageInfo->Distance = (float)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC);
break;
case TAG_FOCALPLANE_RESOLUTION_UNIT:
switch((int)exif_convert_any_format(value_ptr, format, ImageInfo->motorola_intel TSRMLS_CC)) {
case 1: ImageInfo->FocalplaneUnits = 25.4; break; /* inch */
case 2:
/* According to the information I was using, 2 means meters.
But looking at the Canon PowerShot's files, inches is the only
sensible value. */
ImageInfo->FocalplaneUnits = 25.4;
break;
case 3: ImageInfo->FocalplaneUnits = 10; break; /* centimeter */
case 4: ImageInfo->FocalplaneUnits = 1; break; /* millimeter */
case 5: ImageInfo->FocalplaneUnits = .001; break; /* micrometer */
}
break;
case TAG_SUB_IFD:
if (format==TAG_FMT_IFD) {
/* If this is called we are either in a TIFFs thumbnail or a JPEG where we cannot handle it */
/* TIFF thumbnail: our data structure cannot store a thumbnail of a thumbnail */
/* JPEG do we have the data area and what to do with it */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Skip SUB IFD");
}
break;
case TAG_MAKE:
ImageInfo->make = estrndup(value_ptr, byte_count);
break;
case TAG_MODEL:
ImageInfo->model = estrndup(value_ptr, byte_count);
break;
case TAG_MAKER_NOTE:
if (!exif_process_IFD_in_MAKERNOTE(ImageInfo, value_ptr, byte_count, offset_base, IFDlength, displacement TSRMLS_CC)) {
EFREE_IF(outside);
return FALSE;
}
break;
case TAG_EXIF_IFD_POINTER:
case TAG_GPS_IFD_POINTER:
case TAG_INTEROP_IFD_POINTER:
if (ReadNextIFD) {
char *Subdir_start;
int sub_section_index = 0;
switch(tag) {
case TAG_EXIF_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found EXIF");
#endif
ImageInfo->sections_found |= FOUND_EXIF;
sub_section_index = SECTION_EXIF;
break;
case TAG_GPS_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found GPS");
#endif
ImageInfo->sections_found |= FOUND_GPS;
sub_section_index = SECTION_GPS;
break;
case TAG_INTEROP_IFD_POINTER:
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Found INTEROPERABILITY");
#endif
ImageInfo->sections_found |= FOUND_INTEROP;
sub_section_index = SECTION_INTEROP;
break;
}
Subdir_start = offset_base + php_ifd_get32u(value_ptr, ImageInfo->motorola_intel);
if (Subdir_start < offset_base || Subdir_start > offset_base+IFDlength) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD Pointer");
return FALSE;
}
if (!exif_process_IFD_in_JPEG(ImageInfo, Subdir_start, offset_base, IFDlength, displacement, sub_section_index TSRMLS_CC)) {
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Subsection %s done", exif_get_sectionname(sub_section_index));
#endif
}
}
}
exif_iif_add_tag(ImageInfo, section_index, exif_get_tagname(tag, tagname, sizeof(tagname), tag_table TSRMLS_CC), tag, format, components, value_ptr TSRMLS_CC);
EFREE_IF(outside);
return TRUE;
}
/* }}} */
/* {{{ exif_process_IFD_in_JPEG
* Process one of the nested IFDs directories. */
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC)
{
int de;
int NumDirEntries;
int NextDirOffset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s (x%04X(=%d))", exif_get_sectionname(section_index), IFDlength, IFDlength);
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if ((dir_start + 2) >= (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size");
return FALSE;
}
NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel);
if ((dir_start+2+NumDirEntries*12) > (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size: x%04X + 2 + x%04X*12 = x%04X > x%04X", (int)((size_t)dir_start+2-(size_t)offset_base), NumDirEntries, (int)((size_t)dir_start+2+NumDirEntries*12-(size_t)offset_base), IFDlength);
return FALSE;
}
for (de=0;de<NumDirEntries;de++) {
if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de,
offset_base, IFDlength, displacement, section_index, 1, exif_get_tag_table(section_index) TSRMLS_CC)) {
return FALSE;
}
}
/*
* Ignore IFD2 if it purportedly exists
*/
if (section_index == SECTION_THUMBNAIL) {
return TRUE;
}
/*
* Hack to make it process IFD1, hopefully.
* There are 2 IFDs; the second one holds the keys (0x0201 and 0x0202) to the thumbnail
*/
if ((dir_start+2+12*de + 4) >= (offset_base+IFDlength)) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size");
return FALSE;
}
NextDirOffset = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel);
if (NextDirOffset) {
/* this check looks wrong at first glance, but IFDlength here means the length of all IFDs */
if (offset_base + NextDirOffset < offset_base || offset_base + NextDirOffset > offset_base+IFDlength) {
exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD offset");
return FALSE;
}
/* That is the IFD for the first thumbnail */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Expect next IFD to be thumbnail");
#endif
if (exif_process_IFD_in_JPEG(ImageInfo, offset_base + NextDirOffset, offset_base, IFDlength, displacement, SECTION_THUMBNAIL TSRMLS_CC)) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail size: 0x%04X", ImageInfo->Thumbnail.size);
#endif
if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
&& ImageInfo->Thumbnail.size
&& ImageInfo->Thumbnail.offset
&& ImageInfo->read_thumbnail
) {
exif_thumbnail_extract(ImageInfo, offset_base, IFDlength TSRMLS_CC);
}
return TRUE;
} else {
return FALSE;
}
}
return TRUE;
}
/* }}} */
/* {{{ exif_process_TIFF_in_JPEG
Process a TIFF header in a JPEG file
*/
static void exif_process_TIFF_in_JPEG(image_info_type *ImageInfo, char *CharBuf, size_t length, size_t displacement TSRMLS_DC)
{
unsigned exif_value_2a, offset_of_ifd;
/* set the thumbnail stuff to nothing so we can test to see if they get set up */
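/* "II" = Intel byte order (little-endian), "MM" = Motorola byte order (big-endian) */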
if (memcmp(CharBuf, "II", 2) == 0) {
ImageInfo->motorola_intel = 0;
} else if (memcmp(CharBuf, "MM", 2) == 0) {
ImageInfo->motorola_intel = 1;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF alignment marker");
return;
}
/* Check the next two values for correctness. */
if (length < 8) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF start (1)");
return;
}
exif_value_2a = php_ifd_get16u(CharBuf+2, ImageInfo->motorola_intel);
offset_of_ifd = php_ifd_get32u(CharBuf+4, ImageInfo->motorola_intel);
if (exif_value_2a != 0x2a || offset_of_ifd < 0x08) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF start (1)");
return;
}
if (offset_of_ifd > length) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid IFD start");
return;
}
ImageInfo->sections_found |= FOUND_IFD0;
/* First directory starts at offset 8. Offsets start at 0. */
exif_process_IFD_in_JPEG(ImageInfo, CharBuf+offset_of_ifd, CharBuf, length/*-14*/, displacement, SECTION_IFD0 TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process TIFF in JPEG done");
#endif
/* Compute the CCD width, in millimeters. */
if (ImageInfo->FocalplaneXRes != 0) {
ImageInfo->CCDWidth = (float)(ImageInfo->ExifImageWidth * ImageInfo->FocalplaneUnits / ImageInfo->FocalplaneXRes);
}
}
/* }}} */
/* {{{ exif_process_APP1
Process a JPEG APP1 block marker
Describes all the drivel that most digital cameras include...
*/
static void exif_process_APP1(image_info_type *ImageInfo, char *CharBuf, size_t length, size_t displacement TSRMLS_DC)
{
/* Check the APP1 for Exif Identifier Code */
static const uchar ExifHeader[] = {0x45, 0x78, 0x69, 0x66, 0x00, 0x00};
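/* The six header bytes spell "Exif\0\0"; CharBuf+2 skips the segment length. */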
if (length <= 8 || memcmp(CharBuf+2, ExifHeader, 6)) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Incorrect APP1 Exif Identifier Code");
return;
}
exif_process_TIFF_in_JPEG(ImageInfo, CharBuf + 8, length - 8, displacement+8 TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process APP1/EXIF done");
#endif
}
/* }}} */
/* {{{ exif_process_APP12
Process a JPEG APP12 block marker used by OLYMPUS
*/
static void exif_process_APP12(image_info_type *ImageInfo, char *buffer, size_t length TSRMLS_DC)
{
size_t l1, l2=0;
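/* Olympus APP12 layout after the 2-byte length: a NUL-terminated
 * company string followed by a NUL-terminated info string. */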
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company", TAG_NONE, TAG_FMT_STRING, l1, buffer+2 TSRMLS_CC);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1-1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info", TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1 TSRMLS_CC);
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process section APP12 with l1=%d, l2=%d done", l1, l2);
#endif
}
/* }}} */
/* {{{ exif_scan_JPEG_header
* Parse the marker stream until SOS or EOI is seen; */
static int exif_scan_JPEG_header(image_info_type *ImageInfo TSRMLS_DC)
{
int section, sn;
int marker = 0, last_marker = M_PSEUDO, comment_correction=1;
unsigned int ll, lh;
uchar *Data;
size_t fpos, size, got, itemlen;
jpeg_sof_info sof_info;
for(section=0;;section++) {
#ifdef EXIF_DEBUG
fpos = php_stream_tell(ImageInfo->infile);
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Needing section %d @ 0x%08X", ImageInfo->file.count, fpos);
#endif
/* get marker byte, swallowing possible padding */
/* some software does not count the length bytes of COM section */
/* one company doing so is very much involved in JPEG... so we accept it too */
if (last_marker==M_COM && comment_correction) {
comment_correction = 2;
}
do {
if ((marker = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
if (last_marker==M_COM && comment_correction>0) {
if (marker!=0xFF) {
marker = 0xff;
comment_correction--;
} else {
last_marker = M_PSEUDO; /* stop skipping 0 for M_COM */
}
}
} while (marker == 0xff);
if (last_marker==M_COM && !comment_correction) {
exif_error_docref("exif_read_data#error_mcom" EXIFERR_CC, ImageInfo, E_NOTICE, "Image has corrupt COM section: some software set wrong length information");
}
if (last_marker==M_COM && comment_correction)
return M_EOI; /* illegal: the byte after the COM section is not 0xFF */
fpos = php_stream_tell(ImageInfo->infile);
if (marker == 0xff) {
/* 0xff is legal padding, but if we get that many, something's wrong. */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "To many padding bytes");
return FALSE;
}
/* Read the length of the section. */
if ((lh = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
if ((ll = php_stream_getc(ImageInfo->infile)) == EOF) {
EXIF_ERRLOG_CORRUPT(ImageInfo)
return FALSE;
}
itemlen = (lh << 8) | ll;
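/* per JPEG, the segment length includes its own two bytes, hence >= 2 */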
if (itemlen < 2) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "%s, Section length: 0x%02X%02X", EXIF_ERROR_CORRUPT, lh, ll);
#else
EXIF_ERRLOG_CORRUPT(ImageInfo)
#endif
return FALSE;
}
sn = exif_file_sections_add(ImageInfo, marker, itemlen+1, NULL);
Data = ImageInfo->file.list[sn].data;
/* Store first two pre-read bytes. */
Data[0] = (uchar)lh;
Data[1] = (uchar)ll;
got = php_stream_read(ImageInfo->infile, (char*)(Data+2), itemlen-2); /* Read the whole section. */
if (got != itemlen-2) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error reading from file: got=x%04X(=%d) != itemlen-2=x%04X(=%d)", got, got, itemlen-2, itemlen-2);
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process section(x%02X=%s) @ x%04X + x%04X(=%d)", marker, exif_get_markername(marker), fpos, itemlen, itemlen);
#endif
switch(marker) {
case M_SOS: /* stop before hitting compressed data */
/* If reading entire image is requested, read the rest of the data. */
if (ImageInfo->read_all) {
/* Determine how much file is left. */
fpos = php_stream_tell(ImageInfo->infile);
size = ImageInfo->FileSize - fpos;
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, size, NULL);
Data = ImageInfo->file.list[sn].data;
got = php_stream_read(ImageInfo->infile, (char*)Data, size);
if (got != size) {
EXIF_ERRLOG_FILEEOF(ImageInfo)
return FALSE;
}
}
return TRUE;
case M_EOI: /* in case it's a tables-only JPEG stream */
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "No image in jpeg!");
return (ImageInfo->sections_found&(~FOUND_COMPUTED)) ? TRUE : FALSE;
case M_COM: /* Comment section */
exif_process_COM(ImageInfo, (char *)Data, itemlen TSRMLS_CC);
break;
case M_EXIF:
if (!(ImageInfo->sections_found&FOUND_IFD0)) {
/*ImageInfo->sections_found |= FOUND_EXIF;*/
/* Seen files from some 'U-lead' software with Vivitar scanner
that uses marker 31 later in the file (no clue what for!) */
exif_process_APP1(ImageInfo, (char *)Data, itemlen, fpos TSRMLS_CC);
}
break;
case M_APP12:
exif_process_APP12(ImageInfo, (char *)Data, itemlen TSRMLS_CC);
break;
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
if ((itemlen - 2) < 6) {
return FALSE;
}
exif_process_SOFn(Data, marker, &sof_info);
ImageInfo->Width = sof_info.width;
ImageInfo->Height = sof_info.height;
if (sof_info.num_components == 3) {
ImageInfo->IsColor = 1;
} else {
ImageInfo->IsColor = 0;
}
break;
default:
/* skip any other marker silently. */
break;
}
/* keep track of last marker */
last_marker = marker;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Done");
#endif
return TRUE;
}
/* }}} */
/* {{{ exif_scan_thumbnail
* scan JPEG in thumbnail (memory) */
static int exif_scan_thumbnail(image_info_type *ImageInfo TSRMLS_DC)
{
uchar c, *data = (uchar*)ImageInfo->Thumbnail.data;
int n, marker;
size_t length=2, pos=0;
jpeg_sof_info sof_info;
if (!data) {
return FALSE; /* nothing to do here */
}
if (memcmp(data, "\xFF\xD8\xFF", 3)) {
if (!ImageInfo->Thumbnail.width && !ImageInfo->Thumbnail.height) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Thumbnail is not a JPEG image");
}
return FALSE;
}
for (;;) {
pos += length;
if (pos>=ImageInfo->Thumbnail.size)
return FALSE;
c = data[pos++];
if (pos>=ImageInfo->Thumbnail.size)
return FALSE;
if (c != 0xFF) {
return FALSE;
}
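/* allow up to 8 bytes of 0xFF padding before the marker byte */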
n = 8;
while ((c = data[pos++]) == 0xFF && n--) {
if (pos+3>=ImageInfo->Thumbnail.size)
return FALSE;
/* +3 = pos++ of next check when reaching marker + 2 bytes for length */
}
if (c == 0xFF)
return FALSE;
marker = c;
length = php_jpg_get16(data+pos);
if (pos+length>=ImageInfo->Thumbnail.size) {
return FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process section(x%02X=%s) @ x%04X + x%04X", marker, exif_get_markername(marker), pos, length);
#endif
switch (marker) {
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
/* handle SOFn block */
exif_process_SOFn(data+pos, marker, &sof_info);
ImageInfo->Thumbnail.height = sof_info.height;
ImageInfo->Thumbnail.width = sof_info.width;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size: %d * %d", sof_info.width, sof_info.height);
#endif
return TRUE;
case M_SOS:
case M_EOI:
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
return FALSE;
break;
default:
/* just skip */
break;
}
}
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
return FALSE;
}
/* }}} */
/* {{{ exif_process_IFD_in_TIFF
* Process one IFD directory of a TIFF file; */
static int exif_process_IFD_in_TIFF(image_info_type *ImageInfo, size_t dir_offset, int section_index TSRMLS_DC)
{
int i, sn, num_entries, sub_section_index = 0;
unsigned char *dir_entry;
char tagname[64];
size_t ifd_size, dir_size, entry_offset, next_offset, entry_length, entry_value=0, fgot;
int entry_tag , entry_type;
tag_table_type tag_table = exif_get_tag_table(section_index);
if (ImageInfo->ifd_nesting_level > MAX_IFD_NESTING_LEVEL) {
return FALSE;
}
if (ImageInfo->FileSize >= dir_offset+2) {
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, 2, NULL);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, 2);
#endif
php_stream_seek(ImageInfo->infile, dir_offset, SEEK_SET); /* we do not know the order of sections */
php_stream_read(ImageInfo->infile, (char*)ImageInfo->file.list[sn].data, 2);
num_entries = php_ifd_get16u(ImageInfo->file.list[sn].data, ImageInfo->motorola_intel);
dir_size = 2/*num dir entries*/ +12/*length of entry*/*num_entries +4/* offset to next ifd (points to thumbnail or NULL)*/;
if (ImageInfo->FileSize >= dir_offset+dir_size) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD dir(x%04X + x%04X), IFD entries(%d)", ImageInfo->FileSize, dir_offset+2, dir_size-2, num_entries);
#endif
if (exif_file_sections_realloc(ImageInfo, sn, dir_size TSRMLS_CC)) {
return FALSE;
}
php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+2), dir_size-2);
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Dump: %s", exif_char_dump(ImageInfo->file.list[sn].data, dir_size, 0));*/
next_offset = php_ifd_get32u(ImageInfo->file.list[sn].data + dir_size - 4, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF done, next offset x%04X", next_offset);
#endif
/* now we have the directory we can look how long it should be */
ifd_size = dir_size;
for(i=0;i<num_entries;i++) {
dir_entry = ImageInfo->file.list[sn].data+2+i*12;
entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
if (entry_type > NUM_FORMATS) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: tag(0x%04X,%12s): Illegal format code 0x%04X, switching to BYTE", entry_tag, exif_get_tagname(entry_tag, tagname, -12, tag_table TSRMLS_CC), entry_type);
/* Since this is repeated in exif_process_IFD_TAG make it a notice here */
/* and make it a warning in the exif_process_IFD_TAG which is called */
/* elsewhere. */
entry_type = TAG_FMT_BYTE;
/*The next line would break the image on writeback: */
/* php_ifd_set16u(dir_entry+2, entry_type, ImageInfo->motorola_intel);*/
}
entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel) * php_tiff_bytes_per_format[entry_type];
if (entry_length <= 4) {
switch(entry_type) {
case TAG_FMT_USHORT:
entry_value = php_ifd_get16u(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_SSHORT:
entry_value = php_ifd_get16s(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_ULONG:
entry_value = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
break;
case TAG_FMT_SLONG:
entry_value = php_ifd_get32s(dir_entry+8, ImageInfo->motorola_intel);
break;
}
switch(entry_tag) {
case TAG_IMAGEWIDTH:
case TAG_COMP_IMAGE_WIDTH:
ImageInfo->Width = entry_value;
break;
case TAG_IMAGEHEIGHT:
case TAG_COMP_IMAGE_HEIGHT:
ImageInfo->Height = entry_value;
break;
case TAG_PHOTOMETRIC_INTERPRETATION:
switch (entry_value) {
case PMI_BLACK_IS_ZERO:
case PMI_WHITE_IS_ZERO:
case PMI_TRANSPARENCY_MASK:
ImageInfo->IsColor = 0;
break;
case PMI_RGB:
case PMI_PALETTE_COLOR:
case PMI_SEPARATED:
case PMI_YCBCR:
case PMI_CIELAB:
ImageInfo->IsColor = 1;
break;
}
break;
}
} else {
entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
/* only expand the ifd cache if the entry sits right at the end of the current cache; */
/* otherwise there may be huge holes between two entries */
if (entry_offset + entry_length > dir_offset + ifd_size
&& entry_offset == dir_offset + ifd_size) {
ifd_size = entry_offset + entry_length - dir_offset;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Resize struct: x%04X + x%04X - x%04X = x%04X", entry_offset, entry_length, dir_offset, ifd_size);
#endif
}
}
}
if (ImageInfo->FileSize >= dir_offset + ImageInfo->file.list[sn].size) {
if (ifd_size > dir_size) {
if (dir_offset + ifd_size > ImageInfo->FileSize) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
return FALSE;
}
if (exif_file_sections_realloc(ImageInfo, sn, ifd_size TSRMLS_CC)) {
return FALSE;
}
/* read values not stored in directory itself */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF: filesize(x%04X), IFD(x%04X + x%04X)", ImageInfo->FileSize, dir_offset, ifd_size);
#endif
php_stream_read(ImageInfo->infile, (char*)(ImageInfo->file.list[sn].data+dir_size), ifd_size-dir_size);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read from TIFF, done");
#endif
}
/* now process the tags */
for(i=0;i<num_entries;i++) {
dir_entry = ImageInfo->file.list[sn].data+2+i*12;
entry_tag = php_ifd_get16u(dir_entry+0, ImageInfo->motorola_intel);
entry_type = php_ifd_get16u(dir_entry+2, ImageInfo->motorola_intel);
/*entry_length = php_ifd_get32u(dir_entry+4, ImageInfo->motorola_intel);*/
if (entry_tag == TAG_EXIF_IFD_POINTER ||
entry_tag == TAG_INTEROP_IFD_POINTER ||
entry_tag == TAG_GPS_IFD_POINTER ||
entry_tag == TAG_SUB_IFD
) {
switch(entry_tag) {
case TAG_EXIF_IFD_POINTER:
ImageInfo->sections_found |= FOUND_EXIF;
sub_section_index = SECTION_EXIF;
break;
case TAG_GPS_IFD_POINTER:
ImageInfo->sections_found |= FOUND_GPS;
sub_section_index = SECTION_GPS;
break;
case TAG_INTEROP_IFD_POINTER:
ImageInfo->sections_found |= FOUND_INTEROP;
sub_section_index = SECTION_INTEROP;
break;
case TAG_SUB_IFD:
ImageInfo->sections_found |= FOUND_THUMBNAIL;
sub_section_index = SECTION_THUMBNAIL;
break;
}
entry_offset = php_ifd_get32u(dir_entry+8, ImageInfo->motorola_intel);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s @x%04X", exif_get_sectionname(sub_section_index), entry_offset);
#endif
ImageInfo->ifd_nesting_level++;
exif_process_IFD_in_TIFF(ImageInfo, entry_offset, sub_section_index TSRMLS_CC);
if (section_index!=SECTION_THUMBNAIL && entry_tag==TAG_SUB_IFD) {
if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN
&& ImageInfo->Thumbnail.size
&& ImageInfo->Thumbnail.offset
&& ImageInfo->read_thumbnail
) {
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
if (!ImageInfo->Thumbnail.data) {
ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
if (fgot < ImageInfo->Thumbnail.size) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
efree(ImageInfo->Thumbnail.data);
ImageInfo->Thumbnail.data = NULL;
} else {
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
}
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Next IFD: %s done", exif_get_sectionname(sub_section_index));
#endif
} else {
if (!exif_process_IFD_TAG(ImageInfo, (char*)dir_entry,
(char*)(ImageInfo->file.list[sn].data-dir_offset),
ifd_size, 0, section_index, 0, tag_table TSRMLS_CC)) {
return FALSE;
}
}
}
/* If we had a thumbnail in a SUB_IFD we have ANOTHER image in NEXT IFD */
if (next_offset && section_index != SECTION_THUMBNAIL) {
/* this should be a thumbnail IFD */
/* the thumbnail itself is stored at Tag=StripOffsets */
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) at x%04X", next_offset);
#endif
ImageInfo->ifd_nesting_level++;
exif_process_IFD_in_TIFF(ImageInfo, next_offset, SECTION_THUMBNAIL TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "%s THUMBNAIL @0x%04X + 0x%04X", ImageInfo->Thumbnail.data ? "Ignore" : "Read", ImageInfo->Thumbnail.offset, ImageInfo->Thumbnail.size);
#endif
if (!ImageInfo->Thumbnail.data && ImageInfo->Thumbnail.offset && ImageInfo->Thumbnail.size && ImageInfo->read_thumbnail) {
ImageInfo->Thumbnail.data = safe_emalloc(ImageInfo->Thumbnail.size, 1, 0);
php_stream_seek(ImageInfo->infile, ImageInfo->Thumbnail.offset, SEEK_SET);
fgot = php_stream_read(ImageInfo->infile, ImageInfo->Thumbnail.data, ImageInfo->Thumbnail.size);
if (fgot < ImageInfo->Thumbnail.size) {
EXIF_ERRLOG_THUMBEOF(ImageInfo)
efree(ImageInfo->Thumbnail.data);
ImageInfo->Thumbnail.data = NULL;
} else {
exif_thumbnail_build(ImageInfo TSRMLS_CC);
}
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Read next IFD (THUMBNAIL) done");
#endif
}
return TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD(x%04X)", ImageInfo->FileSize, dir_offset+ImageInfo->file.list[sn].size);
return FALSE;
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than size of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+dir_size);
return FALSE;
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Error in TIFF: filesize(x%04X) less than start of IFD dir(x%04X)", ImageInfo->FileSize, dir_offset+2);
return FALSE;
}
}
/* }}} */
/* {{{ exif_scan_FILE_header
* Determine the file type and scan its header; */
static int exif_scan_FILE_header(image_info_type *ImageInfo TSRMLS_DC)
{
unsigned char file_header[8];
int ret = FALSE;
ImageInfo->FileType = IMAGE_FILETYPE_UNKNOWN;
if (ImageInfo->FileSize >= 2) {
php_stream_seek(ImageInfo->infile, 0, SEEK_SET);
if (php_stream_read(ImageInfo->infile, (char*)file_header, 2) != 2) {
return FALSE;
}
if ((file_header[0]==0xff) && (file_header[1]==M_SOI)) {
ImageInfo->FileType = IMAGE_FILETYPE_JPEG;
if (exif_scan_JPEG_header(ImageInfo TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid JPEG file");
}
} else if (ImageInfo->FileSize >= 8) {
if (php_stream_read(ImageInfo->infile, (char*)(file_header+2), 6) != 6) {
return FALSE;
}
if (!memcmp(file_header, "II\x2A\x00", 4)) {
ImageInfo->FileType = IMAGE_FILETYPE_TIFF_II;
ImageInfo->motorola_intel = 0;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "File has TIFF/II format");
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if (exif_process_IFD_in_TIFF(ImageInfo,
php_ifd_get32u(file_header + 4, ImageInfo->motorola_intel),
SECTION_IFD0 TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF file");
}
} else if (!memcmp(file_header, "MM\x00\x2a", 4)) {
ImageInfo->FileType = IMAGE_FILETYPE_TIFF_MM;
ImageInfo->motorola_intel = 1;
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "File has TIFF/MM format");
#endif
ImageInfo->sections_found |= FOUND_IFD0;
if (exif_process_IFD_in_TIFF(ImageInfo,
php_ifd_get32u(file_header + 4, ImageInfo->motorola_intel),
SECTION_IFD0 TSRMLS_CC)) {
ret = TRUE;
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Invalid TIFF file");
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "File not supported");
return FALSE;
}
}
} else {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "File too small (%d)", ImageInfo->FileSize);
}
return ret;
}
/* }}} */
/* {{{ exif_discard_imageinfo
Discard data scanned by exif_read_file.
*/
static int exif_discard_imageinfo(image_info_type *ImageInfo)
{
int i;
EFREE_IF(ImageInfo->FileName);
EFREE_IF(ImageInfo->UserComment);
EFREE_IF(ImageInfo->UserCommentEncoding);
EFREE_IF(ImageInfo->Copyright);
EFREE_IF(ImageInfo->CopyrightPhotographer);
EFREE_IF(ImageInfo->CopyrightEditor);
EFREE_IF(ImageInfo->Thumbnail.data);
EFREE_IF(ImageInfo->encode_unicode);
EFREE_IF(ImageInfo->decode_unicode_be);
EFREE_IF(ImageInfo->decode_unicode_le);
EFREE_IF(ImageInfo->encode_jis);
EFREE_IF(ImageInfo->decode_jis_be);
EFREE_IF(ImageInfo->decode_jis_le);
EFREE_IF(ImageInfo->make);
EFREE_IF(ImageInfo->model);
for (i=0; i<ImageInfo->xp_fields.count; i++) {
EFREE_IF(ImageInfo->xp_fields.list[i].value);
}
EFREE_IF(ImageInfo->xp_fields.list);
for (i=0; i<SECTION_COUNT; i++) {
exif_iif_free(ImageInfo, i);
}
exif_file_sections_free(ImageInfo);
memset(ImageInfo, 0, sizeof(*ImageInfo));
return TRUE;
}
/* }}} */
/* {{{ exif_read_file
*/
static int exif_read_file(image_info_type *ImageInfo, char *FileName, int read_thumbnail, int read_all TSRMLS_DC)
{
int ret;
struct stat st;
/* Start with an empty image information structure. */
memset(ImageInfo, 0, sizeof(*ImageInfo));
ImageInfo->motorola_intel = -1; /* flag as unknown */
ImageInfo->infile = php_stream_open_wrapper(FileName, "rb", STREAM_MUST_SEEK|IGNORE_PATH, NULL);
if (!ImageInfo->infile) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Unable to open file");
return FALSE;
}
if (php_stream_is(ImageInfo->infile, PHP_STREAM_IS_STDIO)) {
if (VCWD_STAT(FileName, &st) >= 0) {
if ((st.st_mode & S_IFMT) != S_IFREG) {
exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Not a file");
php_stream_close(ImageInfo->infile);
return FALSE;
}
/* Store file date/time. */
ImageInfo->FileDateTime = st.st_mtime;
ImageInfo->FileSize = st.st_size;
/*exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Opened stream is file: %d", ImageInfo->FileSize);*/
}
} else {
if (!ImageInfo->FileSize) {
php_stream_seek(ImageInfo->infile, 0, SEEK_END);
ImageInfo->FileSize = php_stream_tell(ImageInfo->infile);
php_stream_seek(ImageInfo->infile, 0, SEEK_SET);
}
}
php_basename(FileName, strlen(FileName), NULL, 0, &(ImageInfo->FileName), NULL TSRMLS_CC);
ImageInfo->read_thumbnail = read_thumbnail;
ImageInfo->read_all = read_all;
ImageInfo->Thumbnail.filetype = IMAGE_FILETYPE_UNKNOWN;
ImageInfo->encode_unicode = safe_estrdup(EXIF_G(encode_unicode));
ImageInfo->decode_unicode_be = safe_estrdup(EXIF_G(decode_unicode_be));
ImageInfo->decode_unicode_le = safe_estrdup(EXIF_G(decode_unicode_le));
ImageInfo->encode_jis = safe_estrdup(EXIF_G(encode_jis));
ImageInfo->decode_jis_be = safe_estrdup(EXIF_G(decode_jis_be));
ImageInfo->decode_jis_le = safe_estrdup(EXIF_G(decode_jis_le));
ImageInfo->ifd_nesting_level = 0;
/* Scan the JPEG headers. */
ret = exif_scan_FILE_header(ImageInfo TSRMLS_CC);
php_stream_close(ImageInfo->infile);
return ret;
}
/* }}} */
/* {{{ proto array exif_read_data(string filename [, sections_needed [, sub_arrays[, read_thumbnail]]])
Reads header data from the JPEG/TIFF image filename and optionally reads the internal thumbnails */
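/* Illustrative userland sketch ('photo.jpg' is a hypothetical filename):
 *   $data = exif_read_data('photo.jpg', 'EXIF,IFD0', true, false);
 *   // returns FALSE on failure or when none of the requested
 *   // sections were found in the file
 */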
PHP_FUNCTION(exif_read_data)
{
char *p_name, *p_sections_needed = NULL;
int p_name_len, p_sections_needed_len = 0;
zend_bool sub_arrays=0, read_thumbnail=0, read_all=0;
int i, ret, sections_needed=0;
image_info_type ImageInfo;
char tmp[64], *sections_str, *s;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbb", &p_name, &p_name_len, &p_sections_needed, &p_sections_needed_len, &sub_arrays, &read_thumbnail) == FAILURE) {
return;
}
memset(&ImageInfo, 0, sizeof(ImageInfo));
if (p_sections_needed) {
spprintf(§ions_str, 0, ",%s,", p_sections_needed);
/* sections_str DOES start with , and SPACES are NOT allowed in names */
s = sections_str;
while (*++s) {
if (*s == ' ') {
*s = ',';
}
}
for (i = 0; i < SECTION_COUNT; i++) {
snprintf(tmp, sizeof(tmp), ",%s,", exif_get_sectionname(i));
if (strstr(sections_str, tmp)) {
sections_needed |= 1<<i;
}
}
EFREE_IF(sections_str);
/* now see what we need */
#ifdef EXIF_DEBUG
sections_str = exif_get_sectionlist(sections_needed TSRMLS_CC);
if (!sections_str) {
RETURN_FALSE;
}
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Sections needed: %s", sections_str[0] ? sections_str : "None");
EFREE_IF(sections_str);
#endif
}
ret = exif_read_file(&ImageInfo, p_name, read_thumbnail, read_all TSRMLS_CC);
sections_str = exif_get_sectionlist(ImageInfo.sections_found TSRMLS_CC);
#ifdef EXIF_DEBUG
if (sections_str)
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Sections found: %s", sections_str[0] ? sections_str : "None");
#endif
ImageInfo.sections_found |= FOUND_COMPUTED|FOUND_FILE; /* do not report these in debug */
if (ret == FALSE || (sections_needed && !(sections_needed&ImageInfo.sections_found))) {
/* array_init must be called last! Otherwise the array would have to be freed if a later test fails. */
exif_discard_imageinfo(&ImageInfo);
EFREE_IF(sections_str);
RETURN_FALSE;
}
array_init(return_value);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Generate section FILE");
#endif
/* now we can add our information */
exif_iif_add_str(&ImageInfo, SECTION_FILE, "FileName", ImageInfo.FileName TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileDateTime", ImageInfo.FileDateTime TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileSize", ImageInfo.FileSize TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_FILE, "FileType", ImageInfo.FileType TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_FILE, "MimeType", (char*)php_image_type_to_mime_type(ImageInfo.FileType) TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_FILE, "SectionsFound", sections_str ? sections_str : "NONE" TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Generate section COMPUTED");
#endif
if (ImageInfo.Width>0 && ImageInfo.Height>0) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "html" TSRMLS_CC, "width=\"%d\" height=\"%d\"", ImageInfo.Width, ImageInfo.Height);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Height", ImageInfo.Height TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Width", ImageInfo.Width TSRMLS_CC);
}
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "IsColor", ImageInfo.IsColor TSRMLS_CC);
if (ImageInfo.motorola_intel != -1) {
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "ByteOrderMotorola", ImageInfo.motorola_intel TSRMLS_CC);
}
if (ImageInfo.FocalLength) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "FocalLength" TSRMLS_CC, "%4.1Fmm", ImageInfo.FocalLength);
if(ImageInfo.CCDWidth) {
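/* 35mm-film equivalent: scale the focal length by 35/CCDWidth, +0.5 to round */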
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "35mmFocalLength" TSRMLS_CC, "%dmm", (int)(ImageInfo.FocalLength/ImageInfo.CCDWidth*35+0.5));
}
}
if(ImageInfo.CCDWidth) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "CCDWidth" TSRMLS_CC, "%dmm", (int)ImageInfo.CCDWidth);
}
if(ImageInfo.ExposureTime>0) {
if(ImageInfo.ExposureTime <= 0.5) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ExposureTime" TSRMLS_CC, "%0.3F s (1/%d)", ImageInfo.ExposureTime, (int)(0.5 + 1/ImageInfo.ExposureTime));
} else {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ExposureTime" TSRMLS_CC, "%0.3F s", ImageInfo.ExposureTime);
}
}
if(ImageInfo.ApertureFNumber) {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "ApertureFNumber" TSRMLS_CC, "f/%.1F", ImageInfo.ApertureFNumber);
}
if(ImageInfo.Distance) {
if(ImageInfo.Distance<0) {
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "FocusDistance", "Infinite" TSRMLS_CC);
} else {
exif_iif_add_fmt(&ImageInfo, SECTION_COMPUTED, "FocusDistance" TSRMLS_CC, "%0.2Fm", ImageInfo.Distance);
}
}
if (ImageInfo.UserComment) {
exif_iif_add_buffer(&ImageInfo, SECTION_COMPUTED, "UserComment", ImageInfo.UserCommentLength, ImageInfo.UserComment TSRMLS_CC);
if (ImageInfo.UserCommentEncoding && strlen(ImageInfo.UserCommentEncoding)) {
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "UserCommentEncoding", ImageInfo.UserCommentEncoding TSRMLS_CC);
}
}
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright", ImageInfo.Copyright TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright.Photographer", ImageInfo.CopyrightPhotographer TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Copyright.Editor", ImageInfo.CopyrightEditor TSRMLS_CC);
for (i=0; i<ImageInfo.xp_fields.count; i++) {
exif_iif_add_str(&ImageInfo, SECTION_WINXP, exif_get_tagname(ImageInfo.xp_fields.list[i].tag, NULL, 0, exif_get_tag_table(SECTION_WINXP) TSRMLS_CC), ImageInfo.xp_fields.list[i].value TSRMLS_CC);
}
if (ImageInfo.Thumbnail.size) {
if (read_thumbnail) {
/* not exif_iif_add_str : this is a buffer */
exif_iif_add_tag(&ImageInfo, SECTION_THUMBNAIL, "THUMBNAIL", TAG_NONE, TAG_FMT_UNDEFINED, ImageInfo.Thumbnail.size, ImageInfo.Thumbnail.data TSRMLS_CC);
}
if (!ImageInfo.Thumbnail.width || !ImageInfo.Thumbnail.height) {
/* try to evaluate if thumbnail data is present */
exif_scan_thumbnail(&ImageInfo TSRMLS_CC);
}
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.FileType", ImageInfo.Thumbnail.filetype TSRMLS_CC);
exif_iif_add_str(&ImageInfo, SECTION_COMPUTED, "Thumbnail.MimeType", (char*)php_image_type_to_mime_type(ImageInfo.Thumbnail.filetype) TSRMLS_CC);
}
if (ImageInfo.Thumbnail.width && ImageInfo.Thumbnail.height) {
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.Height", ImageInfo.Thumbnail.height TSRMLS_CC);
exif_iif_add_int(&ImageInfo, SECTION_COMPUTED, "Thumbnail.Width", ImageInfo.Thumbnail.width TSRMLS_CC);
}
EFREE_IF(sections_str);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Adding image infos");
#endif
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_FILE TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_COMPUTED TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_ANY_TAG TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_IFD0 TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_THUMBNAIL TSRMLS_CC);
add_assoc_image_info(return_value, 1, &ImageInfo, SECTION_COMMENT TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_EXIF TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_GPS TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_INTEROP TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_FPIX TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_APP12 TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_WINXP TSRMLS_CC);
add_assoc_image_info(return_value, sub_arrays, &ImageInfo, SECTION_MAKERNOTE TSRMLS_CC);
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Discarding info");
#endif
exif_discard_imageinfo(&ImageInfo);
#ifdef EXIF_DEBUG
php_error_docref1(NULL TSRMLS_CC, Z_STRVAL_PP(p_name), E_NOTICE, "done");
#endif
}
/* }}} */
/* {{{ proto string exif_thumbnail(string filename [, &width, &height [, &imagetype]])
Reads the embedded thumbnail */
PHP_FUNCTION(exif_thumbnail)
{
zval *p_width = 0, *p_height = 0, *p_imagetype = 0;
char *p_name;
int p_name_len, ret, arg_c = ZEND_NUM_ARGS();
image_info_type ImageInfo;
memset(&ImageInfo, 0, sizeof(ImageInfo));
if (arg_c!=1 && arg_c!=3 && arg_c!=4) {
WRONG_PARAM_COUNT;
}
if (zend_parse_parameters(arg_c TSRMLS_CC, "p|z/z/z/", &p_name, &p_name_len, &p_width, &p_height, &p_imagetype) == FAILURE) {
return;
}
ret = exif_read_file(&ImageInfo, p_name, 1, 0 TSRMLS_CC);
if (ret==FALSE) {
exif_discard_imageinfo(&ImageInfo);
RETURN_FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Thumbnail data %d %d %d, %d x %d", ImageInfo.Thumbnail.data, ImageInfo.Thumbnail.size, ImageInfo.Thumbnail.filetype, ImageInfo.Thumbnail.width, ImageInfo.Thumbnail.height);
#endif
if (!ImageInfo.Thumbnail.data || !ImageInfo.Thumbnail.size) {
exif_discard_imageinfo(&ImageInfo);
RETURN_FALSE;
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Returning thumbnail(%d)", ImageInfo.Thumbnail.size);
#endif
ZVAL_STRINGL(return_value, ImageInfo.Thumbnail.data, ImageInfo.Thumbnail.size, 1);
if (arg_c >= 3) {
if (!ImageInfo.Thumbnail.width || !ImageInfo.Thumbnail.height) {
exif_scan_thumbnail(&ImageInfo TSRMLS_CC);
}
zval_dtor(p_width);
zval_dtor(p_height);
ZVAL_LONG(p_width, ImageInfo.Thumbnail.width);
ZVAL_LONG(p_height, ImageInfo.Thumbnail.height);
}
if (arg_c >= 4) {
zval_dtor(p_imagetype);
ZVAL_LONG(p_imagetype, ImageInfo.Thumbnail.filetype);
}
#ifdef EXIF_DEBUG
exif_error_docref(NULL EXIFERR_CC, &ImageInfo, E_NOTICE, "Discarding info");
#endif
exif_discard_imageinfo(&ImageInfo);
#ifdef EXIF_DEBUG
php_error_docref1(NULL TSRMLS_CC, p_name, E_NOTICE, "Done");
#endif
}
/* }}} */
/* {{{ proto int exif_imagetype(string imagefile)
Get the type of an image */
PHP_FUNCTION(exif_imagetype)
{
char *imagefile;
int imagefile_len;
php_stream * stream;
int itype = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &imagefile, &imagefile_len) == FAILURE) {
return;
}
stream = php_stream_open_wrapper(imagefile, "rb", IGNORE_PATH|REPORT_ERRORS, NULL);
if (stream == NULL) {
RETURN_FALSE;
}
itype = php_getimagetype(stream, NULL TSRMLS_CC);
php_stream_close(stream);
if (itype == IMAGE_FILETYPE_UNKNOWN) {
RETURN_FALSE;
} else {
ZVAL_LONG(return_value, itype);
}
}
/* }}} */
#endif
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: sw=4 ts=4 tw=78 fdm=marker
* vim<600: sw=4 ts=4 tw=78
*/
/* ---- end of file ./CrossVul/dataset_final_sorted/CWE-189/c/good_4822_0 ---- */

/* ---- file crossvul-cpp_data_good_2046_0 (./CrossVul/dataset_final_sorted/CWE-189/c/good_2046_0) ---- */
/**
* FreeRDP: A Remote Desktop Protocol Implementation
* RDP Licensing
*
* Copyright 2011-2013 Marc-Andre Moreau <marcandre.moreau@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <winpr/crt.h>
#include "redirection.h"
#include "certificate.h"
#include "license.h"
/* #define LICENSE_NULL_CLIENT_RANDOM 1 */
/* #define LICENSE_NULL_PREMASTER_SECRET 1 */
#ifdef WITH_DEBUG_LICENSE
static const char* const LICENSE_MESSAGE_STRINGS[] =
{
"",
"License Request",
"Platform Challenge",
"New License",
"Upgrade License",
"", "", "", "", "", "",
"", "", "", "", "", "",
"",
"License Info",
"New License Request",
"",
"Platform Challenge Response",
"", "", "", "", "", "", "", "", "",
"Error Alert"
};
static const char* const error_codes[] =
{
"ERR_UNKNOWN",
"ERR_INVALID_SERVER_CERTIFICATE",
"ERR_NO_LICENSE",
"ERR_INVALID_MAC",
"ERR_INVALID_SCOPE",
"ERR_UNKNOWN",
"ERR_NO_LICENSE_SERVER",
"STATUS_VALID_CLIENT",
"ERR_INVALID_CLIENT",
"ERR_UNKNOWN",
"ERR_UNKNOWN",
"ERR_INVALID_PRODUCT_ID",
"ERR_INVALID_MESSAGE_LENGTH"
};
static const char* const state_transitions[] =
{
"ST_UNKNOWN",
"ST_TOTAL_ABORT",
"ST_NO_TRANSITION",
"ST_RESET_PHASE_TO_START",
"ST_RESEND_LAST_MESSAGE"
};
void license_print_product_info(PRODUCT_INFO* productInfo)
{
char* CompanyName = NULL;
char* ProductId = NULL;
ConvertFromUnicode(CP_UTF8, 0, (WCHAR*) productInfo->pbCompanyName,
productInfo->cbCompanyName / 2, &CompanyName, 0, NULL, NULL);
ConvertFromUnicode(CP_UTF8, 0, (WCHAR*) productInfo->pbProductId,
productInfo->cbProductId / 2, &ProductId, 0, NULL, NULL);
fprintf(stderr, "ProductInfo:\n");
fprintf(stderr, "\tdwVersion: 0x%08X\n", productInfo->dwVersion);
fprintf(stderr, "\tCompanyName: %s\n", CompanyName);
fprintf(stderr, "\tProductId: %s\n", ProductId);
free(CompanyName);
free(ProductId);
}
void license_print_scope_list(SCOPE_LIST* scopeList)
{
int index;
LICENSE_BLOB* scope;
fprintf(stderr, "ScopeList (%d):\n", scopeList->count);
for (index = 0; index < scopeList->count; index++)
{
scope = &scopeList->array[index];
fprintf(stderr, "\t%s\n", (char*) scope->data);
}
}
#endif
/**
* Read a licensing preamble.\n
* @msdn{cc240480}
* @param s stream
* @param bMsgType license message type
* @param flags message flags
* @param wMsgSize message size
* @return if the operation completed successfully
*/
BOOL license_read_preamble(wStream* s, BYTE* bMsgType, BYTE* flags, UINT16* wMsgSize)
{
/* preamble (4 bytes) */
if (Stream_GetRemainingLength(s) < 4)
return FALSE;
Stream_Read_UINT8(s, *bMsgType); /* bMsgType (1 byte) */
Stream_Read_UINT8(s, *flags); /* flags (1 byte) */
Stream_Read_UINT16(s, *wMsgSize); /* wMsgSize (2 bytes) */
return TRUE;
}
/**
* Write a licensing preamble.\n
* @msdn{cc240480}
* @param s stream
* @param bMsgType license message type
* @param flags message flags
* @param wMsgSize message size
*/
void license_write_preamble(wStream* s, BYTE bMsgType, BYTE flags, UINT16 wMsgSize)
{
/* preamble (4 bytes) */
Stream_Write_UINT8(s, bMsgType); /* bMsgType (1 byte) */
Stream_Write_UINT8(s, flags); /* flags (1 byte) */
Stream_Write_UINT16(s, wMsgSize); /* wMsgSize (2 bytes) */
}
/**
* Initialize a license packet stream.\n
* @param license license module
* @return stream
*/
wStream* license_send_stream_init(rdpLicense* license)
{
wStream* s;
s = Stream_New(NULL, 4096);
Stream_Seek(s, LICENSE_PACKET_HEADER_MAX_LENGTH);
return s;
}
/**
* Send an RDP licensing packet.\n
* @msdn{cc240479}
* @param license license module
* @param s stream
*/
BOOL license_send(rdpLicense* license, wStream* s, BYTE type)
{
int length;
BYTE flags;
UINT16 wMsgSize;
UINT16 sec_flags;
DEBUG_LICENSE("Sending %s Packet", LICENSE_MESSAGE_STRINGS[type & 0x1F]);
length = Stream_GetPosition(s);
Stream_SetPosition(s, 0);
sec_flags = SEC_LICENSE_PKT;
wMsgSize = length - LICENSE_PACKET_HEADER_MAX_LENGTH + 4;
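	/* wMsgSize counts the 4-byte preamble plus the licensing message body */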
flags = PREAMBLE_VERSION_3_0;
/**
* Using EXTENDED_ERROR_MSG_SUPPORTED here would cause mstsc to crash when
* running in server mode! This flag seems to be incorrectly documented.
*/
if (!license->rdp->settings->ServerMode)
flags |= EXTENDED_ERROR_MSG_SUPPORTED;
rdp_write_header(license->rdp, s, length, MCS_GLOBAL_CHANNEL_ID);
rdp_write_security_header(s, sec_flags);
license_write_preamble(s, type, flags, wMsgSize);
#ifdef WITH_DEBUG_LICENSE
fprintf(stderr, "Sending %s Packet, length %d\n", LICENSE_MESSAGE_STRINGS[type & 0x1F], wMsgSize);
winpr_HexDump(Stream_Pointer(s) - 4, wMsgSize);
#endif
Stream_SetPosition(s, length);
Stream_SealLength(s);
if (transport_write(license->rdp->transport, s) < 0)
return FALSE;
Stream_Free(s, TRUE);
return TRUE;
}
/**
* Receive an RDP licensing packet.\n
* @msdn{cc240479}
* @param license license module
* @param s stream
* @return if the operation completed successfully
*/
int license_recv(rdpLicense* license, wStream* s)
{
BYTE flags;
BYTE bMsgType;
UINT16 wMsgSize;
UINT16 length;
UINT16 channelId;
UINT16 securityFlags;
if (!rdp_read_header(license->rdp, s, &length, &channelId))
{
fprintf(stderr, "Incorrect RDP header.\n");
return -1;
}
if (!rdp_read_security_header(s, &securityFlags))
return -1;
if (securityFlags & SEC_ENCRYPT)
{
if (!rdp_decrypt(license->rdp, s, length - 4, securityFlags))
{
fprintf(stderr, "rdp_decrypt failed\n");
return -1;
}
}
if (!(securityFlags & SEC_LICENSE_PKT))
{
int status;
if (!(securityFlags & SEC_ENCRYPT))
Stream_Rewind(s, RDP_SECURITY_HEADER_LENGTH);
status = rdp_recv_out_of_sequence_pdu(license->rdp, s);
if (status < 0)
{
fprintf(stderr, "Unexpected license packet.\n");
return status;
}
return 0;
}
if (!license_read_preamble(s, &bMsgType, &flags, &wMsgSize)) /* preamble (4 bytes) */
return -1;
DEBUG_LICENSE("Receiving %s Packet", LICENSE_MESSAGE_STRINGS[bMsgType & 0x1F]);
switch (bMsgType)
{
case LICENSE_REQUEST:
if (!license_read_license_request_packet(license, s))
return -1;
license_send_new_license_request_packet(license);
break;
case PLATFORM_CHALLENGE:
if (!license_read_platform_challenge_packet(license, s))
return -1;
license_send_platform_challenge_response_packet(license);
break;
case NEW_LICENSE:
license_read_new_license_packet(license, s);
break;
case UPGRADE_LICENSE:
license_read_upgrade_license_packet(license, s);
break;
case ERROR_ALERT:
if (!license_read_error_alert_packet(license, s))
return -1;
break;
		default:
			fprintf(stderr, "invalid bMsgType:%d\n", bMsgType);
			return -1;
}
return 0;
}
void license_generate_randoms(rdpLicense* license)
{
ZeroMemory(license->ClientRandom, CLIENT_RANDOM_LENGTH); /* ClientRandom */
ZeroMemory(license->PremasterSecret, PREMASTER_SECRET_LENGTH); /* PremasterSecret */
#ifndef LICENSE_NULL_CLIENT_RANDOM
crypto_nonce(license->ClientRandom, CLIENT_RANDOM_LENGTH); /* ClientRandom */
#endif
#ifndef LICENSE_NULL_PREMASTER_SECRET
crypto_nonce(license->PremasterSecret, PREMASTER_SECRET_LENGTH); /* PremasterSecret */
#endif
}
/**
* Generate License Cryptographic Keys.
* @param license license module
*/
void license_generate_keys(rdpLicense* license)
{
security_master_secret(license->PremasterSecret, license->ClientRandom,
license->ServerRandom, license->MasterSecret); /* MasterSecret */
security_session_key_blob(license->MasterSecret, license->ClientRandom,
license->ServerRandom, license->SessionKeyBlob); /* SessionKeyBlob */
security_mac_salt_key(license->SessionKeyBlob, license->ClientRandom,
license->ServerRandom, license->MacSaltKey); /* MacSaltKey */
security_licensing_encryption_key(license->SessionKeyBlob, license->ClientRandom,
license->ServerRandom, license->LicensingEncryptionKey); /* LicensingEncryptionKey */
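	/*
	 * Summary of the chain above (per the calls, in order):
	 *   PremasterSecret + ClientRandom + ServerRandom -> MasterSecret
	 *   MasterSecret    + ClientRandom + ServerRandom -> SessionKeyBlob
	 *   SessionKeyBlob  + ClientRandom + ServerRandom -> MacSaltKey
	 *   SessionKeyBlob  + ClientRandom + ServerRandom -> LicensingEncryptionKey
	 */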
#ifdef WITH_DEBUG_LICENSE
fprintf(stderr, "ClientRandom:\n");
winpr_HexDump(license->ClientRandom, CLIENT_RANDOM_LENGTH);
fprintf(stderr, "\n");
fprintf(stderr, "ServerRandom:\n");
winpr_HexDump(license->ServerRandom, SERVER_RANDOM_LENGTH);
fprintf(stderr, "\n");
fprintf(stderr, "PremasterSecret:\n");
winpr_HexDump(license->PremasterSecret, PREMASTER_SECRET_LENGTH);
fprintf(stderr, "\n");
fprintf(stderr, "MasterSecret:\n");
winpr_HexDump(license->MasterSecret, MASTER_SECRET_LENGTH);
fprintf(stderr, "\n");
fprintf(stderr, "SessionKeyBlob:\n");
winpr_HexDump(license->SessionKeyBlob, SESSION_KEY_BLOB_LENGTH);
fprintf(stderr, "\n");
fprintf(stderr, "MacSaltKey:\n");
winpr_HexDump(license->MacSaltKey, MAC_SALT_KEY_LENGTH);
fprintf(stderr, "\n");
fprintf(stderr, "LicensingEncryptionKey:\n");
winpr_HexDump(license->LicensingEncryptionKey, LICENSING_ENCRYPTION_KEY_LENGTH);
fprintf(stderr, "\n");
#endif
}
/**
* Generate Unique Hardware Identifier (CLIENT_HARDWARE_ID).\n
* @param license license module
*/
void license_generate_hwid(rdpLicense* license)
{
CryptoMd5 md5;
BYTE* mac_address;
ZeroMemory(license->HardwareId, HWID_LENGTH);
mac_address = license->rdp->transport->TcpIn->mac_address;
md5 = crypto_md5_init();
crypto_md5_update(md5, mac_address, 6);
crypto_md5_final(md5, &license->HardwareId[HWID_PLATFORM_ID_LENGTH]);
}
void license_get_server_rsa_public_key(rdpLicense* license)
{
BYTE* Exponent;
BYTE* Modulus;
int ModulusLength;
if (license->ServerCertificate->length < 1)
{
certificate_read_server_certificate(license->certificate,
license->rdp->settings->ServerCertificate,
license->rdp->settings->ServerCertificateLength);
}
Exponent = license->certificate->cert_info.exponent;
Modulus = license->certificate->cert_info.Modulus;
ModulusLength = license->certificate->cert_info.ModulusLength;
CopyMemory(license->Exponent, Exponent, 4);
license->ModulusLength = ModulusLength;
license->Modulus = (BYTE*) malloc(ModulusLength);
memcpy(license->Modulus, Modulus, ModulusLength);
}
void license_encrypt_premaster_secret(rdpLicense* license)
{
BYTE* EncryptedPremasterSecret;
license_get_server_rsa_public_key(license);
#ifdef WITH_DEBUG_LICENSE
fprintf(stderr, "Modulus (%d bits):\n", license->ModulusLength * 8);
winpr_HexDump(license->Modulus, license->ModulusLength);
fprintf(stderr, "\n");
fprintf(stderr, "Exponent:\n");
winpr_HexDump(license->Exponent, 4);
fprintf(stderr, "\n");
#endif
EncryptedPremasterSecret = (BYTE*) malloc(license->ModulusLength);
ZeroMemory(EncryptedPremasterSecret, license->ModulusLength);
license->EncryptedPremasterSecret->type = BB_RANDOM_BLOB;
license->EncryptedPremasterSecret->length = PREMASTER_SECRET_LENGTH;
#ifndef LICENSE_NULL_PREMASTER_SECRET
license->EncryptedPremasterSecret->length =
crypto_rsa_public_encrypt(license->PremasterSecret, PREMASTER_SECRET_LENGTH,
license->ModulusLength, license->Modulus, license->Exponent, EncryptedPremasterSecret);
#endif
license->EncryptedPremasterSecret->data = EncryptedPremasterSecret;
}
void license_decrypt_platform_challenge(rdpLicense* license)
{
CryptoRc4 rc4;
license->PlatformChallenge->data = (BYTE*) malloc(license->EncryptedPlatformChallenge->length);
license->PlatformChallenge->length = license->EncryptedPlatformChallenge->length;
rc4 = crypto_rc4_init(license->LicensingEncryptionKey, LICENSING_ENCRYPTION_KEY_LENGTH);
crypto_rc4(rc4, license->EncryptedPlatformChallenge->length,
license->EncryptedPlatformChallenge->data,
license->PlatformChallenge->data);
crypto_rc4_free(rc4);
}
/**
* Read Product Information (PRODUCT_INFO).\n
* @msdn{cc241915}
* @param s stream
* @param productInfo product information
*/
BOOL license_read_product_info(wStream* s, PRODUCT_INFO* productInfo)
{
if (Stream_GetRemainingLength(s) < 8)
return FALSE;
Stream_Read_UINT32(s, productInfo->dwVersion); /* dwVersion (4 bytes) */
Stream_Read_UINT32(s, productInfo->cbCompanyName); /* cbCompanyName (4 bytes) */
if (Stream_GetRemainingLength(s) < productInfo->cbCompanyName + 4)
return FALSE;
	productInfo->pbCompanyName = (BYTE*) malloc(productInfo->cbCompanyName);
	if (!productInfo->pbCompanyName)
		return FALSE;
	Stream_Read(s, productInfo->pbCompanyName, productInfo->cbCompanyName);
	Stream_Read_UINT32(s, productInfo->cbProductId); /* cbProductId (4 bytes) */
	if (Stream_GetRemainingLength(s) < productInfo->cbProductId)
	{
		free(productInfo->pbCompanyName);
		productInfo->pbCompanyName = NULL;
		return FALSE;
	}
	productInfo->pbProductId = (BYTE*) malloc(productInfo->cbProductId);
	if (!productInfo->pbProductId)
	{
		free(productInfo->pbCompanyName);
		productInfo->pbCompanyName = NULL;
		return FALSE;
	}
	Stream_Read(s, productInfo->pbProductId, productInfo->cbProductId);
return TRUE;
}
/**
* Allocate New Product Information (PRODUCT_INFO).\n
* @msdn{cc241915}
* @return new product information
*/
PRODUCT_INFO* license_new_product_info()
{
PRODUCT_INFO* productInfo;
productInfo = (PRODUCT_INFO*) malloc(sizeof(PRODUCT_INFO));
productInfo->dwVersion = 0;
productInfo->cbCompanyName = 0;
productInfo->pbCompanyName = NULL;
productInfo->cbProductId = 0;
productInfo->pbProductId = NULL;
return productInfo;
}
/**
* Free Product Information (PRODUCT_INFO).\n
* @msdn{cc241915}
* @param productInfo product information
*/
void license_free_product_info(PRODUCT_INFO* productInfo)
{
if (productInfo->pbCompanyName != NULL)
free(productInfo->pbCompanyName);
if (productInfo->pbProductId != NULL)
free(productInfo->pbProductId);
free(productInfo);
}
/**
* Read License Binary Blob (LICENSE_BINARY_BLOB).\n
* @msdn{cc240481}
* @param s stream
* @param blob license binary blob
*/
BOOL license_read_binary_blob(wStream* s, LICENSE_BLOB* blob)
{
UINT16 wBlobType;
if (Stream_GetRemainingLength(s) < 4)
return FALSE;
Stream_Read_UINT16(s, wBlobType); /* wBlobType (2 bytes) */
Stream_Read_UINT16(s, blob->length); /* wBlobLen (2 bytes) */
if (Stream_GetRemainingLength(s) < blob->length)
return FALSE;
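	/*
	 * blob->length comes straight off the wire; the check above keeps the
	 * malloc()/Stream_Read() pair below from reading past the received PDU.
	 */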
	/*
	 * The server can choose not to send data by setting the length to 0.
	 * In that case it may not bother to set the type either, so skip the
	 * type-mismatch warning below.
	 */
if ((blob->type != BB_ANY_BLOB) && (blob->length == 0))
return TRUE;
if ((blob->type != wBlobType) && (blob->type != BB_ANY_BLOB))
{
fprintf(stderr, "license binary blob type (%x) does not match expected type (%x).\n", wBlobType, blob->type);
}
blob->type = wBlobType;
	blob->data = (BYTE*) malloc(blob->length);
	if (!blob->data)
		return FALSE;
	Stream_Read(s, blob->data, blob->length); /* blobData */
return TRUE;
}
/**
* Write License Binary Blob (LICENSE_BINARY_BLOB).\n
* @msdn{cc240481}
* @param s stream
* @param blob license binary blob
*/
void license_write_binary_blob(wStream* s, LICENSE_BLOB* blob)
{
Stream_EnsureRemainingCapacity(s, blob->length + 4);
Stream_Write_UINT16(s, blob->type); /* wBlobType (2 bytes) */
Stream_Write_UINT16(s, blob->length); /* wBlobLen (2 bytes) */
if (blob->length > 0)
Stream_Write(s, blob->data, blob->length); /* blobData */
}
void license_write_encrypted_premaster_secret_blob(wStream* s, LICENSE_BLOB* blob, UINT32 ModulusLength)
{
UINT32 length;
length = ModulusLength + 8;
if (blob->length > ModulusLength)
{
fprintf(stderr, "license_write_encrypted_premaster_secret_blob: invalid blob\n");
return;
}
Stream_EnsureRemainingCapacity(s, length + 4);
Stream_Write_UINT16(s, blob->type); /* wBlobType (2 bytes) */
Stream_Write_UINT16(s, length); /* wBlobLen (2 bytes) */
if (blob->length > 0)
Stream_Write(s, blob->data, blob->length); /* blobData */
Stream_Zero(s, length - blob->length);
}
/**
* Allocate New License Binary Blob (LICENSE_BINARY_BLOB).\n
* @msdn{cc240481}
* @return new license binary blob
*/
LICENSE_BLOB* license_new_binary_blob(UINT16 type)
{
LICENSE_BLOB* blob;
blob = (LICENSE_BLOB*) malloc(sizeof(LICENSE_BLOB));
blob->type = type;
blob->length = 0;
blob->data = NULL;
return blob;
}
/**
* Free License Binary Blob (LICENSE_BINARY_BLOB).\n
* @msdn{cc240481}
* @param blob license binary blob
*/
void license_free_binary_blob(LICENSE_BLOB* blob)
{
if (blob->data != NULL)
free(blob->data);
free(blob);
}
/**
* Read License Scope List (SCOPE_LIST).\n
* @msdn{cc241916}
* @param s stream
* @param scopeList scope list
*/
BOOL license_read_scope_list(wStream* s, SCOPE_LIST* scopeList)
{
UINT32 i;
UINT32 scopeCount;
if (Stream_GetRemainingLength(s) < 4)
return FALSE;
Stream_Read_UINT32(s, scopeCount); /* ScopeCount (4 bytes) */
if (Stream_GetRemainingLength(s) / sizeof(LICENSE_BLOB) < scopeCount)
return FALSE; /* Avoid overflow in malloc */
	scopeList->count = scopeCount;
	scopeList->array = (LICENSE_BLOB*) malloc(sizeof(LICENSE_BLOB) * scopeCount);
	if (!scopeList->array)
		return FALSE;
	/* ScopeArray */
for (i = 0; i < scopeCount; i++)
{
scopeList->array[i].type = BB_SCOPE_BLOB;
if (!license_read_binary_blob(s, &scopeList->array[i]))
return FALSE;
}
return TRUE;
}
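
/*
 * Illustrative sketch (not part of FreeRDP): why license_read_scope_list()
 * divides instead of multiplying. A multiplication-based check can wrap:
 * supposing sizeof(LICENSE_BLOB) were 8 on a 32-bit target, a hostile
 * scopeCount of 0x20000001 would make 8 * 0x20000001 wrap to 8, so malloc()
 * would return a tiny buffer while the read loop iterates 0x20000001 times.
 * Dividing the remaining length first has no intermediate product and
 * therefore cannot overflow.
 */
#if 0
static BOOL scope_count_fits(size_t remaining, UINT32 scopeCount)
{
	/* division first: no wraparound possible */
	return (remaining / sizeof(LICENSE_BLOB)) >= scopeCount;
}
#endif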
/**
* Allocate New License Scope List (SCOPE_LIST).\n
* @msdn{cc241916}
* @return new scope list
*/
SCOPE_LIST* license_new_scope_list()
{
SCOPE_LIST* scopeList;
scopeList = (SCOPE_LIST*) malloc(sizeof(SCOPE_LIST));
scopeList->count = 0;
scopeList->array = NULL;
return scopeList;
}
/**
* Free License Scope List (SCOPE_LIST).\n
* @msdn{cc241916}
* @param scopeList scope list
*/
void license_free_scope_list(SCOPE_LIST* scopeList)
{
UINT32 i;
	/*
	 * We must NOT call license_free_binary_blob() on each scopeList->array[i]
	 * element: scopeList->array was allocated by a single call to malloc(),
	 * so its elements cannot be freed individually. Instead, free each
	 * scopeList->array[i].data, then release scopeList->array itself with
	 * one call to free().
	 */
for (i = 0; i < scopeList->count; i++)
{
free(scopeList->array[i].data);
}
free(scopeList->array);
free(scopeList);
}
/**
* Read a LICENSE_REQUEST packet.\n
* @msdn{cc241914}
* @param license license module
* @param s stream
*/
BOOL license_read_license_request_packet(rdpLicense* license, wStream* s)
{
/* ServerRandom (32 bytes) */
if (Stream_GetRemainingLength(s) < 32)
return FALSE;
Stream_Read(s, license->ServerRandom, 32);
/* ProductInfo */
if (!license_read_product_info(s, license->ProductInfo))
return FALSE;
/* KeyExchangeList */
if (!license_read_binary_blob(s, license->KeyExchangeList))
return FALSE;
/* ServerCertificate */
if (!license_read_binary_blob(s, license->ServerCertificate))
return FALSE;
/* ScopeList */
if (!license_read_scope_list(s, license->ScopeList))
return FALSE;
	/* Parse Server Certificate */
	if (certificate_read_server_certificate(license->certificate,
			license->ServerCertificate->data, license->ServerCertificate->length) < 0)
		return FALSE;
license_generate_keys(license);
license_generate_hwid(license);
license_encrypt_premaster_secret(license);
#ifdef WITH_DEBUG_LICENSE
fprintf(stderr, "ServerRandom:\n");
winpr_HexDump(license->ServerRandom, 32);
fprintf(stderr, "\n");
license_print_product_info(license->ProductInfo);
fprintf(stderr, "\n");
license_print_scope_list(license->ScopeList);
fprintf(stderr, "\n");
#endif
return TRUE;
}
/**
* Read a PLATFORM_CHALLENGE packet.\n
* @msdn{cc241921}
* @param license license module
* @param s stream
*/
BOOL license_read_platform_challenge_packet(rdpLicense* license, wStream* s)
{
BYTE MacData[16];
UINT32 ConnectFlags = 0;
DEBUG_LICENSE("Receiving Platform Challenge Packet");
if (Stream_GetRemainingLength(s) < 4)
return FALSE;
Stream_Read_UINT32(s, ConnectFlags); /* ConnectFlags, Reserved (4 bytes) */
/* EncryptedPlatformChallenge */
license->EncryptedPlatformChallenge->type = BB_ANY_BLOB;
license_read_binary_blob(s, license->EncryptedPlatformChallenge);
license->EncryptedPlatformChallenge->type = BB_ENCRYPTED_DATA_BLOB;
if (Stream_GetRemainingLength(s) < 16)
return FALSE;
Stream_Read(s, MacData, 16); /* MACData (16 bytes) */
license_decrypt_platform_challenge(license);
#ifdef WITH_DEBUG_LICENSE
fprintf(stderr, "ConnectFlags: 0x%08X\n", ConnectFlags);
fprintf(stderr, "\n");
fprintf(stderr, "EncryptedPlatformChallenge:\n");
winpr_HexDump(license->EncryptedPlatformChallenge->data, license->EncryptedPlatformChallenge->length);
fprintf(stderr, "\n");
fprintf(stderr, "PlatformChallenge:\n");
winpr_HexDump(license->PlatformChallenge->data, license->PlatformChallenge->length);
fprintf(stderr, "\n");
fprintf(stderr, "MacData:\n");
winpr_HexDump(MacData, 16);
fprintf(stderr, "\n");
#endif
return TRUE;
}
/**
* Read a NEW_LICENSE packet.\n
* @msdn{cc241926}
* @param license license module
* @param s stream
*/
void license_read_new_license_packet(rdpLicense* license, wStream* s)
{
DEBUG_LICENSE("Receiving New License Packet");
license->state = LICENSE_STATE_COMPLETED;
}
/**
* Read an UPGRADE_LICENSE packet.\n
* @msdn{cc241924}
* @param license license module
* @param s stream
*/
void license_read_upgrade_license_packet(rdpLicense* license, wStream* s)
{
DEBUG_LICENSE("Receiving Upgrade License Packet");
license->state = LICENSE_STATE_COMPLETED;
}
/**
* Read an ERROR_ALERT packet.\n
* @msdn{cc240482}
* @param license license module
* @param s stream
*/
BOOL license_read_error_alert_packet(rdpLicense* license, wStream* s)
{
UINT32 dwErrorCode;
UINT32 dwStateTransition;
if (Stream_GetRemainingLength(s) < 8)
return FALSE;
Stream_Read_UINT32(s, dwErrorCode); /* dwErrorCode (4 bytes) */
Stream_Read_UINT32(s, dwStateTransition); /* dwStateTransition (4 bytes) */
if (!license_read_binary_blob(s, license->ErrorInfo)) /* bbErrorInfo */
return FALSE;
#ifdef WITH_DEBUG_LICENSE
fprintf(stderr, "dwErrorCode: %s, dwStateTransition: %s\n",
error_codes[dwErrorCode], state_transitions[dwStateTransition]);
#endif
if (dwErrorCode == STATUS_VALID_CLIENT)
{
license->state = LICENSE_STATE_COMPLETED;
return TRUE;
}
switch (dwStateTransition)
{
case ST_TOTAL_ABORT:
license->state = LICENSE_STATE_ABORTED;
break;
case ST_NO_TRANSITION:
license->state = LICENSE_STATE_COMPLETED;
break;
case ST_RESET_PHASE_TO_START:
license->state = LICENSE_STATE_AWAIT;
break;
case ST_RESEND_LAST_MESSAGE:
break;
default:
break;
}
return TRUE;
}
/**
* Write a NEW_LICENSE_REQUEST packet.\n
* @msdn{cc241918}
* @param license license module
* @param s stream
*/
void license_write_new_license_request_packet(rdpLicense* license, wStream* s)
{
UINT32 PlatformId;
UINT32 PreferredKeyExchangeAlg = KEY_EXCHANGE_ALG_RSA;
PlatformId = CLIENT_OS_ID_WINNT_POST_52 | CLIENT_IMAGE_ID_MICROSOFT;
Stream_Write_UINT32(s, PreferredKeyExchangeAlg); /* PreferredKeyExchangeAlg (4 bytes) */
Stream_Write_UINT32(s, PlatformId); /* PlatformId (4 bytes) */
Stream_Write(s, license->ClientRandom, 32); /* ClientRandom (32 bytes) */
license_write_encrypted_premaster_secret_blob(s, license->EncryptedPremasterSecret, license->ModulusLength); /* EncryptedPremasterSecret */
license_write_binary_blob(s, license->ClientUserName); /* ClientUserName */
license_write_binary_blob(s, license->ClientMachineName); /* ClientMachineName */
#ifdef WITH_DEBUG_LICENSE
fprintf(stderr, "PreferredKeyExchangeAlg: 0x%08X\n", PreferredKeyExchangeAlg);
fprintf(stderr, "\n");
fprintf(stderr, "ClientRandom:\n");
winpr_HexDump(license->ClientRandom, 32);
fprintf(stderr, "\n");
fprintf(stderr, "EncryptedPremasterSecret\n");
winpr_HexDump(license->EncryptedPremasterSecret->data, license->EncryptedPremasterSecret->length);
fprintf(stderr, "\n");
fprintf(stderr, "ClientUserName (%d): %s\n", license->ClientUserName->length, (char*) license->ClientUserName->data);
fprintf(stderr, "\n");
fprintf(stderr, "ClientMachineName (%d): %s\n", license->ClientMachineName->length, (char*) license->ClientMachineName->data);
fprintf(stderr, "\n");
#endif
}
/**
* Send a NEW_LICENSE_REQUEST packet.\n
* @msdn{cc241918}
* @param license license module
*/
void license_send_new_license_request_packet(rdpLicense* license)
{
wStream* s;
char* username;
DEBUG_LICENSE("Sending New License Packet");
s = license_send_stream_init(license);
if (license->rdp->settings->Username != NULL)
username = license->rdp->settings->Username;
else
username = "username";
license->ClientUserName->data = (BYTE*) username;
license->ClientUserName->length = strlen(username) + 1;
license->ClientMachineName->data = (BYTE*) license->rdp->settings->ClientHostname;
license->ClientMachineName->length = strlen(license->rdp->settings->ClientHostname) + 1;
license_write_new_license_request_packet(license, s);
license_send(license, s, NEW_LICENSE_REQUEST);
license->ClientUserName->data = NULL;
license->ClientUserName->length = 0;
license->ClientMachineName->data = NULL;
license->ClientMachineName->length = 0;
}
/**
* Write Client Challenge Response Packet.\n
* @msdn{cc241922}
* @param license license module
* @param s stream
* @param mac_data signature
*/
void license_write_platform_challenge_response_packet(rdpLicense* license, wStream* s, BYTE* macData)
{
license_write_binary_blob(s, license->EncryptedPlatformChallenge); /* EncryptedPlatformChallengeResponse */
license_write_binary_blob(s, license->EncryptedHardwareId); /* EncryptedHWID */
Stream_EnsureRemainingCapacity(s, 16);
Stream_Write(s, macData, 16); /* MACData */
}
/**
* Send Client Challenge Response Packet.\n
* @msdn{cc241922}
* @param license license module
*/
void license_send_platform_challenge_response_packet(rdpLicense* license)
{
wStream* s;
int length;
BYTE* buffer;
CryptoRc4 rc4;
BYTE mac_data[16];
DEBUG_LICENSE("Sending Platform Challenge Response Packet");
s = license_send_stream_init(license);
license->EncryptedPlatformChallenge->type = BB_DATA_BLOB;
length = license->PlatformChallenge->length + HWID_LENGTH;
buffer = (BYTE*) malloc(length);
CopyMemory(buffer, license->PlatformChallenge->data, license->PlatformChallenge->length);
CopyMemory(&buffer[license->PlatformChallenge->length], license->HardwareId, HWID_LENGTH);
security_mac_data(license->MacSaltKey, buffer, length, mac_data);
free(buffer);
buffer = (BYTE*) malloc(HWID_LENGTH);
rc4 = crypto_rc4_init(license->LicensingEncryptionKey, LICENSING_ENCRYPTION_KEY_LENGTH);
crypto_rc4(rc4, HWID_LENGTH, license->HardwareId, buffer);
crypto_rc4_free(rc4);
license->EncryptedHardwareId->type = BB_DATA_BLOB;
license->EncryptedHardwareId->data = buffer;
license->EncryptedHardwareId->length = HWID_LENGTH;
#ifdef WITH_DEBUG_LICENSE
fprintf(stderr, "LicensingEncryptionKey:\n");
winpr_HexDump(license->LicensingEncryptionKey, 16);
fprintf(stderr, "\n");
fprintf(stderr, "HardwareId:\n");
winpr_HexDump(license->HardwareId, HWID_LENGTH);
fprintf(stderr, "\n");
fprintf(stderr, "EncryptedHardwareId:\n");
winpr_HexDump(license->EncryptedHardwareId->data, HWID_LENGTH);
fprintf(stderr, "\n");
#endif
license_write_platform_challenge_response_packet(license, s, mac_data);
license_send(license, s, PLATFORM_CHALLENGE_RESPONSE);
}
/**
* Send Server License Error - Valid Client Packet.\n
* @msdn{cc241922}
* @param license license module
*/
BOOL license_send_valid_client_error_packet(rdpLicense* license)
{
wStream* s;
s = license_send_stream_init(license);
DEBUG_LICENSE("Sending Error Alert Packet");
Stream_Write_UINT32(s, STATUS_VALID_CLIENT); /* dwErrorCode */
Stream_Write_UINT32(s, ST_NO_TRANSITION); /* dwStateTransition */
license_write_binary_blob(s, license->ErrorInfo);
return license_send(license, s, ERROR_ALERT);
}
/**
* Instantiate new license module.
* @param rdp RDP module
* @return new license module
*/
rdpLicense* license_new(rdpRdp* rdp)
{
rdpLicense* license;
license = (rdpLicense*) malloc(sizeof(rdpLicense));
if (license != NULL)
{
ZeroMemory(license, sizeof(rdpLicense));
license->rdp = rdp;
license->state = LICENSE_STATE_AWAIT;
license->certificate = certificate_new();
license->ProductInfo = license_new_product_info();
license->ErrorInfo = license_new_binary_blob(BB_ERROR_BLOB);
license->KeyExchangeList = license_new_binary_blob(BB_KEY_EXCHG_ALG_BLOB);
license->ServerCertificate = license_new_binary_blob(BB_CERTIFICATE_BLOB);
license->ClientUserName = license_new_binary_blob(BB_CLIENT_USER_NAME_BLOB);
license->ClientMachineName = license_new_binary_blob(BB_CLIENT_MACHINE_NAME_BLOB);
license->PlatformChallenge = license_new_binary_blob(BB_ANY_BLOB);
license->EncryptedPlatformChallenge = license_new_binary_blob(BB_ANY_BLOB);
license->EncryptedPremasterSecret = license_new_binary_blob(BB_ANY_BLOB);
license->EncryptedHardwareId = license_new_binary_blob(BB_ENCRYPTED_DATA_BLOB);
license->ScopeList = license_new_scope_list();
license_generate_randoms(license);
}
return license;
}
/**
* Free license module.
* @param license license module to be freed
*/
void license_free(rdpLicense* license)
{
if (license)
{
free(license->Modulus);
certificate_free(license->certificate);
license_free_product_info(license->ProductInfo);
license_free_binary_blob(license->ErrorInfo);
license_free_binary_blob(license->KeyExchangeList);
license_free_binary_blob(license->ServerCertificate);
license_free_binary_blob(license->ClientUserName);
license_free_binary_blob(license->ClientMachineName);
license_free_binary_blob(license->PlatformChallenge);
license_free_binary_blob(license->EncryptedPlatformChallenge);
license_free_binary_blob(license->EncryptedPremasterSecret);
license_free_binary_blob(license->EncryptedHardwareId);
license_free_scope_list(license->ScopeList);
free(license);
}
}
/* ---- end of file ./CrossVul/dataset_final_sorted/CWE-189/c/good_2046_0 ---- */

/* ---- file crossvul-cpp_data_good_5764_0 (./CrossVul/dataset_final_sorted/CWE-189/c/good_5764_0) ---- */
/*
* IPV6 GSO/GRO offload support
* Linux INET6 implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* UDPv6 GSO support
*/
#include <linux/skbuff.h>
#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
static int udp6_ufo_send_check(struct sk_buff *skb)
{
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
if (!pskb_may_pull(skb, sizeof(*uh)))
return -EINVAL;
if (likely(!skb->encapsulation)) {
ipv6h = ipv6_hdr(skb);
uh = udp_hdr(skb);
uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
IPPROTO_UDP, 0);
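		/*
		 * uh->check now holds the negated pseudo-header checksum; with
		 * CHECKSUM_PARTIAL set below, the remainder of the sum is
		 * computed from csum_start/csum_offset.
		 */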
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
}
return 0;
}
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
u8 *packet_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum;
int tnl_hlen;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
if (unlikely(type & ~(SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_GRE |
SKB_GSO_IPIP |
SKB_GSO_SIT |
SKB_GSO_MPLS) ||
!(type & (SKB_GSO_UDP))))
goto out;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
goto out;
}
if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
segs = skb_udp_tunnel_segment(skb, features);
else {
		/* Do software UFO. Complete and fill in the UDP checksum, as
		 * hardware cannot checksum UDP packets sent as multiple IP
		 * fragments.
		 */
offset = skb_checksum_start_offset(skb);
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
tnl_hlen = skb_tnl_header_len(skb);
if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
goto out;
}
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
unfrag_ip6hlen + tnl_hlen;
packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
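		/*
		 * Resulting layout (illustrative, tnl_hlen == 0 case):
		 *
		 *   before: | mac | ipv6 + unfrag ext hdrs | payload |
		 *   after:  | mac | ipv6 + unfrag ext hdrs | frag hdr | payload |
		 *
		 * The first unfrag_len bytes were slid left by 8 bytes
		 * (sizeof(struct frag_hdr)), opening a gap for the fragment
		 * header written just below.
		 */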
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
*/
segs = skb_segment(skb, features);
}
out:
return segs;
}
static const struct net_offload udpv6_offload = {
.callbacks = {
.gso_send_check = udp6_ufo_send_check,
.gso_segment = udp6_ufo_fragment,
},
};
int __init udp_offload_init(void)
{
return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
}
/* ---- end of file ./CrossVul/dataset_final_sorted/CWE-189/c/good_5764_0 ---- */

/* ---- file crossvul-cpp_data_good_5798_0 (./CrossVul/dataset_final_sorted/CWE-189/c/good_5798_0) ---- */
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "decl.h"
#include "cmd.h"
#include "debugfs.h"
static struct dentry *lbs_dir;
static char *szStates[] = {
"Connected",
"Disconnected"
};
#ifdef PROC_DEBUG
static void lbs_debug_init(struct lbs_private *priv);
#endif
static ssize_t write_file_dummy(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return -EINVAL;
}
static const size_t len = PAGE_SIZE;
static ssize_t lbs_dev_info(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
size_t pos = 0;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
ssize_t res;
if (!buf)
return -ENOMEM;
pos += snprintf(buf+pos, len-pos, "state = %s\n",
szStates[priv->connect_status]);
pos += snprintf(buf+pos, len-pos, "region_code = %02x\n",
(u32) priv->regioncode);
res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
free_page(addr);
return res;
}
static ssize_t lbs_sleepparams_write(struct file *file,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t buf_size, ret;
struct sleep_params sp;
int p1, p2, p3, p4, p5, p6;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, user_buf, buf_size)) {
ret = -EFAULT;
goto out_unlock;
}
ret = sscanf(buf, "%d %d %d %d %d %d", &p1, &p2, &p3, &p4, &p5, &p6);
if (ret != 6) {
ret = -EINVAL;
goto out_unlock;
}
sp.sp_error = p1;
sp.sp_offset = p2;
sp.sp_stabletime = p3;
sp.sp_calcontrol = p4;
sp.sp_extsleepclk = p5;
sp.sp_reserved = p6;
ret = lbs_cmd_802_11_sleep_params(priv, CMD_ACT_SET, &sp);
if (!ret)
ret = count;
else if (ret > 0)
ret = -EINVAL;
out_unlock:
free_page(addr);
return ret;
}
static ssize_t lbs_sleepparams_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t ret;
size_t pos = 0;
struct sleep_params sp;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
ret = lbs_cmd_802_11_sleep_params(priv, CMD_ACT_GET, &sp);
if (ret)
goto out_unlock;
pos += snprintf(buf, len, "%d %d %d %d %d %d\n", sp.sp_error,
sp.sp_offset, sp.sp_stabletime,
sp.sp_calcontrol, sp.sp_extsleepclk,
sp.sp_reserved);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
out_unlock:
free_page(addr);
return ret;
}
static ssize_t lbs_host_sleep_write(struct file *file,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t buf_size, ret;
int host_sleep;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, user_buf, buf_size)) {
ret = -EFAULT;
goto out_unlock;
}
ret = sscanf(buf, "%d", &host_sleep);
if (ret != 1) {
ret = -EINVAL;
goto out_unlock;
}
if (host_sleep == 0)
ret = lbs_set_host_sleep(priv, 0);
else if (host_sleep == 1) {
if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
netdev_info(priv->dev,
"wake parameters not configured\n");
ret = -EINVAL;
goto out_unlock;
}
ret = lbs_set_host_sleep(priv, 1);
} else {
netdev_err(priv->dev, "invalid option\n");
ret = -EINVAL;
}
if (!ret)
ret = count;
out_unlock:
free_page(addr);
return ret;
}
static ssize_t lbs_host_sleep_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t ret;
size_t pos = 0;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
pos += snprintf(buf, len, "%d\n", priv->is_host_sleep_activated);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
free_page(addr);
return ret;
}
/*
 * When calling CMD_802_11_SUBSCRIBE_EVENT with CMD_ACT_GET, we might
* get a bunch of vendor-specific TLVs (a.k.a. IEs) back from the
* firmware. Here's an example:
* 04 01 02 00 00 00 05 01 02 00 00 00 06 01 02 00
* 00 00 07 01 02 00 3c 00 00 00 00 00 00 00 03 03
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
* The 04 01 is the TLV type (here TLV_TYPE_RSSI_LOW), 02 00 is the length,
* 00 00 are the data bytes of this TLV. For this TLV, their meaning is
* defined in mrvlietypes_thresholds
*
* This function searches in this TLV data chunk for a given TLV type
* and returns a pointer to the first data byte of the TLV, or to NULL
* if the TLV hasn't been found.
*/
static void *lbs_tlv_find(uint16_t tlv_type, const uint8_t *tlv, uint16_t size)
{
struct mrvl_ie_header *tlv_h;
uint16_t length;
ssize_t pos = 0;
while (pos < size) {
tlv_h = (struct mrvl_ie_header *) tlv;
if (!tlv_h->len)
return NULL;
if (tlv_h->type == cpu_to_le16(tlv_type))
return tlv_h;
length = le16_to_cpu(tlv_h->len) + sizeof(*tlv_h);
pos += length;
tlv += length;
}
return NULL;
}
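
/*
 * Illustrative usage sketch (hypothetical helper, not part of the driver):
 * pulling the low-RSSI threshold out of a GET response, matching the hex
 * dump in the comment above lbs_tlv_find().
 */
#if 0
static void example_find_low_rssi(struct cmd_ds_802_11_subscribe_event *resp)
{
	struct mrvl_ie_thresholds *th;

	th = lbs_tlv_find(TLV_TYPE_RSSI_LOW, resp->tlv, sizeof(resp->tlv));
	if (th) /* "04 01 02 00 <value> <freq>" in the dump above */
		pr_info("low RSSI: value=%u freq=%u\n", th->value, th->freq);
}
#endif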
static ssize_t lbs_threshold_read(uint16_t tlv_type, uint16_t event_mask,
struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct cmd_ds_802_11_subscribe_event *subscribed;
struct mrvl_ie_thresholds *got;
struct lbs_private *priv = file->private_data;
ssize_t ret = 0;
size_t pos = 0;
char *buf;
u8 value;
u8 freq;
int events = 0;
buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
subscribed = kzalloc(sizeof(*subscribed), GFP_KERNEL);
if (!subscribed) {
ret = -ENOMEM;
goto out_page;
}
subscribed->hdr.size = cpu_to_le16(sizeof(*subscribed));
subscribed->action = cpu_to_le16(CMD_ACT_GET);
ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, subscribed);
if (ret)
goto out_cmd;
got = lbs_tlv_find(tlv_type, subscribed->tlv, sizeof(subscribed->tlv));
if (got) {
value = got->value;
freq = got->freq;
events = le16_to_cpu(subscribed->events);
pos += snprintf(buf, len, "%d %d %d\n", value, freq,
!!(events & event_mask));
}
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
out_cmd:
kfree(subscribed);
out_page:
free_page((unsigned long)buf);
return ret;
}
static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask,
struct file *file,
const char __user *userbuf, size_t count,
loff_t *ppos)
{
struct cmd_ds_802_11_subscribe_event *events;
struct mrvl_ie_thresholds *tlv;
struct lbs_private *priv = file->private_data;
ssize_t buf_size;
int value, freq, new_mask;
uint16_t curr_mask;
char *buf;
int ret;
buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
ret = -EFAULT;
goto out_page;
}
ret = sscanf(buf, "%d %d %d", &value, &freq, &new_mask);
if (ret != 3) {
ret = -EINVAL;
goto out_page;
}
events = kzalloc(sizeof(*events), GFP_KERNEL);
if (!events) {
ret = -ENOMEM;
goto out_page;
}
events->hdr.size = cpu_to_le16(sizeof(*events));
events->action = cpu_to_le16(CMD_ACT_GET);
ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events);
if (ret)
goto out_events;
curr_mask = le16_to_cpu(events->events);
if (new_mask)
new_mask = curr_mask | event_mask;
else
new_mask = curr_mask & ~event_mask;
/* Now everything is set and we can send stuff down to the firmware */
tlv = (void *)events->tlv;
events->action = cpu_to_le16(CMD_ACT_SET);
events->events = cpu_to_le16(new_mask);
tlv->header.type = cpu_to_le16(tlv_type);
tlv->header.len = cpu_to_le16(sizeof(*tlv) - sizeof(tlv->header));
tlv->value = value;
if (tlv_type != TLV_TYPE_BCNMISS)
tlv->freq = freq;
/* The command header, the action, the event mask, and one TLV */
events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv));
ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events);
if (!ret)
ret = count;
out_events:
kfree(events);
out_page:
free_page((unsigned long)buf);
return ret;
}
static ssize_t lbs_lowrssi_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_RSSI_LOW, CMD_SUBSCRIBE_RSSI_LOW,
file, userbuf, count, ppos);
}
static ssize_t lbs_lowrssi_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_RSSI_LOW, CMD_SUBSCRIBE_RSSI_LOW,
file, userbuf, count, ppos);
}
static ssize_t lbs_lowsnr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_SNR_LOW, CMD_SUBSCRIBE_SNR_LOW,
file, userbuf, count, ppos);
}
static ssize_t lbs_lowsnr_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_SNR_LOW, CMD_SUBSCRIBE_SNR_LOW,
file, userbuf, count, ppos);
}
static ssize_t lbs_failcount_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_FAILCOUNT, CMD_SUBSCRIBE_FAILCOUNT,
file, userbuf, count, ppos);
}
static ssize_t lbs_failcount_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_FAILCOUNT, CMD_SUBSCRIBE_FAILCOUNT,
file, userbuf, count, ppos);
}
static ssize_t lbs_highrssi_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_RSSI_HIGH, CMD_SUBSCRIBE_RSSI_HIGH,
file, userbuf, count, ppos);
}
static ssize_t lbs_highrssi_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_RSSI_HIGH, CMD_SUBSCRIBE_RSSI_HIGH,
file, userbuf, count, ppos);
}
static ssize_t lbs_highsnr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_SNR_HIGH, CMD_SUBSCRIBE_SNR_HIGH,
file, userbuf, count, ppos);
}
static ssize_t lbs_highsnr_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_SNR_HIGH, CMD_SUBSCRIBE_SNR_HIGH,
file, userbuf, count, ppos);
}
static ssize_t lbs_bcnmiss_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_BCNMISS, CMD_SUBSCRIBE_BCNMISS,
file, userbuf, count, ppos);
}
static ssize_t lbs_bcnmiss_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_write(TLV_TYPE_BCNMISS, CMD_SUBSCRIBE_BCNMISS,
file, userbuf, count, ppos);
}
static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t pos = 0;
int ret;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
u32 val = 0;
if (!buf)
return -ENOMEM;
ret = lbs_get_reg(priv, CMD_MAC_REG_ACCESS, priv->mac_offset, &val);
mdelay(10);
if (!ret) {
pos = snprintf(buf, len, "MAC[0x%x] = 0x%08x\n",
priv->mac_offset, val);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
}
free_page(addr);
return ret;
}
static ssize_t lbs_rdmac_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
priv->mac_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_wrmac_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
u32 offset, value;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
res = sscanf(buf, "%x %x", &offset, &value);
if (res != 2) {
res = -EFAULT;
goto out_unlock;
}
res = lbs_set_reg(priv, CMD_MAC_REG_ACCESS, offset, value);
mdelay(10);
if (!res)
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t pos = 0;
int ret;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
u32 val;
if (!buf)
return -ENOMEM;
ret = lbs_get_reg(priv, CMD_BBP_REG_ACCESS, priv->bbp_offset, &val);
mdelay(10);
if (!ret) {
pos = snprintf(buf, len, "BBP[0x%x] = 0x%08x\n",
priv->bbp_offset, val);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
}
free_page(addr);
return ret;
}
static ssize_t lbs_rdbbp_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
priv->bbp_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_wrbbp_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
u32 offset, value;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
res = sscanf(buf, "%x %x", &offset, &value);
if (res != 2) {
res = -EFAULT;
goto out_unlock;
}
res = lbs_set_reg(priv, CMD_BBP_REG_ACCESS, offset, value);
mdelay(10);
if (!res)
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t pos = 0;
int ret;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
u32 val;
if (!buf)
return -ENOMEM;
ret = lbs_get_reg(priv, CMD_RF_REG_ACCESS, priv->rf_offset, &val);
mdelay(10);
if (!ret) {
pos = snprintf(buf, len, "RF[0x%x] = 0x%08x\n",
priv->rf_offset, val);
ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
}
free_page(addr);
return ret;
}
static ssize_t lbs_rdrf_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
priv->rf_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
return res;
}
static ssize_t lbs_wrrf_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct lbs_private *priv = file->private_data;
ssize_t res, buf_size;
u32 offset, value;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
buf_size = min(count, len - 1);
if (copy_from_user(buf, userbuf, buf_size)) {
res = -EFAULT;
goto out_unlock;
}
res = sscanf(buf, "%x %x", &offset, &value);
if (res != 2) {
res = -EFAULT;
goto out_unlock;
}
res = lbs_set_reg(priv, CMD_RF_REG_ACCESS, offset, value);
mdelay(10);
if (!res)
res = count;
out_unlock:
free_page(addr);
return res;
}
#define FOPS(fread, fwrite) { \
.owner = THIS_MODULE, \
.open = simple_open, \
.read = (fread), \
.write = (fwrite), \
.llseek = generic_file_llseek, \
}
struct lbs_debugfs_files {
const char *name;
umode_t perm;
struct file_operations fops;
};
static const struct lbs_debugfs_files debugfs_files[] = {
{ "info", 0444, FOPS(lbs_dev_info, write_file_dummy), },
{ "sleepparams", 0644, FOPS(lbs_sleepparams_read,
lbs_sleepparams_write), },
{ "hostsleep", 0644, FOPS(lbs_host_sleep_read,
lbs_host_sleep_write), },
};
static const struct lbs_debugfs_files debugfs_events_files[] = {
{"low_rssi", 0644, FOPS(lbs_lowrssi_read,
lbs_lowrssi_write), },
{"low_snr", 0644, FOPS(lbs_lowsnr_read,
lbs_lowsnr_write), },
{"failure_count", 0644, FOPS(lbs_failcount_read,
lbs_failcount_write), },
{"beacon_missed", 0644, FOPS(lbs_bcnmiss_read,
lbs_bcnmiss_write), },
{"high_rssi", 0644, FOPS(lbs_highrssi_read,
lbs_highrssi_write), },
{"high_snr", 0644, FOPS(lbs_highsnr_read,
lbs_highsnr_write), },
};
static const struct lbs_debugfs_files debugfs_regs_files[] = {
{"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), },
{"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), },
{"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), },
{"wrbbp", 0600, FOPS(NULL, lbs_wrbbp_write), },
{"rdrf", 0644, FOPS(lbs_rdrf_read, lbs_rdrf_write), },
{"wrrf", 0600, FOPS(NULL, lbs_wrrf_write), },
};
void lbs_debugfs_init(void)
{
if (!lbs_dir)
lbs_dir = debugfs_create_dir("lbs_wireless", NULL);
}
void lbs_debugfs_remove(void)
{
if (lbs_dir)
debugfs_remove(lbs_dir);
}
void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
{
int i;
const struct lbs_debugfs_files *files;
if (!lbs_dir)
goto exit;
priv->debugfs_dir = debugfs_create_dir(dev->name, lbs_dir);
if (!priv->debugfs_dir)
goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_files); i++) {
files = &debugfs_files[i];
priv->debugfs_files[i] = debugfs_create_file(files->name,
files->perm,
priv->debugfs_dir,
priv,
&files->fops);
}
priv->events_dir = debugfs_create_dir("subscribed_events", priv->debugfs_dir);
if (!priv->events_dir)
goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_events_files); i++) {
files = &debugfs_events_files[i];
priv->debugfs_events_files[i] = debugfs_create_file(files->name,
files->perm,
priv->events_dir,
priv,
&files->fops);
}
priv->regs_dir = debugfs_create_dir("registers", priv->debugfs_dir);
if (!priv->regs_dir)
goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_regs_files); i++) {
files = &debugfs_regs_files[i];
priv->debugfs_regs_files[i] = debugfs_create_file(files->name,
files->perm,
priv->regs_dir,
priv,
&files->fops);
}
#ifdef PROC_DEBUG
lbs_debug_init(priv);
#endif
exit:
return;
}
void lbs_debugfs_remove_one(struct lbs_private *priv)
{
int i;
for(i=0; i<ARRAY_SIZE(debugfs_regs_files); i++)
debugfs_remove(priv->debugfs_regs_files[i]);
debugfs_remove(priv->regs_dir);
for(i=0; i<ARRAY_SIZE(debugfs_events_files); i++)
debugfs_remove(priv->debugfs_events_files[i]);
debugfs_remove(priv->events_dir);
#ifdef PROC_DEBUG
debugfs_remove(priv->debugfs_debug);
#endif
for(i=0; i<ARRAY_SIZE(debugfs_files); i++)
debugfs_remove(priv->debugfs_files[i]);
debugfs_remove(priv->debugfs_dir);
}
/* debug entry */
#ifdef PROC_DEBUG
#define item_size(n) (FIELD_SIZEOF(struct lbs_private, n))
#define item_addr(n) (offsetof(struct lbs_private, n))
struct debug_data {
char name[32];
u32 size;
size_t addr;
};
/* To debug any member of struct lbs_private, simply add one line here.
*/
static struct debug_data items[] = {
{"psmode", item_size(psmode), item_addr(psmode)},
{"psstate", item_size(psstate), item_addr(psstate)},
};
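/*
 * Hypothetical example of the "one line" mentioned above: exposing a
 * made-up connect_status member of struct lbs_private would need only
 * one more initializer:
 *
 * {"connect_status", item_size(connect_status), item_addr(connect_status)},
 */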
static int num_of_items = ARRAY_SIZE(items);
/**
* lbs_debugfs_read - proc read function
*
* @file: file to read
* @userbuf: pointer to buffer
* @count: number of bytes to read
* @ppos: read data starting position
*
* returns: amount of data read or negative error code
*/
static ssize_t lbs_debugfs_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
int val = 0;
size_t pos = 0;
ssize_t res;
char *p;
int i;
struct debug_data *d;
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *)addr;
if (!buf)
return -ENOMEM;
p = buf;
d = file->private_data;
for (i = 0; i < num_of_items; i++) {
if (d[i].size == 1)
val = *((u8 *) d[i].addr);
else if (d[i].size == 2)
val = *((u16 *) d[i].addr);
else if (d[i].size == 4)
val = *((u32 *) d[i].addr);
else if (d[i].size == 8)
val = *((u64 *) d[i].addr);
pos += sprintf(p + pos, "%s=%d\n", d[i].name, val);
}
res = simple_read_from_buffer(userbuf, count, ppos, p, pos);
free_page(addr);
return res;
}
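/*
 * Reading the resulting "debug" file therefore yields one "name=value"
 * line per entry in items[], e.g. (illustrative values):
 *
 * psmode=0
 * psstate=1
 */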
/**
* lbs_debugfs_write - proc write function
*
* @f: file pointer
* @buf: pointer to data buffer
* @cnt: number of bytes to write
* @ppos: file position
*
* returns: amount of data written
*/
static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
size_t cnt, loff_t *ppos)
{
int r, i;
char *pdata;
char *p;
char *p0;
char *p1;
char *p2;
struct debug_data *d = f->private_data;
if (cnt == 0)
return 0;
pdata = kmalloc(cnt + 1, GFP_KERNEL);
if (pdata == NULL)
return -ENOMEM;
if (copy_from_user(pdata, buf, cnt)) {
lbs_deb_debugfs("Copy from user failed\n");
kfree(pdata);
return -EFAULT;
}
pdata[cnt] = '\0';
p0 = pdata;
for (i = 0; i < num_of_items; i++) {
do {
p = strstr(p0, d[i].name);
if (p == NULL)
break;
p1 = strchr(p, '\n');
if (p1 == NULL)
break;
p0 = p1 + 1; /* resume the search past this line */
p2 = strchr(p, '=');
if (!p2)
break;
p2++;
r = simple_strtoul(p2, NULL, 0);
if (d[i].size == 1)
*((u8 *) d[i].addr) = (u8) r;
else if (d[i].size == 2)
*((u16 *) d[i].addr) = (u16) r;
else if (d[i].size == 4)
*((u32 *) d[i].addr) = (u32) r;
else if (d[i].size == 8)
*((u64 *) d[i].addr) = (u64) r;
break;
} while (1);
}
kfree(pdata);
return (ssize_t)cnt;
}
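/*
 * The parser above accepts the same "name=value" lines the read handler
 * emits: each line must be newline-terminated, and values pass through
 * simple_strtoul() with base 0, so writing "psmode=1\n" or "psmode=0x1\n"
 * (illustrative) both update the psmode member.
 */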
static const struct file_operations lbs_debug_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = lbs_debugfs_write,
.read = lbs_debugfs_read,
.llseek = default_llseek,
};
/**
* lbs_debug_init - create debug proc file
*
* @priv: pointer to &struct lbs_private
*
* returns: N/A
*/
static void lbs_debug_init(struct lbs_private *priv)
{
int i;
if (!priv->debugfs_dir)
return;
for (i = 0; i < num_of_items; i++)
items[i].addr += (size_t) priv;
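/*
 * The loop above rewrites the global items[] addresses in place, which
 * effectively assumes a single lbs device instance.
 */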
priv->debugfs_debug = debugfs_create_file("debug", 0644,
priv->debugfs_dir, &items[0],
&lbs_debug_fops);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5798_0 |
crossvul-cpp_data_bad_1624_0 | /*
* inode.c
*
* PURPOSE
* Inode handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998 Dave Boynton
* (C) 1998-2004 Ben Fennema
* (C) 1999-2000 Stelias Computing Inc
*
* HISTORY
*
* 10/04/98 dgb Added rudimentary directory functions
* 10/07/98 Fully working udf_block_map! It works!
* 11/25/98 bmap altered to better support extents
* 12/06/98 blf partition support in udf_iget, udf_block_map
* and udf_read_inode
* 12/12/98 rewrote udf_block_map to handle next extents and descs across
* block boundaries (which is not actually allowed)
* 12/20/98 added support for strategy 4096
* 03/07/99 rewrote udf_block_map (again)
* New funcs, inode_bmap, udf_next_aext
* 04/19/99 Support for writing device EA's for major/minor #
*/
#include "udfdecl.h"
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include "udf_i.h"
#include "udf_sb.h"
MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");
#define EXTENT_MERGE_SIZE 5
static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
static void __udf_clear_extent_cache(struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
if (iinfo->cached_extent.lstart != -1) {
brelse(iinfo->cached_extent.epos.bh);
iinfo->cached_extent.lstart = -1;
}
}
/* Invalidate extent cache */
static void udf_clear_extent_cache(struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
spin_lock(&iinfo->i_extent_cache_lock);
__udf_clear_extent_cache(inode);
spin_unlock(&iinfo->i_extent_cache_lock);
}
/* Return contents of extent cache */
static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
loff_t *lbcount, struct extent_position *pos)
{
struct udf_inode_info *iinfo = UDF_I(inode);
int ret = 0;
spin_lock(&iinfo->i_extent_cache_lock);
if ((iinfo->cached_extent.lstart <= bcount) &&
(iinfo->cached_extent.lstart != -1)) {
/* Cache hit */
*lbcount = iinfo->cached_extent.lstart;
memcpy(pos, &iinfo->cached_extent.epos,
sizeof(struct extent_position));
if (pos->bh)
get_bh(pos->bh);
ret = 1;
}
spin_unlock(&iinfo->i_extent_cache_lock);
return ret;
}
/* Add extent to extent cache */
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
struct extent_position *pos, int next_epos)
{
struct udf_inode_info *iinfo = UDF_I(inode);
spin_lock(&iinfo->i_extent_cache_lock);
/* Invalidate previously cached extent */
__udf_clear_extent_cache(inode);
if (pos->bh)
get_bh(pos->bh);
memcpy(&iinfo->cached_extent.epos, pos,
sizeof(struct extent_position));
iinfo->cached_extent.lstart = estart;
if (next_epos)
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
iinfo->cached_extent.epos.offset -=
sizeof(struct short_ad);
break;
case ICBTAG_FLAG_AD_LONG:
iinfo->cached_extent.epos.offset -=
sizeof(struct long_ad);
}
spin_unlock(&iinfo->i_extent_cache_lock);
}
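/*
 * Hedged usage sketch (editor's illustration of how the two helpers
 * above cooperate; bcount/lbcount/pos are local names, not taken from
 * the original callers):
 */
#if 0 /* illustration only, never compiled */
loff_t lbcount = 0;
struct extent_position pos = {};

if (!udf_read_extent_cache(inode, bcount, &lbcount, &pos)) {
/* cache miss: walk the extent list from the beginning */
}
/* ...once the extent covering bcount has been located... */
udf_update_extent_cache(inode, lbcount, &pos, 1);
#endif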
void udf_evict_inode(struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
int want_delete = 0;
if (!inode->i_nlink && !is_bad_inode(inode)) {
want_delete = 1;
udf_setsize(inode, 0);
udf_update_inode(inode, IS_SYNC(inode));
}
truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode);
clear_inode(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
inode->i_size != iinfo->i_lenExtents) {
udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
inode->i_ino, inode->i_mode,
(unsigned long long)inode->i_size,
(unsigned long long)iinfo->i_lenExtents);
}
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
udf_clear_extent_cache(inode);
if (want_delete) {
udf_free_inode(inode);
}
}
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
struct udf_inode_info *iinfo = UDF_I(inode);
loff_t isize = inode->i_size;
if (to > isize) {
truncate_pagecache(inode, isize);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
}
}
static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
return block_write_full_page(page, udf_get_block, wbc);
}
static int udf_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, udf_get_block);
}
static int udf_readpage(struct file *file, struct page *page)
{
return mpage_readpage(page, udf_get_block);
}
static int udf_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
}
static int udf_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
int ret;
ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
if (unlikely(ret))
udf_write_failed(mapping, pos + len);
return ret;
}
static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
struct iov_iter *iter,
loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
if (unlikely(ret < 0 && (rw & WRITE)))
udf_write_failed(mapping, offset + count);
return ret;
}
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, udf_get_block);
}
const struct address_space_operations udf_aops = {
.readpage = udf_readpage,
.readpages = udf_readpages,
.writepage = udf_writepage,
.writepages = udf_writepages,
.write_begin = udf_write_begin,
.write_end = generic_write_end,
.direct_IO = udf_direct_IO,
.bmap = udf_bmap,
};
/*
* Expand file stored in ICB to a normal one-block-file
*
* This function requires i_data_sem for writing and releases it.
* This function requires i_mutex held
*/
int udf_expand_file_adinicb(struct inode *inode)
{
struct page *page;
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
int err;
struct writeback_control udf_wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
};
WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
if (!iinfo->i_lenAlloc) {
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
up_write(&iinfo->i_data_sem);
mark_inode_dirty(inode);
return 0;
}
/*
* Release i_data_sem so that we can lock a page - page lock ranks
* above i_data_sem. i_mutex still protects us against file changes.
*/
up_write(&iinfo->i_data_sem);
page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
if (!page)
return -ENOMEM;
if (!PageUptodate(page)) {
kaddr = kmap(page);
memset(kaddr + iinfo->i_lenAlloc, 0x00,
PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
iinfo->i_lenAlloc);
flush_dcache_page(page);
SetPageUptodate(page);
kunmap(page);
}
down_write(&iinfo->i_data_sem);
memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
iinfo->i_lenAlloc);
iinfo->i_lenAlloc = 0;
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
up_write(&iinfo->i_data_sem);
err = inode->i_data.a_ops->writepage(page, &udf_wbc);
if (err) {
/* Restore everything back so that we don't lose data... */
lock_page(page);
kaddr = kmap(page);
down_write(&iinfo->i_data_sem);
memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
inode->i_size);
kunmap(page);
unlock_page(page);
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
inode->i_data.a_ops = &udf_adinicb_aops;
up_write(&iinfo->i_data_sem);
}
page_cache_release(page);
mark_inode_dirty(inode);
return err;
}
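/*
 * Hedged caller sketch for the locking contract documented above
 * (illustration only, not taken from the original call sites):
 */
#if 0 /* illustration only, never compiled */
mutex_lock(&inode->i_mutex);
down_write(&iinfo->i_data_sem);
err = udf_expand_file_adinicb(inode); /* always releases i_data_sem */
mutex_unlock(&inode->i_mutex);
#endif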
struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
int *err)
{
int newblock;
struct buffer_head *dbh = NULL;
struct kernel_lb_addr eloc;
uint8_t alloctype;
struct extent_position epos;
struct udf_fileident_bh sfibh, dfibh;
loff_t f_pos = udf_ext0_offset(inode);
int size = udf_ext0_offset(inode) + inode->i_size;
struct fileIdentDesc cfi, *sfi, *dfi;
struct udf_inode_info *iinfo = UDF_I(inode);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
alloctype = ICBTAG_FLAG_AD_SHORT;
else
alloctype = ICBTAG_FLAG_AD_LONG;
if (!inode->i_size) {
iinfo->i_alloc_type = alloctype;
mark_inode_dirty(inode);
return NULL;
}
/* alloc block, and copy data to it */
*block = udf_new_block(inode->i_sb, inode,
iinfo->i_location.partitionReferenceNum,
iinfo->i_location.logicalBlockNum, err);
if (!(*block))
return NULL;
newblock = udf_get_pblock(inode->i_sb, *block,
iinfo->i_location.partitionReferenceNum,
0);
if (!newblock)
return NULL;
dbh = udf_tgetblk(inode->i_sb, newblock);
if (!dbh)
return NULL;
lock_buffer(dbh);
memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(dbh);
unlock_buffer(dbh);
mark_buffer_dirty_inode(dbh, inode);
sfibh.soffset = sfibh.eoffset =
f_pos & (inode->i_sb->s_blocksize - 1);
sfibh.sbh = sfibh.ebh = NULL;
dfibh.soffset = dfibh.eoffset = 0;
dfibh.sbh = dfibh.ebh = dbh;
while (f_pos < size) {
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
NULL, NULL, NULL);
if (!sfi) {
brelse(dbh);
return NULL;
}
iinfo->i_alloc_type = alloctype;
sfi->descTag.tagLocation = cpu_to_le32(*block);
dfibh.soffset = dfibh.eoffset;
dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
sfi->fileIdent +
le16_to_cpu(sfi->lengthOfImpUse))) {
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
brelse(dbh);
return NULL;
}
}
mark_buffer_dirty_inode(dbh, inode);
memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
iinfo->i_lenAlloc);
iinfo->i_lenAlloc = 0;
eloc.logicalBlockNum = *block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
iinfo->i_lenExtents = inode->i_size;
epos.bh = NULL;
epos.block = iinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(inode);
udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
/* UniqueID stuff */
brelse(epos.bh);
mark_inode_dirty(inode);
return dbh;
}
static int udf_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create)
{
int err, new;
sector_t phys = 0;
struct udf_inode_info *iinfo;
if (!create) {
phys = udf_block_map(inode, block);
if (phys)
map_bh(bh_result, inode->i_sb, phys);
return 0;
}
err = -EIO;
new = 0;
iinfo = UDF_I(inode);
down_write(&iinfo->i_data_sem);
if (block == iinfo->i_next_alloc_block + 1) {
iinfo->i_next_alloc_block++;
iinfo->i_next_alloc_goal++;
}
udf_clear_extent_cache(inode);
phys = inode_getblk(inode, block, &err, &new);
if (!phys)
goto abort;
if (new)
set_buffer_new(bh_result);
map_bh(bh_result, inode->i_sb, phys);
abort:
up_write(&iinfo->i_data_sem);
return err;
}
static struct buffer_head *udf_getblk(struct inode *inode, long block,
int create, int *err)
{
struct buffer_head *bh;
struct buffer_head dummy;
dummy.b_state = 0;
dummy.b_blocknr = -1000;
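/*
 * Probe the mapping with a throw-away buffer_head first; only when
 * udf_get_block() actually maps the block is the real buffer pinned
 * with sb_getblk() below.
 */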
*err = udf_get_block(inode, block, &dummy, create);
if (!*err && buffer_mapped(&dummy)) {
bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (buffer_new(&dummy)) {
lock_buffer(bh);
memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
unlock_buffer(bh);
mark_buffer_dirty_inode(bh, inode);
}
return bh;
}
return NULL;
}
/* Extend the file by 'blocks' blocks, return the number of extents added */
static int udf_do_extend_file(struct inode *inode,
struct extent_position *last_pos,
struct kernel_long_ad *last_ext,
sector_t blocks)
{
sector_t add;
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
struct super_block *sb = inode->i_sb;
struct kernel_lb_addr prealloc_loc = {};
int prealloc_len = 0;
struct udf_inode_info *iinfo;
int err;
/* The previous extent is fake and we should not extend by anything
* - there's nothing to do... */
if (!blocks && fake)
return 0;
iinfo = UDF_I(inode);
/* Round the last extent up to a multiple of block size */
if (last_ext->extLength & (sb->s_blocksize - 1)) {
last_ext->extLength =
(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
iinfo->i_lenExtents =
(iinfo->i_lenExtents + sb->s_blocksize - 1) &
~(sb->s_blocksize - 1);
}
/* Is the last extent made up of just preallocated blocks? */
if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
EXT_NOT_RECORDED_ALLOCATED) {
/* Save the extent so that we can reattach it to the end */
prealloc_loc = last_ext->extLocation;
prealloc_len = last_ext->extLength;
/* Mark the extent as a hole */
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
last_ext->extLocation.logicalBlockNum = 0;
last_ext->extLocation.partitionReferenceNum = 0;
}
/* Can we merge with the previous extent? */
if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
EXT_NOT_RECORDED_NOT_ALLOCATED) {
add = ((1 << 30) - sb->s_blocksize -
(last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
sb->s_blocksize_bits;
if (add > blocks)
add = blocks;
blocks -= add;
last_ext->extLength += add << sb->s_blocksize_bits;
}
if (fake) {
udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
count++;
} else
udf_write_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
/* Managed to do everything necessary? */
if (!blocks)
goto out;
/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
last_ext->extLocation.logicalBlockNum = 0;
last_ext->extLocation.partitionReferenceNum = 0;
add = (1 << (30-sb->s_blocksize_bits)) - 1;
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(add << sb->s_blocksize_bits);
/* Create enough extents to cover the whole hole */
while (blocks > add) {
blocks -= add;
err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
if (err)
return err;
count++;
}
if (blocks) {
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(blocks << sb->s_blocksize_bits);
err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
if (err)
return err;
count++;
}
out:
/* Do we have some preallocated blocks saved? */
if (prealloc_len) {
err = udf_add_aext(inode, last_pos, &prealloc_loc,
prealloc_len, 1);
if (err)
return err;
last_ext->extLocation = prealloc_loc;
last_ext->extLength = prealloc_len;
count++;
}
/* last_pos should point to the last written extent... */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
last_pos->offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
last_pos->offset -= sizeof(struct long_ad);
else
return -EIO;
return count;
}
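/*
 * Editor's note on the extLength encoding used throughout the function
 * above: the top two bits carry the extent type and the low 30 bits the
 * byte length, hence the (1 << 30) per-extent ceiling. A hedged decoding
 * sketch:
 */
#if 0 /* illustration only, never compiled */
uint32_t ext = EXT_NOT_RECORDED_NOT_ALLOCATED |
(8 << sb->s_blocksize_bits); /* an eight-block hole */
int8_t type = ext >> 30; /* extent type bits */
uint32_t bytes = ext & UDF_EXTENT_LENGTH_MASK; /* length in bytes */
#endif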
static int udf_extend_file(struct inode *inode, loff_t newsize)
{
struct extent_position epos;
struct kernel_lb_addr eloc;
uint32_t elen;
int8_t etype;
struct super_block *sb = inode->i_sb;
sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
int adsize;
struct udf_inode_info *iinfo = UDF_I(inode);
struct kernel_long_ad extent;
int err;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
BUG();
etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
/* File has extent covering the new size (could happen when extending
* inside a block)? */
if (etype != -1)
return 0;
if (newsize & (sb->s_blocksize - 1))
offset++;
/* Extended file just to the boundary of the last file block? */
if (offset == 0)
return 0;
/* Truncate is extending the file by 'offset' blocks */
if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
(epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
/* File has no extents at all or has empty last
* indirect extent! Create a fake extent... */
extent.extLocation.logicalBlockNum = 0;
extent.extLocation.partitionReferenceNum = 0;
extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
} else {
epos.offset -= adsize;
etype = udf_next_aext(inode, &epos, &extent.extLocation,
&extent.extLength, 0);
extent.extLength |= etype << 30;
}
err = udf_do_extend_file(inode, &epos, &extent, offset);
if (err < 0)
goto out;
err = 0;
iinfo->i_lenExtents = newsize;
out:
brelse(epos.bh);
return err;
}
static sector_t inode_getblk(struct inode *inode, sector_t block,
int *err, int *new)
{
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
struct extent_position prev_epos, cur_epos, next_epos;
int count = 0, startnum = 0, endnum = 0;
uint32_t elen = 0, tmpelen;
struct kernel_lb_addr eloc, tmpeloc;
int c = 1;
loff_t lbcount = 0, b_off = 0;
uint32_t newblocknum, newblock;
sector_t offset = 0;
int8_t etype;
struct udf_inode_info *iinfo = UDF_I(inode);
int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
int lastblock = 0;
bool isBeyondEOF;
*err = 0;
*new = 0;
prev_epos.offset = udf_file_entry_alloc_offset(inode);
prev_epos.block = iinfo->i_location;
prev_epos.bh = NULL;
cur_epos = next_epos = prev_epos;
b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
/* find the extent which contains the block we are looking for.
alternate between laarr[0] and laarr[1] for locations of the
current extent, and the previous extent */
do {
if (prev_epos.bh != cur_epos.bh) {
brelse(prev_epos.bh);
get_bh(cur_epos.bh);
prev_epos.bh = cur_epos.bh;
}
if (cur_epos.bh != next_epos.bh) {
brelse(cur_epos.bh);
get_bh(next_epos.bh);
cur_epos.bh = next_epos.bh;
}
lbcount += elen;
prev_epos.block = cur_epos.block;
cur_epos.block = next_epos.block;
prev_epos.offset = cur_epos.offset;
cur_epos.offset = next_epos.offset;
etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
if (etype == -1)
break;
c = !c;
laarr[c].extLength = (etype << 30) | elen;
laarr[c].extLocation = eloc;
if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
pgoal = eloc.logicalBlockNum +
((elen + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits);
count++;
} while (lbcount + elen <= b_off);
b_off -= lbcount;
offset = b_off >> inode->i_sb->s_blocksize_bits;
/*
* Move prev_epos and cur_epos into indirect extent if we are at
* the pointer to it
*/
udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
/* if the extent is allocated and recorded, return the block
if the extent is not a multiple of the blocksize, round up */
if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
if (elen & (inode->i_sb->s_blocksize - 1)) {
elen = EXT_RECORDED_ALLOCATED |
((elen + inode->i_sb->s_blocksize - 1) &
~(inode->i_sb->s_blocksize - 1));
udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
}
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
return newblock;
}
/* Are we beyond EOF? */
if (etype == -1) {
int ret;
isBeyondEOF = true;
if (count) {
if (c)
laarr[0] = laarr[1];
startnum = 1;
} else {
/* Create a fake extent when there's not one */
memset(&laarr[0].extLocation, 0x00,
sizeof(struct kernel_lb_addr));
laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
/* Will udf_do_extend_file() create real extent from
a fake one? */
startnum = (offset > 0);
}
/* Create extents for the hole between EOF and offset */
ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
if (ret < 0) {
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
*err = ret;
return 0;
}
c = 0;
offset = 0;
count += ret;
/* We are not covered by a preallocated extent? */
if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
EXT_NOT_RECORDED_ALLOCATED) {
/* Is there any real extent? - otherwise we overwrite
* the fake one... */
if (count)
c = !c;
laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
inode->i_sb->s_blocksize;
memset(&laarr[c].extLocation, 0x00,
sizeof(struct kernel_lb_addr));
count++;
}
endnum = c + 1;
lastblock = 1;
} else {
isBeyondEOF = false;
endnum = startnum = ((count > 2) ? 2 : count);
/* if the current extent is in position 0,
swap it with the previous */
if (!c && count != 1) {
laarr[2] = laarr[0];
laarr[0] = laarr[1];
laarr[1] = laarr[2];
c = 1;
}
/* if the current block is located in an extent,
read the next extent */
etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
if (etype != -1) {
laarr[c + 1].extLength = (etype << 30) | elen;
laarr[c + 1].extLocation = eloc;
count++;
startnum++;
endnum++;
} else
lastblock = 1;
}
/* if the current extent is not recorded but allocated, get the
* block in the extent corresponding to the requested block */
if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
else { /* otherwise, allocate a new block */
if (iinfo->i_next_alloc_block == block)
goal = iinfo->i_next_alloc_goal;
if (!goal) {
if (!(goal = pgoal)) /* XXX: what was intended here? */
goal = iinfo->i_location.logicalBlockNum + 1;
}
newblocknum = udf_new_block(inode->i_sb, inode,
iinfo->i_location.partitionReferenceNum,
goal, err);
if (!newblocknum) {
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
*err = -ENOSPC;
return 0;
}
if (isBeyondEOF)
iinfo->i_lenExtents += inode->i_sb->s_blocksize;
}
/* if the extent the requested block is located in contains multiple
* blocks, split the extent into at most three extents. blocks prior
* to requested block, requested block, and blocks after requested
* block */
udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
#ifdef UDF_PREALLOCATE
/* We preallocate blocks only for regular files. It also makes sense
* for directories but there's a problem when to drop the
* preallocation. We might use some delayed work for that but I feel
* it's overengineering for a filesystem like UDF. */
if (S_ISREG(inode->i_mode))
udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif
/* merge any continuous blocks in laarr */
udf_merge_extents(inode, laarr, &endnum);
/* write back the new extents, inserting new extents if the new number
* of extents is greater than the old number, and deleting extents if
* the new number of extents is less than the old number */
udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
newblock = udf_get_pblock(inode->i_sb, newblocknum,
iinfo->i_location.partitionReferenceNum, 0);
if (!newblock) {
*err = -EIO;
return 0;
}
*new = 1;
iinfo->i_next_alloc_block = block;
iinfo->i_next_alloc_goal = newblocknum;
inode->i_ctime = current_fs_time(inode->i_sb);
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
return newblock;
}
static void udf_split_extents(struct inode *inode, int *c, int offset,
int newblocknum,
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
unsigned long blocksize = inode->i_sb->s_blocksize;
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
(laarr[*c].extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
int curr = *c;
int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits;
int8_t etype = (laarr[curr].extLength >> 30);
if (blen == 1)
;
else if (!offset || blen == offset + 1) {
laarr[curr + 2] = laarr[curr + 1];
laarr[curr + 1] = laarr[curr];
} else {
laarr[curr + 3] = laarr[curr + 1];
laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
}
if (offset) {
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode,
&laarr[curr].extLocation,
0, offset);
laarr[curr].extLength =
EXT_NOT_RECORDED_NOT_ALLOCATED |
(offset << blocksize_bits);
laarr[curr].extLocation.logicalBlockNum = 0;
laarr[curr].extLocation.
partitionReferenceNum = 0;
} else
laarr[curr].extLength = (etype << 30) |
(offset << blocksize_bits);
curr++;
(*c)++;
(*endnum)++;
}
laarr[curr].extLocation.logicalBlockNum = newblocknum;
if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
laarr[curr].extLocation.partitionReferenceNum =
UDF_I(inode)->i_location.partitionReferenceNum;
laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
blocksize;
curr++;
if (blen != offset + 1) {
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
laarr[curr].extLocation.logicalBlockNum +=
offset + 1;
laarr[curr].extLength = (etype << 30) |
((blen - (offset + 1)) << blocksize_bits);
curr++;
(*endnum)++;
}
}
}
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
int start, length = 0, currlength = 0, i;
if (*endnum >= (c + 1)) {
if (!lastblock)
return;
else
start = c;
} else {
if ((laarr[c + 1].extLength >> 30) ==
(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
start = c + 1;
length = currlength =
(((laarr[c + 1].extLength &
UDF_EXTENT_LENGTH_MASK) +
inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits);
} else
start = c;
}
for (i = start + 1; i <= *endnum; i++) {
if (i == *endnum) {
if (lastblock)
length += UDF_DEFAULT_PREALLOC_BLOCKS;
} else if ((laarr[i].extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
length += (((laarr[i].extLength &
UDF_EXTENT_LENGTH_MASK) +
inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits);
} else
break;
}
if (length) {
int next = laarr[start].extLocation.logicalBlockNum +
(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits);
int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
laarr[start].extLocation.partitionReferenceNum,
next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
length : UDF_DEFAULT_PREALLOC_BLOCKS) -
currlength);
if (numalloc) {
if (start == (c + 1))
laarr[start].extLength +=
(numalloc <<
inode->i_sb->s_blocksize_bits);
else {
memmove(&laarr[c + 2], &laarr[c + 1],
sizeof(struct kernel_long_ad) * (*endnum - (c + 1)));
(*endnum)++;
laarr[c + 1].extLocation.logicalBlockNum = next;
laarr[c + 1].extLocation.partitionReferenceNum =
laarr[c].extLocation.
partitionReferenceNum;
laarr[c + 1].extLength =
EXT_NOT_RECORDED_ALLOCATED |
(numalloc <<
inode->i_sb->s_blocksize_bits);
start = c + 1;
}
for (i = start + 1; numalloc && i < *endnum; i++) {
int elen = ((laarr[i].extLength &
UDF_EXTENT_LENGTH_MASK) +
inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (elen > numalloc) {
laarr[i].extLength -=
(numalloc <<
inode->i_sb->s_blocksize_bits);
numalloc = 0;
} else {
numalloc -= elen;
if (*endnum > (i + 1))
memmove(&laarr[i],
&laarr[i + 1],
sizeof(struct kernel_long_ad) *
(*endnum - (i + 1)));
i--;
(*endnum)--;
}
}
UDF_I(inode)->i_lenExtents +=
numalloc << inode->i_sb->s_blocksize_bits;
}
}
}
static void udf_merge_extents(struct inode *inode,
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
int i;
unsigned long blocksize = inode->i_sb->s_blocksize;
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
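/*
 * Walk adjacent pairs: two extents merge when they share a type and are
 * either both sparse (NOT_RECORDED_NOT_ALLOCATED) or physically
 * contiguous; when the combined byte length would overflow the 30-bit
 * extLength field, the surplus is pushed into the second extent instead
 * of being dropped.
 */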
for (i = 0; i < (*endnum - 1); i++) {
struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
(((li->extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
((lip1->extLocation.logicalBlockNum -
li->extLocation.logicalBlockNum) ==
(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits)))) {
if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
(lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
lip1->extLength = (lip1->extLength -
(li->extLength &
UDF_EXTENT_LENGTH_MASK) +
UDF_EXTENT_LENGTH_MASK) &
~(blocksize - 1);
li->extLength = (li->extLength &
UDF_EXTENT_FLAG_MASK) +
(UDF_EXTENT_LENGTH_MASK + 1) -
blocksize;
lip1->extLocation.logicalBlockNum =
li->extLocation.logicalBlockNum +
((li->extLength &
UDF_EXTENT_LENGTH_MASK) >>
blocksize_bits);
} else {
li->extLength = lip1->extLength +
(((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) & ~(blocksize - 1));
if (*endnum > (i + 2))
memmove(&laarr[i + 1], &laarr[i + 2],
sizeof(struct kernel_long_ad) *
(*endnum - (i + 2)));
i--;
(*endnum)--;
}
} else if (((li->extLength >> 30) ==
(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
((lip1->extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits);
li->extLocation.logicalBlockNum = 0;
li->extLocation.partitionReferenceNum = 0;
if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
(lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
lip1->extLength = (lip1->extLength -
(li->extLength &
UDF_EXTENT_LENGTH_MASK) +
UDF_EXTENT_LENGTH_MASK) &
~(blocksize - 1);
li->extLength = (li->extLength &
UDF_EXTENT_FLAG_MASK) +
(UDF_EXTENT_LENGTH_MASK + 1) -
blocksize;
} else {
li->extLength = lip1->extLength +
(((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) & ~(blocksize - 1));
if (*endnum > (i + 2))
memmove(&laarr[i + 1], &laarr[i + 2],
sizeof(struct kernel_long_ad) *
(*endnum - (i + 2)));
i--;
(*endnum)--;
}
} else if ((li->extLength >> 30) ==
(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode,
&li->extLocation, 0,
((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits);
li->extLocation.logicalBlockNum = 0;
li->extLocation.partitionReferenceNum = 0;
li->extLength = (li->extLength &
UDF_EXTENT_LENGTH_MASK) |
EXT_NOT_RECORDED_NOT_ALLOCATED;
}
}
}
static void udf_update_extents(struct inode *inode,
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int startnum, int endnum,
struct extent_position *epos)
{
int start = 0, i;
struct kernel_lb_addr tmploc;
uint32_t tmplen;
if (startnum > endnum) {
for (i = 0; i < (startnum - endnum); i++)
udf_delete_aext(inode, *epos, laarr[i].extLocation,
laarr[i].extLength);
} else if (startnum < endnum) {
for (i = 0; i < (endnum - startnum); i++) {
udf_insert_aext(inode, *epos, laarr[i].extLocation,
laarr[i].extLength);
udf_next_aext(inode, epos, &laarr[i].extLocation,
&laarr[i].extLength, 1);
start++;
}
}
for (i = start; i < endnum; i++) {
udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
udf_write_aext(inode, epos, &laarr[i].extLocation,
laarr[i].extLength, 1);
}
}
struct buffer_head *udf_bread(struct inode *inode, int block,
int create, int *err)
{
struct buffer_head *bh = NULL;
bh = udf_getblk(inode, block, create, err);
if (!bh)
return NULL;
if (buffer_uptodate(bh))
return bh;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
brelse(bh);
*err = -EIO;
return NULL;
}
int udf_setsize(struct inode *inode, loff_t newsize)
{
int err;
struct udf_inode_info *iinfo;
int bsize = 1 << inode->i_blkbits;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
return -EINVAL;
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
iinfo = UDF_I(inode);
if (newsize > inode->i_size) {
down_write(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
if (bsize <
(udf_file_entry_alloc_offset(inode) + newsize)) {
err = udf_expand_file_adinicb(inode);
if (err)
return err;
down_write(&iinfo->i_data_sem);
} else {
iinfo->i_lenAlloc = newsize;
goto set_size;
}
}
err = udf_extend_file(inode, newsize);
if (err) {
up_write(&iinfo->i_data_sem);
return err;
}
set_size:
truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
} else {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
0x00, bsize - newsize -
udf_file_entry_alloc_offset(inode));
iinfo->i_lenAlloc = newsize;
truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
goto update_time;
}
err = block_truncate_page(inode->i_mapping, newsize,
udf_get_block);
if (err)
return err;
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
update_time:
inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
return 0;
}
/*
* Maximum length of linked list formed by ICB hierarchy. The chosen number is
* arbitrary - just that we hopefully don't limit any real use of rewritten
* inode on write-once media but avoid looping for too long on corrupted media.
*/
#define UDF_MAX_ICB_NESTING 1024
static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
struct kernel_lb_addr *iloc = &iinfo->i_location;
unsigned int link_count;
unsigned int indirections = 0;
int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
reread:
if (iloc->logicalBlockNum >=
sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
udf_debug("block=%d, partition=%d out of range\n",
iloc->logicalBlockNum, iloc->partitionReferenceNum);
return -EIO;
}
/*
* Set defaults, but the inode is still incomplete!
* Note: get_new_inode() sets the following on a new inode:
* i_sb = sb
* i_no = ino
* i_flags = sb->s_flags
* i_state = 0
* clean_inode(): zero fills and sets
* i_count = 1
* i_nlink = 1
* i_op = NULL;
*/
bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
if (!bh) {
udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
return -EIO;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
inode->i_ino, ident);
goto out;
}
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
if (ident == TAG_IDENT_IE && ibh) {
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength) {
brelse(ibh);
memcpy(&iinfo->i_location, &loc,
sizeof(struct kernel_lb_addr));
if (++indirections > UDF_MAX_ICB_NESTING) {
udf_err(inode->i_sb,
"too many ICBs in ICB hierarchy"
" (max %d supported)\n",
UDF_MAX_ICB_NESTING);
goto out;
}
brelse(bh);
goto reread;
}
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
udf_err(inode->i_sb, "unsupported strategy type: %d\n",
le16_to_cpu(fe->icbTag.strategyType));
goto out;
}
if (fe->icbTag.strategyType == cpu_to_le16(4))
iinfo->i_strat4096 = 0;
else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
iinfo->i_strat4096 = 1;
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_next_alloc_block = 0;
iinfo->i_next_alloc_goal = 0;
if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
iinfo->i_efe = 1;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, bs -
sizeof(struct extendedFileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct extendedFileEntry),
bs - sizeof(struct extendedFileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
iinfo->i_efe = 0;
iinfo->i_use = 0;
ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct fileEntry),
bs - sizeof(struct fileEntry));
} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
iinfo->i_efe = 0;
iinfo->i_use = 1;
iinfo->i_lenAlloc = le32_to_cpu(
((struct unallocSpaceEntry *)bh->b_data)->
lengthAllocDescs);
ret = udf_alloc_i_data(inode, bs -
sizeof(struct unallocSpaceEntry));
if (ret)
goto out;
memcpy(iinfo->i_ext.i_data,
bh->b_data + sizeof(struct unallocSpaceEntry),
bs - sizeof(struct unallocSpaceEntry));
ret = 0;
goto out;
}
ret = -EIO;
read_lock(&sbi->s_cred_lock);
i_uid_write(inode, le32_to_cpu(fe->uid));
if (!uid_valid(inode->i_uid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
i_gid_write(inode, le32_to_cpu(fe->gid));
if (!gid_valid(inode->i_gid) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_fmode;
else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_dmode != UDF_INVALID_MODE)
inode->i_mode = sbi->s_dmode;
else
inode->i_mode = udf_convert_permissions(fe);
inode->i_mode &= ~sbi->s_umask;
read_unlock(&sbi->s_cred_lock);
link_count = le16_to_cpu(fe->fileLinkCount);
if (!link_count) {
if (!hidden_inode) {
ret = -ESTALE;
goto out;
}
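/* hidden system-stream inodes may legitimately have no links; treat as 1 */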
link_count = 1;
}
set_nlink(inode, link_count);
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenExtents = inode->i_size;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
fe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
inode->i_atime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_mtime,
efe->modificationTime))
inode->i_mtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
iinfo->i_crtime = sbi->s_record_time;
if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
inode->i_ctime = sbi->s_record_time;
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
}
inode->i_generation = iinfo->i_unique;
/*
 * Sanity check the length of the allocation descriptors and extended
 * attributes up front so that the offset arithmetic below cannot
 * overflow (the check applied by the upstream hardening of this
 * function).
 */
if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs - iinfo->i_lenEAttr)
goto out;
if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
goto out;
/* Sanity checks for files in ICB so that we don't get confused later */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
/*
* For file in ICB data is stored in allocation descriptor
* so sizes should match
*/
if (iinfo->i_lenAlloc != inode->i_size)
goto out;
/* File in ICB has to fit in there... */
if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
goto out;
}
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
inode->i_mode |= S_IFDIR;
inc_nlink(inode);
break;
case ICBTAG_FILE_TYPE_REALTIME:
case ICBTAG_FILE_TYPE_REGULAR:
case ICBTAG_FILE_TYPE_UNDEF:
case ICBTAG_FILE_TYPE_VAT20:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
inode->i_data.a_ops = &udf_adinicb_aops;
else
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
inode->i_mode |= S_IFREG;
break;
case ICBTAG_FILE_TYPE_BLOCK:
inode->i_mode |= S_IFBLK;
break;
case ICBTAG_FILE_TYPE_CHAR:
inode->i_mode |= S_IFCHR;
break;
case ICBTAG_FILE_TYPE_FIFO:
init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
break;
case ICBTAG_FILE_TYPE_SOCKET:
init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
break;
case ICBTAG_FILE_TYPE_SYMLINK:
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode->i_mode = S_IFLNK | S_IRWXUGO;
break;
case ICBTAG_FILE_TYPE_MAIN:
udf_debug("METADATA FILE-----\n");
break;
case ICBTAG_FILE_TYPE_MIRROR:
udf_debug("METADATA MIRROR FILE-----\n");
break;
case ICBTAG_FILE_TYPE_BITMAP:
udf_debug("METADATA BITMAP FILE-----\n");
break;
default:
udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
inode->i_ino, fe->icbTag.fileType);
goto out;
}
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (dsea) {
init_special_inode(inode, inode->i_mode,
MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
le32_to_cpu(dsea->minorDeviceIdent)));
/* Developer ID ??? */
} else
goto out;
}
ret = 0;
out:
brelse(bh);
return ret;
}
static int udf_alloc_i_data(struct inode *inode, size_t size)
{
struct udf_inode_info *iinfo = UDF_I(inode);
iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
if (!iinfo->i_ext.i_data) {
udf_err(inode->i_sb, "(ino %ld) no free memory\n",
inode->i_ino);
return -ENOMEM;
}
return 0;
}
static umode_t udf_convert_permissions(struct fileEntry *fe)
{
umode_t mode;
uint32_t permissions;
uint32_t flags;
permissions = le32_to_cpu(fe->permissions);
flags = le16_to_cpu(fe->icbTag.flags);
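/*
 * UDF keeps five permission bits per class (delete, chattr, read,
 * write, execute), so the rwx triplets sit at bits 0-2 (other), 5-7
 * (group) and 10-12 (owner); the shifts below collapse those 5-bit
 * groups onto the Unix 3-bit layout.
 */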
mode = ((permissions) & S_IRWXO) |
((permissions >> 2) & S_IRWXG) |
((permissions >> 4) & S_IRWXU) |
((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
return mode;
}
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
static int udf_sync_inode(struct inode *inode)
{
return udf_update_inode(inode, 1);
}
static int udf_update_inode(struct inode *inode, int do_sync)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint64_t lb_recorded;
uint32_t udfperms;
uint16_t icbflags;
uint16_t crclen;
int err = 0;
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
struct udf_inode_info *iinfo = UDF_I(inode);
bh = udf_tgetblk(inode->i_sb,
udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
if (!bh) {
udf_debug("getblk failure\n");
return -ENOMEM;
}
lock_buffer(bh);
memset(bh->b_data, 0, inode->i_sb->s_blocksize);
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
if (iinfo->i_use) {
struct unallocSpaceEntry *use =
(struct unallocSpaceEntry *)bh->b_data;
use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
use->descTag.tagLocation =
cpu_to_le32(iinfo->i_location.logicalBlockNum);
crclen = sizeof(struct unallocSpaceEntry) +
iinfo->i_lenAlloc - sizeof(struct tag);
use->descTag.descCRCLength = cpu_to_le16(crclen);
use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
sizeof(struct tag),
crclen));
use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
goto out;
}
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
fe->uid = cpu_to_le32(-1);
else
fe->uid = cpu_to_le32(i_uid_read(inode));
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
fe->gid = cpu_to_le32(-1);
else
fe->gid = cpu_to_le32(i_gid_read(inode));
udfperms = ((inode->i_mode & S_IRWXO)) |
((inode->i_mode & S_IRWXG) << 2) |
((inode->i_mode & S_IRWXU) << 4);
udfperms |= (le32_to_cpu(fe->permissions) &
(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
fe->permissions = cpu_to_le32(udfperms);
if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
else
fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
fe->informationLength = cpu_to_le64(inode->i_size);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
struct regid *eid;
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (!dsea) {
dsea = (struct deviceSpec *)
udf_add_extendedattr(inode,
sizeof(struct deviceSpec) +
sizeof(struct regid), 12, 0x3);
dsea->attrType = cpu_to_le32(12);
dsea->attrSubtype = 1;
dsea->attrLength = cpu_to_le32(
sizeof(struct deviceSpec) +
sizeof(struct regid));
dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
}
eid = (struct regid *)dsea->impUse;
memset(eid, 0, sizeof(struct regid));
strcpy(eid->ident, UDF_ID_DEVELOPER);
eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
eid->identSuffix[1] = UDF_OS_ID_LINUX;
dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
}
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
lb_recorded = 0; /* No extents => no blocks! */
else
lb_recorded =
(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
(blocksize_bits - 9);
if (iinfo->i_efe == 0) {
memcpy(bh->b_data + sizeof(struct fileEntry),
iinfo->i_ext.i_data,
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
memset(&(fe->impIdent), 0, sizeof(struct regid));
strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
fe->uniqueID = cpu_to_le64(iinfo->i_unique);
fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
crclen = sizeof(struct fileEntry);
} else {
memcpy(bh->b_data + sizeof(struct extendedFileEntry),
iinfo->i_ext.i_data,
inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry));
efe->objectSize = cpu_to_le64(inode->i_size);
efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
(iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
iinfo->i_crtime = inode->i_atime;
if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
(iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
iinfo->i_crtime = inode->i_mtime;
if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
(iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
iinfo->i_crtime = inode->i_ctime;
udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
memset(&(efe->impIdent), 0, sizeof(struct regid));
strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
efe->uniqueID = cpu_to_le64(iinfo->i_unique);
efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
crclen = sizeof(struct extendedFileEntry);
}
if (iinfo->i_strat4096) {
fe->icbTag.strategyType = cpu_to_le16(4096);
fe->icbTag.strategyParameter = cpu_to_le16(1);
fe->icbTag.numEntries = cpu_to_le16(2);
} else {
fe->icbTag.strategyType = cpu_to_le16(4);
fe->icbTag.numEntries = cpu_to_le16(1);
}
if (S_ISDIR(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
else if (S_ISREG(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
else if (S_ISLNK(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
else if (S_ISBLK(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
else if (S_ISCHR(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
else if (S_ISFIFO(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
else if (S_ISSOCK(inode->i_mode))
fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
icbflags = iinfo->i_alloc_type |
((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
(le16_to_cpu(fe->icbTag.flags) &
~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
fe->icbTag.flags = cpu_to_le16(icbflags);
if (sbi->s_udfrev >= 0x0200)
fe->descTag.descVersion = cpu_to_le16(3);
else
fe->descTag.descVersion = cpu_to_le16(2);
fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
fe->descTag.tagLocation = cpu_to_le32(
iinfo->i_location.logicalBlockNum);
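/*
 * The descriptor CRC covers everything after the 16-byte tag: the fixed
 * (E)FE header accounted for in crclen above, plus the extended
 * attributes and allocation descriptors copied in earlier.
 */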
crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
fe->descTag.descCRCLength = cpu_to_le16(crclen);
fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
crclen));
fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
out:
set_buffer_uptodate(bh);
unlock_buffer(bh);
/* write the data blocks */
mark_buffer_dirty(bh);
if (do_sync) {
sync_dirty_buffer(bh);
if (buffer_write_io_error(bh)) {
udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
inode->i_ino);
err = -EIO;
}
}
brelse(bh);
return err;
}
struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
bool hidden_inode)
{
unsigned long block = udf_get_lb_pblock(sb, ino, 0);
struct inode *inode = iget_locked(sb, block);
int err;
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
err = udf_read_inode(inode, hidden_inode);
if (err < 0) {
iget_failed(inode);
return ERR_PTR(err);
}
unlock_new_inode(inode);
return inode;
}
int udf_add_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
struct short_ad *sad = NULL;
struct long_ad *lad = NULL;
struct allocExtDesc *aed;
uint8_t *ptr;
struct udf_inode_info *iinfo = UDF_I(inode);
if (!epos->bh)
ptr = iinfo->i_ext.i_data + epos->offset -
udf_file_entry_alloc_offset(inode) +
iinfo->i_lenEAttr;
else
ptr = epos->bh->b_data + epos->offset;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
return -EIO;
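/*
 * No room for another descriptor in the current block: chain in a fresh
 * allocation extent descriptor block and, when the old block cannot even
 * hold the chaining pointer in place of its last descriptor, migrate
 * that last descriptor into the new block as well.
 */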
if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
unsigned char *sptr, *dptr;
struct buffer_head *nbh;
int err, loffset;
struct kernel_lb_addr obloc = epos->block;
epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
obloc.partitionReferenceNum,
obloc.logicalBlockNum, &err);
if (!epos->block.logicalBlockNum)
return -ENOSPC;
nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
&epos->block,
0));
if (!nbh)
return -EIO;
lock_buffer(nbh);
memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(nbh);
unlock_buffer(nbh);
mark_buffer_dirty_inode(nbh, inode);
aed = (struct allocExtDesc *)(nbh->b_data);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
aed->previousAllocExtLocation =
cpu_to_le32(obloc.logicalBlockNum);
if (epos->offset + adsize > inode->i_sb->s_blocksize) {
loffset = epos->offset;
aed->lengthAllocDescs = cpu_to_le32(adsize);
sptr = ptr - adsize;
dptr = nbh->b_data + sizeof(struct allocExtDesc);
memcpy(dptr, sptr, adsize);
epos->offset = sizeof(struct allocExtDesc) + adsize;
} else {
loffset = epos->offset + adsize;
aed->lengthAllocDescs = cpu_to_le32(0);
sptr = ptr;
epos->offset = sizeof(struct allocExtDesc);
if (epos->bh) {
aed = (struct allocExtDesc *)epos->bh->b_data;
le32_add_cpu(&aed->lengthAllocDescs, adsize);
} else {
iinfo->i_lenAlloc += adsize;
mark_inode_dirty(inode);
}
}
if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
epos->block.logicalBlockNum, sizeof(struct tag));
else
udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
epos->block.logicalBlockNum, sizeof(struct tag));
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
sad = (struct short_ad *)sptr;
sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
inode->i_sb->s_blocksize);
sad->extPosition =
cpu_to_le32(epos->block.logicalBlockNum);
break;
case ICBTAG_FLAG_AD_LONG:
lad = (struct long_ad *)sptr;
lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
inode->i_sb->s_blocksize);
lad->extLocation = cpu_to_lelb(epos->block);
memset(lad->impUse, 0x00, sizeof(lad->impUse));
break;
}
if (epos->bh) {
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(epos->bh->b_data, loffset);
else
udf_update_tag(epos->bh->b_data,
sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(epos->bh, inode);
brelse(epos->bh);
} else {
mark_inode_dirty(inode);
}
epos->bh = nbh;
}
udf_write_aext(inode, epos, eloc, elen, inc);
if (!epos->bh) {
iinfo->i_lenAlloc += adsize;
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)epos->bh->b_data;
le32_add_cpu(&aed->lengthAllocDescs, adsize);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(epos->bh->b_data,
epos->offset + (inc ? 0 : adsize));
else
udf_update_tag(epos->bh->b_data,
sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(epos->bh, inode);
}
return 0;
}
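/*
 * Write a single extent (location and length) at the current position
 * in the allocation descriptor area, mark the containing buffer or the
 * inode dirty, and advance epos->offset when inc is set.
 */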
void udf_write_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
uint8_t *ptr;
struct short_ad *sad;
struct long_ad *lad;
struct udf_inode_info *iinfo = UDF_I(inode);
if (!epos->bh)
ptr = iinfo->i_ext.i_data + epos->offset -
udf_file_entry_alloc_offset(inode) +
iinfo->i_lenEAttr;
else
ptr = epos->bh->b_data + epos->offset;
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
sad = (struct short_ad *)ptr;
sad->extLength = cpu_to_le32(elen);
sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
adsize = sizeof(struct short_ad);
break;
case ICBTAG_FLAG_AD_LONG:
lad = (struct long_ad *)ptr;
lad->extLength = cpu_to_le32(elen);
lad->extLocation = cpu_to_lelb(*eloc);
memset(lad->impUse, 0x00, sizeof(lad->impUse));
adsize = sizeof(struct long_ad);
break;
default:
return;
}
if (epos->bh) {
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
struct allocExtDesc *aed =
(struct allocExtDesc *)epos->bh->b_data;
udf_update_tag(epos->bh->b_data,
le32_to_cpu(aed->lengthAllocDescs) +
sizeof(struct allocExtDesc));
}
mark_buffer_dirty_inode(epos->bh, inode);
} else {
mark_inode_dirty(inode);
}
if (inc)
epos->offset += adsize;
}
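/*
 * Fetch the next extent, transparently following chains of allocation
 * extent descriptor blocks. Returns the extent type, or -1 on I/O
 * error or when the descriptors are exhausted.
 */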
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int8_t etype;
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
(EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
int block;
epos->block = *eloc;
epos->offset = sizeof(struct allocExtDesc);
brelse(epos->bh);
block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
epos->bh = udf_tread(inode->i_sb, block);
if (!epos->bh) {
udf_debug("reading block %d failed!\n", block);
return -1;
}
}
return etype;
}
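/*
 * Decode the extent at the current position without following
 * EXT_NEXT_EXTENT_ALLOCDECS chaining. Fills in *eloc and *elen and
 * returns the two-bit extent type taken from the top of extLength.
 */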
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int alen;
int8_t etype;
uint8_t *ptr;
struct short_ad *sad;
struct long_ad *lad;
struct udf_inode_info *iinfo = UDF_I(inode);
if (!epos->bh) {
if (!epos->offset)
epos->offset = udf_file_entry_alloc_offset(inode);
ptr = iinfo->i_ext.i_data + epos->offset -
udf_file_entry_alloc_offset(inode) +
iinfo->i_lenEAttr;
alen = udf_file_entry_alloc_offset(inode) +
iinfo->i_lenAlloc;
} else {
if (!epos->offset)
epos->offset = sizeof(struct allocExtDesc);
ptr = epos->bh->b_data + epos->offset;
alen = sizeof(struct allocExtDesc) +
le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
lengthAllocDescs);
}
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
if (!sad)
return -1;
etype = le32_to_cpu(sad->extLength) >> 30;
eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
eloc->partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
break;
case ICBTAG_FLAG_AD_LONG:
lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
if (!lad)
return -1;
etype = le32_to_cpu(lad->extLength) >> 30;
*eloc = lelb_to_cpu(lad->extLocation);
*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
break;
default:
udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type);
return -1;
}
return etype;
}
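/*
 * Insert an extent at epos, shifting every following extent one slot
 * towards the end of the descriptor chain.
 */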
static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
struct kernel_lb_addr neloc, uint32_t nelen)
{
struct kernel_lb_addr oeloc;
uint32_t oelen;
int8_t etype;
if (epos.bh)
get_bh(epos.bh);
while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
udf_write_aext(inode, &epos, &neloc, nelen, 1);
neloc = oeloc;
nelen = (etype << 30) | oelen;
}
udf_add_aext(inode, &epos, &neloc, nelen, 1);
brelse(epos.bh);
return (nelen >> 30);
}
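/*
 * Delete the extent at epos by copying each subsequent extent back one
 * slot and shrinking lengthAllocDescs (or i_lenAlloc). If the deletion
 * empties the last allocation extent descriptor block, that block is
 * freed as well.
 */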
int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
struct kernel_lb_addr eloc, uint32_t elen)
{
struct extent_position oepos;
int adsize;
int8_t etype;
struct allocExtDesc *aed;
struct udf_inode_info *iinfo;
if (epos.bh) {
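/*
 * Take two references up front: one is dropped by brelse(epos.bh)
 * and one by brelse(oepos.bh) at the end of this function.
 */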
get_bh(epos.bh);
get_bh(epos.bh);
}
iinfo = UDF_I(inode);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
adsize = 0;
oepos = epos;
if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
return -1;
while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
if (oepos.bh != epos.bh) {
oepos.block = epos.block;
brelse(oepos.bh);
get_bh(epos.bh);
oepos.bh = epos.bh;
oepos.offset = epos.offset - adsize;
}
}
memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
elen = 0;
if (epos.bh != oepos.bh) {
udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
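/* Zero two entries: the freed tail slot and the chain pointer to it */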
udf_write_aext(inode, &oepos, &eloc, elen, 1);
udf_write_aext(inode, &oepos, &eloc, elen, 1);
if (!oepos.bh) {
iinfo->i_lenAlloc -= (adsize * 2);
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)oepos.bh->b_data;
le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(oepos.bh->b_data,
oepos.offset - (2 * adsize));
else
udf_update_tag(oepos.bh->b_data,
sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(oepos.bh, inode);
}
} else {
udf_write_aext(inode, &oepos, &eloc, elen, 1);
if (!oepos.bh) {
iinfo->i_lenAlloc -= adsize;
mark_inode_dirty(inode);
} else {
aed = (struct allocExtDesc *)oepos.bh->b_data;
le32_add_cpu(&aed->lengthAllocDescs, -adsize);
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
udf_update_tag(oepos.bh->b_data,
epos.offset - adsize);
else
udf_update_tag(oepos.bh->b_data,
sizeof(struct allocExtDesc));
mark_buffer_dirty_inode(oepos.bh, inode);
}
}
brelse(epos.bh);
brelse(oepos.bh);
return (elen >> 30);
}
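/*
 * Map a file-relative block number to the extent that covers it,
 * starting from the cached extent position when one is available.
 * Returns the extent type and sets *offset to the block's offset
 * within the returned extent, or -1 if the block lies past the last
 * extent.
 */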
int8_t inode_bmap(struct inode *inode, sector_t block,
struct extent_position *pos, struct kernel_lb_addr *eloc,
uint32_t *elen, sector_t *offset)
{
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
loff_t lbcount = 0, bcount =
(loff_t) block << blocksize_bits;
int8_t etype;
struct udf_inode_info *iinfo;
iinfo = UDF_I(inode);
if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
pos->offset = 0;
pos->block = iinfo->i_location;
pos->bh = NULL;
}
*elen = 0;
do {
etype = udf_next_aext(inode, pos, eloc, elen, 1);
if (etype == -1) {
*offset = (bcount - lbcount) >> blocksize_bits;
iinfo->i_lenExtents = lbcount;
return -1;
}
lbcount += *elen;
} while (lbcount <= bcount);
/* update extent cache */
udf_update_extent_cache(inode, lbcount - *elen, pos, 1);
*offset = (bcount + *elen - lbcount) >> blocksize_bits;
return etype;
}
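/*
 * Translate a file block number into an absolute device block number,
 * or 0 when the block is not recorded and allocated.
 */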
long udf_block_map(struct inode *inode, sector_t block)
{
struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
struct extent_position epos = {};
int ret;
down_read(&UDF_I(inode)->i_data_sem);
if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
(EXT_RECORDED_ALLOCATED >> 30))
ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
else
ret = 0;
up_read(&UDF_I(inode)->i_data_sem);
brelse(epos.bh);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
return udf_fixed_to_variable(ret);
else
return ret;
}
/*
* linux/fs/ext4/super.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/inode.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/jbd2.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/cleancache.h>
#include <asm/uaccess.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "ext4.h"
#include "ext4_extents.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>
static struct proc_dir_entry *ext4_proc_root;
static struct kset *ext4_kset;
static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ext4_features *ext4_feat;
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
unsigned long journal_devnum);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static const char *ext4_decode_error(struct super_block *sb, int errno,
char nbuf[16]);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static void ext4_write_super(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static struct file_system_type ext2_fs_type = {
.owner = THIS_MODULE,
.name = "ext2",
.mount = ext4_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif
#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static struct file_system_type ext3_fs_type = {
.owner = THIS_MODULE,
.name = "ext3",
.mount = ext4_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
#else
#define IS_EXT3_SB(sb) (0)
#endif
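/*
 * Allocation helpers that fall back to vmalloc() when a physically
 * contiguous kmalloc() of the requested size is not available.
 */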
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
void *ret;
ret = kmalloc(size, flags);
if (!ret)
ret = __vmalloc(size, flags, PAGE_KERNEL);
return ret;
}
void *ext4_kvzalloc(size_t size, gfp_t flags)
{
void *ret;
ret = kzalloc(size, flags);
if (!ret)
ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
return ret;
}
void ext4_kvfree(void *ptr)
{
if (is_vmalloc_addr(ptr))
vfree(ptr);
else
kfree(ptr);
}
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
return le32_to_cpu(bg->bg_block_bitmap_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
(ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}
ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
return le32_to_cpu(bg->bg_inode_bitmap_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
(ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}
ext4_fsblk_t ext4_inode_table(struct super_block *sb,
struct ext4_group_desc *bg)
{
return le32_to_cpu(bg->bg_inode_table_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
(ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}
__u32 ext4_free_group_clusters(struct super_block *sb,
struct ext4_group_desc *bg)
{
return le16_to_cpu(bg->bg_free_blocks_count_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
(__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}
__u32 ext4_free_inodes_count(struct super_block *sb,
struct ext4_group_desc *bg)
{
return le16_to_cpu(bg->bg_free_inodes_count_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
(__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}
__u32 ext4_used_dirs_count(struct super_block *sb,
struct ext4_group_desc *bg)
{
return le16_to_cpu(bg->bg_used_dirs_count_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
(__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}
__u32 ext4_itable_unused_count(struct super_block *sb,
struct ext4_group_desc *bg)
{
return le16_to_cpu(bg->bg_itable_unused_lo) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
(__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}
void ext4_block_bitmap_set(struct super_block *sb,
struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}
void ext4_inode_bitmap_set(struct super_block *sb,
struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}
void ext4_inode_table_set(struct super_block *sb,
struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}
void ext4_free_group_clusters_set(struct super_block *sb,
struct ext4_group_desc *bg, __u32 count)
{
bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}
void ext4_free_inodes_set(struct super_block *sb,
struct ext4_group_desc *bg, __u32 count)
{
bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}
void ext4_used_dirs_set(struct super_block *sb,
struct ext4_group_desc *bg, __u32 count)
{
bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}
void ext4_itable_unused_set(struct super_block *sb,
struct ext4_group_desc *bg, __u32 count)
{
bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
/* Just increment the non-pointer handle value */
static handle_t *ext4_get_nojournal(void)
{
handle_t *handle = current->journal_info;
unsigned long ref_cnt = (unsigned long)handle;
BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
ref_cnt++;
handle = (handle_t *)ref_cnt;
current->journal_info = handle;
return handle;
}
/* Decrement the non-pointer handle value */
static void ext4_put_nojournal(handle_t *handle)
{
unsigned long ref_cnt = (unsigned long)handle;
BUG_ON(ref_cnt == 0);
ref_cnt--;
handle = (handle_t *)ref_cnt;
current->journal_info = handle;
}
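/*
 * In no-journal mode the handle_t stashed in current->journal_info is
 * not a real handle at all: the pointer bits are reused as a plain
 * reference count, so nested "transactions" cost no allocation.
 */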
/*
* Wrappers for jbd2_journal_start/end.
*
* The only special thing we need to do here is to make sure that all
* journal_end calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
* appropriate.
*
* To avoid j_barrier hold in userspace when a user calls freeze(),
* ext4 prevents a new handle from being started by s_frozen, which
* is in an upper layer.
*/
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
journal_t *journal;
handle_t *handle;
trace_ext4_journal_start(sb, nblocks, _RET_IP_);
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
journal = EXT4_SB(sb)->s_journal;
handle = ext4_journal_current_handle();
/*
* If a handle has been started, it should be allowed to
* finish, otherwise deadlock could happen between freeze
* and others(e.g. truncate) due to the restart of the
* journal handle if the filesystem is frozen and active
* handles are not stopped.
*/
if (!handle)
vfs_check_frozen(sb, SB_FREEZE_TRANS);
if (!journal)
return ext4_get_nojournal();
/*
* Special case here: if the journal has aborted behind our
* backs (eg. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly.
*/
if (is_journal_aborted(journal)) {
ext4_abort(sb, "Detected aborted journal");
return ERR_PTR(-EROFS);
}
return jbd2_journal_start(journal, nblocks);
}
/*
* The only special thing we need to do here is to make sure that all
* jbd2_journal_stop calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
* appropriate.
*/
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
struct super_block *sb;
int err;
int rc;
if (!ext4_handle_valid(handle)) {
ext4_put_nojournal(handle);
return 0;
}
sb = handle->h_transaction->t_journal->j_private;
err = handle->h_err;
rc = jbd2_journal_stop(handle);
if (!err)
err = rc;
if (err)
__ext4_std_error(sb, where, line, err);
return err;
}
void ext4_journal_abort_handle(const char *caller, unsigned int line,
const char *err_fn, struct buffer_head *bh,
handle_t *handle, int err)
{
char nbuf[16];
const char *errstr = ext4_decode_error(NULL, err, nbuf);
BUG_ON(!ext4_handle_valid(handle));
if (bh)
BUFFER_TRACE(bh, "abort");
if (!handle->h_err)
handle->h_err = err;
if (is_handle_aborted(handle))
return;
printk(KERN_ERR "%s:%d: aborting transaction: %s in %s\n",
caller, line, errstr, err_fn);
jbd2_journal_abort_handle(handle);
}
static void __save_error_info(struct super_block *sb, const char *func,
unsigned int line)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
es->s_last_error_time = cpu_to_le32(get_seconds());
strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
es->s_last_error_line = cpu_to_le32(line);
if (!es->s_first_error_time) {
es->s_first_error_time = es->s_last_error_time;
strncpy(es->s_first_error_func, func,
sizeof(es->s_first_error_func));
es->s_first_error_line = cpu_to_le32(line);
es->s_first_error_ino = es->s_last_error_ino;
es->s_first_error_block = es->s_last_error_block;
}
/*
* Start the daily error reporting function if it hasn't been
* started already
*/
if (!es->s_error_count)
mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
es->s_error_count = cpu_to_le32(le32_to_cpu(es->s_error_count) + 1);
}
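/* Record the error in the superblock and write it out synchronously */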
static void save_error_info(struct super_block *sb, const char *func,
unsigned int line)
{
__save_error_info(sb, func, line);
ext4_commit_super(sb, 1);
}
/*
* The del_gendisk() function uninitializes the disk-specific data
* structures, including the bdi structure, without telling anyone
* else. Once this happens, any attempt to call mark_buffer_dirty()
* (for example, by ext4_commit_super), will cause a kernel OOPS.
* This is a kludge to prevent these oops until we can put in a proper
* hook in del_gendisk() to inform the VFS and file system layers.
*/
static int block_device_ejected(struct super_block *sb)
{
struct inode *bd_inode = sb->s_bdev->bd_inode;
struct backing_dev_info *bdi = bd_inode->i_mapping->backing_dev_info;
return bdi->dev == NULL;
}
/* Deal with the reporting of failure conditions on a filesystem such as
* inconsistencies detected or read IO failures.
*
* On ext2, we can store the error state of the filesystem in the
* superblock. That is not possible on ext4, because we may have other
* write ordering constraints on the superblock which prevent us from
* writing it out straight away; and given that the journal is about to
* be aborted, we can't rely on the current, or future, transactions to
* write out the superblock safely.
*
* We'll just use the jbd2_journal_abort() error code to record an error in
* the journal instead. On recovery, the journal will complain about
* that error until we've noted it down and cleared it.
*/
static void ext4_handle_error(struct super_block *sb)
{
if (sb->s_flags & MS_RDONLY)
return;
if (!test_opt(sb, ERRORS_CONT)) {
journal_t *journal = EXT4_SB(sb)->s_journal;
EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
if (journal)
jbd2_journal_abort(journal, -EIO);
}
if (test_opt(sb, ERRORS_RO)) {
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
sb->s_flags |= MS_RDONLY;
}
if (test_opt(sb, ERRORS_PANIC))
panic("EXT4-fs (device %s): panic forced after error\n",
sb->s_id);
}
void __ext4_error(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
sb->s_id, function, line, current->comm, &vaf);
va_end(args);
ext4_handle_error(sb);
}
void ext4_error_inode(struct inode *inode, const char *function,
unsigned int line, ext4_fsblk_t block,
const char *fmt, ...)
{
va_list args;
struct va_format vaf;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
es->s_last_error_block = cpu_to_le64(block);
save_error_info(inode->i_sb, function, line);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
inode->i_sb->s_id, function, line, inode->i_ino);
if (block)
printk(KERN_CONT "block %llu: ", block);
printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf);
va_end(args);
ext4_handle_error(inode->i_sb);
}
void ext4_error_file(struct file *file, const char *function,
unsigned int line, ext4_fsblk_t block,
const char *fmt, ...)
{
va_list args;
struct va_format vaf;
struct ext4_super_block *es;
struct inode *inode = file->f_dentry->d_inode;
char pathname[80], *path;
es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
save_error_info(inode->i_sb, function, line);
path = d_path(&(file->f_path), pathname, sizeof(pathname));
if (IS_ERR(path))
path = "(unknown)";
printk(KERN_CRIT
"EXT4-fs error (device %s): %s:%d: inode #%lu: ",
inode->i_sb->s_id, function, line, inode->i_ino);
if (block)
printk(KERN_CONT "block %llu: ", block);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf);
va_end(args);
ext4_handle_error(inode->i_sb);
}
static const char *ext4_decode_error(struct super_block *sb, int errno,
char nbuf[16])
{
char *errstr = NULL;
switch (errno) {
case -EIO:
errstr = "IO failure";
break;
case -ENOMEM:
errstr = "Out of memory";
break;
case -EROFS:
if (!sb || (EXT4_SB(sb)->s_journal &&
EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
errstr = "Journal has aborted";
else
errstr = "Readonly filesystem";
break;
default:
/* If the caller passed in an extra buffer for unknown
* errors, textualise them now. Else we just return
* NULL. */
if (nbuf) {
/* Check for truncated error codes... */
if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
errstr = nbuf;
}
break;
}
return errstr;
}
/* __ext4_std_error decodes expected errors from journaling functions
* automatically and invokes the appropriate error response. */
void __ext4_std_error(struct super_block *sb, const char *function,
unsigned int line, int errno)
{
char nbuf[16];
const char *errstr;
/* Special case: if the error is EROFS, and we're not already
* inside a transaction, then there's really no point in logging
* an error. */
if (errno == -EROFS && journal_current_handle() == NULL &&
(sb->s_flags & MS_RDONLY))
return;
errstr = ext4_decode_error(sb, errno, nbuf);
printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
sb->s_id, function, line, errstr);
save_error_info(sb, function, line);
ext4_handle_error(sb);
}
/*
* ext4_abort is a much stronger failure handler than ext4_error. The
* abort function may be used to deal with unrecoverable failures such
* as journal IO errors or ENOMEM at a critical moment in log management.
*
* We unconditionally force the filesystem into an ABORT|READONLY state,
* unless the error response on the fs has been set to panic in which
* case we take the easy way out and panic immediately.
*/
void __ext4_abort(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
va_list args;
save_error_info(sb, function, line);
va_start(args, fmt);
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
function, line);
vprintk(fmt, args);
printk("\n");
va_end(args);
if ((sb->s_flags & MS_RDONLY) == 0) {
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
sb->s_flags |= MS_RDONLY;
EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
if (EXT4_SB(sb)->s_journal)
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
save_error_info(sb, function, line);
}
if (test_opt(sb, ERRORS_PANIC))
panic("EXT4-fs panic from previous error\n");
}
void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
va_end(args);
}
void __ext4_warning(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
sb->s_id, function, line, &vaf);
va_end(args);
}
void __ext4_grp_locked_error(const char *function, unsigned int line,
struct super_block *sb, ext4_group_t grp,
unsigned long ino, ext4_fsblk_t block,
const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
struct va_format vaf;
va_list args;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
es->s_last_error_ino = cpu_to_le32(ino);
es->s_last_error_block = cpu_to_le64(block);
__save_error_info(sb, function, line);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
sb->s_id, function, line, grp);
if (ino)
printk(KERN_CONT "inode %lu: ", ino);
if (block)
printk(KERN_CONT "block %llu:", (unsigned long long) block);
printk(KERN_CONT "%pV\n", &vaf);
va_end(args);
if (test_opt(sb, ERRORS_CONT)) {
ext4_commit_super(sb, 0);
return;
}
ext4_unlock_group(sb, grp);
ext4_handle_error(sb);
/*
* We only get here in the ERRORS_RO case; relocking the group
* may be dangerous, but nothing bad will happen since the
* filesystem will have already been marked read/only and the
* journal has been aborted. We return 1 as a hint to callers
* who might want to use the return value from
* ext4_grp_locked_error() to distinguish between the
* ERRORS_CONT and ERRORS_RO case, and perhaps return more
* aggressively from the ext4 function in question, with a
* more appropriate error code.
*/
ext4_lock_group(sb, grp);
return;
}
void ext4_update_dynamic_rev(struct super_block *sb)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
return;
ext4_warning(sb,
"updating to rev %d because of new feature flag, "
"running e2fsck is recommended",
EXT4_DYNAMIC_REV);
es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
/* leave es->s_feature_*compat flags alone */
/* es->s_uuid will be set by e2fsck if empty */
/*
* The rest of the superblock fields should be zero, and if not it
* means they are likely already in use, so leave them alone. We
* can leave it up to e2fsck to clean up any inconsistencies there.
*/
}
/*
* Open the external journal device
*/
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
struct block_device *bdev;
char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
if (IS_ERR(bdev))
goto fail;
return bdev;
fail:
ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
__bdevname(dev, b), PTR_ERR(bdev));
return NULL;
}
/*
* Release the journal device
*/
static int ext4_blkdev_put(struct block_device *bdev)
{
return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
struct block_device *bdev;
int ret = -ENODEV;
bdev = sbi->journal_bdev;
if (bdev) {
ret = ext4_blkdev_put(bdev);
sbi->journal_bdev = NULL;
}
return ret;
}
static inline struct inode *orphan_list_entry(struct list_head *l)
{
return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
struct list_head *l;
ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
le32_to_cpu(sbi->s_es->s_last_orphan));
printk(KERN_ERR "sb_info orphan list:\n");
list_for_each(l, &sbi->s_orphan) {
struct inode *inode = orphan_list_entry(l);
printk(KERN_ERR " "
"inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
inode->i_sb->s_id, inode->i_ino, inode,
inode->i_mode, inode->i_nlink,
NEXT_ORPHAN(inode));
}
}
static void ext4_put_super(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
int i, err;
ext4_unregister_li_request(sb);
dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
flush_workqueue(sbi->dio_unwritten_wq);
destroy_workqueue(sbi->dio_unwritten_wq);
lock_super(sb);
if (sb->s_dirt)
ext4_commit_super(sb, 1);
if (sbi->s_journal) {
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
if (err < 0)
ext4_abort(sb, "Couldn't clean up the journal");
}
del_timer(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
ext4_ext_release(sb);
ext4_xattr_put_super(sb);
if (!(sb->s_flags & MS_RDONLY)) {
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
es->s_state = cpu_to_le16(sbi->s_mount_state);
ext4_commit_super(sb, 1);
}
if (sbi->s_proc) {
remove_proc_entry(sb->s_id, ext4_proc_root);
}
kobject_del(&sbi->s_kobj);
for (i = 0; i < sbi->s_gdb_count; i++)
brelse(sbi->s_group_desc[i]);
ext4_kvfree(sbi->s_group_desc);
ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
/* Debugging code just in case the in-memory inode orphan list
* isn't empty. The on-disk one can be non-empty if we've
* detected an error and taken the fs readonly, but the
* in-memory list had better be clean by this point. */
if (!list_empty(&sbi->s_orphan))
dump_orphan_list(sb, sbi);
J_ASSERT(list_empty(&sbi->s_orphan));
invalidate_bdev(sb->s_bdev);
if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may be
* hotswapped, and it breaks the `ro-after' testing code.
*/
sync_blockdev(sbi->journal_bdev);
invalidate_bdev(sbi->journal_bdev);
ext4_blkdev_remove(sbi);
}
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
sb->s_fs_info = NULL;
/*
* Now that we are completely done shutting down the
* superblock, we need to actually destroy the kobject.
*/
unlock_super(sb);
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
}
static struct kmem_cache *ext4_inode_cachep;
/*
* Called inside transaction, so use GFP_NOFS
*/
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
struct ext4_inode_info *ei;
ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
ei->vfs_inode.i_version = 1;
ei->vfs_inode.i_data.writeback_index = 0;
memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
INIT_LIST_HEAD(&ei->i_prealloc_list);
spin_lock_init(&ei->i_prealloc_lock);
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
#endif
ei->jinode = NULL;
INIT_LIST_HEAD(&ei->i_completed_io_list);
spin_lock_init(&ei->i_completed_io_lock);
ei->cur_aio_dio = NULL;
ei->i_sync_tid = 0;
ei->i_datasync_tid = 0;
atomic_set(&ei->i_ioend_count, 0);
atomic_set(&ei->i_aiodio_unwritten, 0);
return &ei->vfs_inode;
}
static int ext4_drop_inode(struct inode *inode)
{
int drop = generic_drop_inode(inode);
trace_ext4_drop_inode(inode, drop);
return drop;
}
static void ext4_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
static void ext4_destroy_inode(struct inode *inode)
{
if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
ext4_msg(inode->i_sb, KERN_ERR,
"Inode %lu (%p): orphan list check failed!",
inode->i_ino, EXT4_I(inode));
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
EXT4_I(inode), sizeof(struct ext4_inode_info),
true);
dump_stack();
}
call_rcu(&inode->i_rcu, ext4_i_callback);
}
static void init_once(void *foo)
{
struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT4_FS_XATTR
init_rwsem(&ei->xattr_sem);
#endif
init_rwsem(&ei->i_data_sem);
inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
{
ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
sizeof(struct ext4_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
init_once);
if (ext4_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
kmem_cache_destroy(ext4_inode_cachep);
}
void ext4_clear_inode(struct inode *inode)
{
invalidate_inode_buffers(inode);
end_writeback(inode);
dquot_drop(inode);
ext4_discard_preallocations(inode);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode);
jbd2_free_inode(EXT4_I(inode)->jinode);
EXT4_I(inode)->jinode = NULL;
}
}
static inline void ext4_show_quota_options(struct seq_file *seq,
struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
struct ext4_sb_info *sbi = EXT4_SB(sb);
if (sbi->s_jquota_fmt) {
char *fmtname = "";
switch (sbi->s_jquota_fmt) {
case QFMT_VFS_OLD:
fmtname = "vfsold";
break;
case QFMT_VFS_V0:
fmtname = "vfsv0";
break;
case QFMT_VFS_V1:
fmtname = "vfsv1";
break;
}
seq_printf(seq, ",jqfmt=%s", fmtname);
}
if (sbi->s_qf_names[USRQUOTA])
seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
if (sbi->s_qf_names[GRPQUOTA])
seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
if (test_opt(sb, USRQUOTA))
seq_puts(seq, ",usrquota");
if (test_opt(sb, GRPQUOTA))
seq_puts(seq, ",grpquota");
#endif
}
/*
* Show an option if
* - it's set to a non-default value OR
* - if the per-sb default is different from the global default
*/
static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
int def_errors;
unsigned long def_mount_opts;
struct super_block *sb = vfs->mnt_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
def_errors = le16_to_cpu(es->s_errors);
if (sbi->s_sb_block != 1)
seq_printf(seq, ",sb=%llu", sbi->s_sb_block);
if (test_opt(sb, MINIX_DF))
seq_puts(seq, ",minixdf");
if (test_opt(sb, GRPID) && !(def_mount_opts & EXT4_DEFM_BSDGROUPS))
seq_puts(seq, ",grpid");
if (!test_opt(sb, GRPID) && (def_mount_opts & EXT4_DEFM_BSDGROUPS))
seq_puts(seq, ",nogrpid");
if (sbi->s_resuid != EXT4_DEF_RESUID ||
le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) {
seq_printf(seq, ",resuid=%u", sbi->s_resuid);
}
if (sbi->s_resgid != EXT4_DEF_RESGID ||
le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) {
seq_printf(seq, ",resgid=%u", sbi->s_resgid);
}
if (test_opt(sb, ERRORS_RO)) {
if (def_errors == EXT4_ERRORS_PANIC ||
def_errors == EXT4_ERRORS_CONTINUE) {
seq_puts(seq, ",errors=remount-ro");
}
}
if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
seq_puts(seq, ",errors=continue");
if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
seq_puts(seq, ",errors=panic");
if (test_opt(sb, NO_UID32) && !(def_mount_opts & EXT4_DEFM_UID16))
seq_puts(seq, ",nouid32");
if (test_opt(sb, DEBUG) && !(def_mount_opts & EXT4_DEFM_DEBUG))
seq_puts(seq, ",debug");
#ifdef CONFIG_EXT4_FS_XATTR
if (test_opt(sb, XATTR_USER))
seq_puts(seq, ",user_xattr");
if (!test_opt(sb, XATTR_USER))
seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
seq_puts(seq, ",acl");
if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
seq_puts(seq, ",noacl");
#endif
if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
seq_printf(seq, ",commit=%u",
(unsigned) (sbi->s_commit_interval / HZ));
}
if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
seq_printf(seq, ",min_batch_time=%u",
(unsigned) sbi->s_min_batch_time);
}
if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
seq_printf(seq, ",max_batch_time=%u",
(unsigned) sbi->s_max_batch_time);
}
/*
* We're changing the default of barrier mount option, so
* let's always display its mount state so it's clear what its
* status is.
*/
seq_puts(seq, ",barrier=");
seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
seq_puts(seq, ",journal_async_commit");
else if (test_opt(sb, JOURNAL_CHECKSUM))
seq_puts(seq, ",journal_checksum");
if (test_opt(sb, I_VERSION))
seq_puts(seq, ",i_version");
if (!test_opt(sb, DELALLOC) &&
!(def_mount_opts & EXT4_DEFM_NODELALLOC))
seq_puts(seq, ",nodelalloc");
if (!test_opt(sb, MBLK_IO_SUBMIT))
seq_puts(seq, ",nomblk_io_submit");
if (sbi->s_stripe)
seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
/*
* journal mode get enabled in different ways
* So just print the value even if we didn't specify it
*/
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
seq_puts(seq, ",data=journal");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
seq_puts(seq, ",data=ordered");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
seq_puts(seq, ",data=writeback");
if (sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
seq_printf(seq, ",inode_readahead_blks=%u",
sbi->s_inode_readahead_blks);
if (test_opt(sb, DATA_ERR_ABORT))
seq_puts(seq, ",data_err=abort");
if (test_opt(sb, NO_AUTO_DA_ALLOC))
seq_puts(seq, ",noauto_da_alloc");
if (test_opt(sb, DISCARD) && !(def_mount_opts & EXT4_DEFM_DISCARD))
seq_puts(seq, ",discard");
if (test_opt(sb, NOLOAD))
seq_puts(seq, ",norecovery");
if (test_opt(sb, DIOREAD_NOLOCK))
seq_puts(seq, ",dioread_nolock");
if (test_opt(sb, BLOCK_VALIDITY) &&
!(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY))
seq_puts(seq, ",block_validity");
if (!test_opt(sb, INIT_INODE_TABLE))
seq_puts(seq, ",noinit_itable");
else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
seq_printf(seq, ",init_itable=%u",
(unsigned) sbi->s_li_wait_mult);
ext4_show_quota_options(seq, sb);
return 0;
}
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
struct inode *inode;
if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
return ERR_PTR(-ESTALE);
if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
return ERR_PTR(-ESTALE);
/* iget isn't really right if the inode is currently unallocated!!
*
* ext4_read_inode will return a bad_inode if the inode had been
* deleted, so we should be safe.
*
* Currently we don't know the generation for parent directory, so
* a generation of 0 means "accept any"
*/
inode = ext4_iget(sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
return inode;
}
static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
ext4_nfs_get_inode);
}
static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_parent(sb, fid, fh_len, fh_type,
ext4_nfs_get_inode);
}
/*
* Try to release metadata pages (indirect blocks, directories) which are
* mapped via the block device. Since these pages could have journal heads
* which would prevent try_to_free_buffers() from freeing them, we must use
* jbd2 layer's try_to_free_buffers() function to release them.
*/
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
gfp_t wait)
{
journal_t *journal = EXT4_SB(sb)->s_journal;
WARN_ON(PageChecked(page));
if (!page_has_buffers(page))
return 0;
if (journal)
return jbd2_journal_try_to_free_buffers(journal, page,
wait & ~__GFP_WAIT);
return try_to_free_buffers(page);
}
#ifdef CONFIG_QUOTA
#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
struct path *path);
static int ext4_quota_off(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static const struct dquot_operations ext4_quota_operations = {
.get_reserved_space = ext4_get_reserved_space,
.write_dquot = ext4_write_dquot,
.acquire_dquot = ext4_acquire_dquot,
.release_dquot = ext4_release_dquot,
.mark_dirty = ext4_mark_dquot_dirty,
.write_info = ext4_write_info,
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
};
static const struct quotactl_ops ext4_qctl_operations = {
.quota_on = ext4_quota_on,
.quota_off = ext4_quota_off,
.quota_sync = dquot_quota_sync,
.get_info = dquot_get_dqinfo,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk
};
#endif
static const struct super_operations ext4_sops = {
.alloc_inode = ext4_alloc_inode,
.destroy_inode = ext4_destroy_inode,
.write_inode = ext4_write_inode,
.dirty_inode = ext4_dirty_inode,
.drop_inode = ext4_drop_inode,
.evict_inode = ext4_evict_inode,
.put_super = ext4_put_super,
.sync_fs = ext4_sync_fs,
.freeze_fs = ext4_freeze,
.unfreeze_fs = ext4_unfreeze,
.statfs = ext4_statfs,
.remount_fs = ext4_remount,
.show_options = ext4_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext4_quota_read,
.quota_write = ext4_quota_write,
#endif
.bdev_try_to_free_page = bdev_try_to_free_page,
};
static const struct super_operations ext4_nojournal_sops = {
.alloc_inode = ext4_alloc_inode,
.destroy_inode = ext4_destroy_inode,
.write_inode = ext4_write_inode,
.dirty_inode = ext4_dirty_inode,
.drop_inode = ext4_drop_inode,
.evict_inode = ext4_evict_inode,
.write_super = ext4_write_super,
.put_super = ext4_put_super,
.statfs = ext4_statfs,
.remount_fs = ext4_remount,
.show_options = ext4_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext4_quota_read,
.quota_write = ext4_quota_write,
#endif
.bdev_try_to_free_page = bdev_try_to_free_page,
};
static const struct export_operations ext4_export_ops = {
.fh_to_dentry = ext4_fh_to_dentry,
.fh_to_parent = ext4_fh_to_parent,
.get_parent = ext4_get_parent,
};
enum {
Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_nobh, Opt_bh,
Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
Opt_journal_update, Opt_journal_dev,
Opt_journal_checksum, Opt_journal_async_commit,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
};
static const match_table_t tokens = {
{Opt_bsd_df, "bsddf"},
{Opt_minix_df, "minixdf"},
{Opt_grpid, "grpid"},
{Opt_grpid, "bsdgroups"},
{Opt_nogrpid, "nogrpid"},
{Opt_nogrpid, "sysvgroups"},
{Opt_resgid, "resgid=%u"},
{Opt_resuid, "resuid=%u"},
{Opt_sb, "sb=%u"},
{Opt_err_cont, "errors=continue"},
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_nouid32, "nouid32"},
{Opt_debug, "debug"},
{Opt_oldalloc, "oldalloc"},
{Opt_orlov, "orlov"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_noload, "noload"},
{Opt_noload, "norecovery"},
{Opt_nobh, "nobh"},
{Opt_bh, "bh"},
{Opt_commit, "commit=%u"},
{Opt_min_batch_time, "min_batch_time=%u"},
{Opt_max_batch_time, "max_batch_time=%u"},
{Opt_journal_update, "journal=update"},
{Opt_journal_dev, "journal_dev=%u"},
{Opt_journal_checksum, "journal_checksum"},
{Opt_journal_async_commit, "journal_async_commit"},
{Opt_abort, "abort"},
{Opt_data_journal, "data=journal"},
{Opt_data_ordered, "data=ordered"},
{Opt_data_writeback, "data=writeback"},
{Opt_data_err_abort, "data_err=abort"},
{Opt_data_err_ignore, "data_err=ignore"},
{Opt_offusrjquota, "usrjquota="},
{Opt_usrjquota, "usrjquota=%s"},
{Opt_offgrpjquota, "grpjquota="},
{Opt_grpjquota, "grpjquota=%s"},
{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
{Opt_grpquota, "grpquota"},
{Opt_noquota, "noquota"},
{Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_barrier, "barrier=%u"},
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
{Opt_i_version, "i_version"},
{Opt_stripe, "stripe=%u"},
{Opt_resize, "resize"},
{Opt_delalloc, "delalloc"},
{Opt_nodelalloc, "nodelalloc"},
{Opt_mblk_io_submit, "mblk_io_submit"},
{Opt_nomblk_io_submit, "nomblk_io_submit"},
{Opt_block_validity, "block_validity"},
{Opt_noblock_validity, "noblock_validity"},
{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
{Opt_journal_ioprio, "journal_ioprio=%u"},
{Opt_auto_da_alloc, "auto_da_alloc=%u"},
{Opt_auto_da_alloc, "auto_da_alloc"},
{Opt_noauto_da_alloc, "noauto_da_alloc"},
{Opt_dioread_nolock, "dioread_nolock"},
{Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
{Opt_init_itable, "init_itable=%u"},
{Opt_init_itable, "init_itable"},
{Opt_noinit_itable, "noinit_itable"},
{Opt_err, NULL},
};
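/*
 * Parse a leading "sb=N" from the mount options, consume it from the
 * option string, and return the requested superblock location
 * (1, the default, when none is given or the value is malformed).
 */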
static ext4_fsblk_t get_sb_block(void **data)
{
ext4_fsblk_t sb_block;
char *options = (char *) *data;
if (!options || strncmp(options, "sb=", 3) != 0)
return 1; /* Default location */
options += 3;
/* TODO: use simple_strtoll with >32bit ext4 */
sb_block = simple_strtoul(options, &options, 0);
if (*options && *options != ',') {
printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
(char *) *data);
return 1;
}
if (*options == ',')
options++;
*data = (void *) options;
return sb_block;
}
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
#ifdef CONFIG_QUOTA
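/* Remember the journaled quota file name for qtype after validating it */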
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
char *qname;
if (sb_any_quota_loaded(sb) &&
!sbi->s_qf_names[qtype]) {
ext4_msg(sb, KERN_ERR,
"Cannot change journaled "
"quota options when quota turned on");
return 0;
}
qname = match_strdup(args);
if (!qname) {
ext4_msg(sb, KERN_ERR,
"Not enough memory for storing quotafile name");
return 0;
}
if (sbi->s_qf_names[qtype] &&
strcmp(sbi->s_qf_names[qtype], qname)) {
ext4_msg(sb, KERN_ERR,
"%s quota file already specified", QTYPE2NAME(qtype));
kfree(qname);
return 0;
}
sbi->s_qf_names[qtype] = qname;
if (strchr(sbi->s_qf_names[qtype], '/')) {
ext4_msg(sb, KERN_ERR,
"quotafile must be on filesystem root");
kfree(sbi->s_qf_names[qtype]);
sbi->s_qf_names[qtype] = NULL;
return 0;
}
set_opt(sb, QUOTA);
return 1;
}
static int clear_qf_name(struct super_block *sb, int qtype)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
if (sb_any_quota_loaded(sb) &&
sbi->s_qf_names[qtype]) {
ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
" when quota turned on");
return 0;
}
/*
* The space will be released later when all options are confirmed
* to be correct
*/
sbi->s_qf_names[qtype] = NULL;
return 1;
}
#endif
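/*
 * Parse the mount option string into sbi state. Returns 1 on success
 * and 0 on any invalid or inconsistent option.
 */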
static int parse_options(char *options, struct super_block *sb,
unsigned long *journal_devnum,
unsigned int *journal_ioprio,
ext4_fsblk_t *n_blocks_count, int is_remount)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
char *p;
substring_t args[MAX_OPT_ARGS];
int data_opt = 0;
int option;
#ifdef CONFIG_QUOTA
int qfmt;
#endif
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
/*
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
args[0].to = args[0].from = NULL;
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sb, MINIX_DF);
break;
case Opt_minix_df:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sb, MINIX_DF);
break;
case Opt_grpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
set_opt(sb, GRPID);
break;
case Opt_nogrpid:
ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
clear_opt(sb, GRPID);
break;
case Opt_resuid:
if (match_int(&args[0], &option))
return 0;
sbi->s_resuid = option;
break;
case Opt_resgid:
if (match_int(&args[0], &option))
return 0;
sbi->s_resgid = option;
break;
case Opt_sb:
/* handled by get_sb_block() instead of here */
/* *sb_block = match_int(&args[0]); */
break;
case Opt_err_panic:
clear_opt(sb, ERRORS_CONT);
clear_opt(sb, ERRORS_RO);
set_opt(sb, ERRORS_PANIC);
break;
case Opt_err_ro:
clear_opt(sb, ERRORS_CONT);
clear_opt(sb, ERRORS_PANIC);
set_opt(sb, ERRORS_RO);
break;
case Opt_err_cont:
clear_opt(sb, ERRORS_RO);
clear_opt(sb, ERRORS_PANIC);
set_opt(sb, ERRORS_CONT);
break;
case Opt_nouid32:
set_opt(sb, NO_UID32);
break;
case Opt_debug:
set_opt(sb, DEBUG);
break;
case Opt_oldalloc:
ext4_msg(sb, KERN_WARNING,
"Ignoring deprecated oldalloc option");
break;
case Opt_orlov:
ext4_msg(sb, KERN_WARNING,
"Ignoring deprecated orlov option");
break;
#ifdef CONFIG_EXT4_FS_XATTR
case Opt_user_xattr:
set_opt(sb, XATTR_USER);
break;
case Opt_nouser_xattr:
clear_opt(sb, XATTR_USER);
break;
#else
case Opt_user_xattr:
case Opt_nouser_xattr:
ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported");
break;
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
case Opt_acl:
set_opt(sb, POSIX_ACL);
break;
case Opt_noacl:
clear_opt(sb, POSIX_ACL);
break;
#else
case Opt_acl:
case Opt_noacl:
ext4_msg(sb, KERN_ERR, "(no)acl options not supported");
break;
#endif
case Opt_journal_update:
/* @@@ FIXME */
/* Eventually we will want to be able to create
a journal file here. For now, only allow the
user to specify an existing inode to be the
journal file. */
if (is_remount) {
ext4_msg(sb, KERN_ERR,
"Cannot specify journal on remount");
return 0;
}
set_opt(sb, UPDATE_JOURNAL);
break;
case Opt_journal_dev:
if (is_remount) {
ext4_msg(sb, KERN_ERR,
"Cannot specify journal on remount");
return 0;
}
if (match_int(&args[0], &option))
return 0;
*journal_devnum = option;
break;
case Opt_journal_checksum:
set_opt(sb, JOURNAL_CHECKSUM);
break;
case Opt_journal_async_commit:
set_opt(sb, JOURNAL_ASYNC_COMMIT);
set_opt(sb, JOURNAL_CHECKSUM);
break;
case Opt_noload:
set_opt(sb, NOLOAD);
break;
case Opt_commit:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
if (option == 0)
option = JBD2_DEFAULT_MAX_COMMIT_AGE;
sbi->s_commit_interval = HZ * option;
break;
case Opt_max_batch_time:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
if (option == 0)
option = EXT4_DEF_MAX_BATCH_TIME;
sbi->s_max_batch_time = option;
break;
case Opt_min_batch_time:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
sbi->s_min_batch_time = option;
break;
case Opt_data_journal:
data_opt = EXT4_MOUNT_JOURNAL_DATA;
goto datacheck;
case Opt_data_ordered:
data_opt = EXT4_MOUNT_ORDERED_DATA;
goto datacheck;
case Opt_data_writeback:
data_opt = EXT4_MOUNT_WRITEBACK_DATA;
datacheck:
if (is_remount) {
if (!sbi->s_journal)
ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
else if (test_opt(sb, DATA_FLAGS) != data_opt) {
ext4_msg(sb, KERN_ERR,
"Cannot change data mode on remount");
return 0;
}
} else {
clear_opt(sb, DATA_FLAGS);
sbi->s_mount_opt |= data_opt;
}
break;
case Opt_data_err_abort:
set_opt(sb, DATA_ERR_ABORT);
break;
case Opt_data_err_ignore:
clear_opt(sb, DATA_ERR_ABORT);
break;
#ifdef CONFIG_QUOTA
case Opt_usrjquota:
if (!set_qf_name(sb, USRQUOTA, &args[0]))
return 0;
break;
case Opt_grpjquota:
if (!set_qf_name(sb, GRPQUOTA, &args[0]))
return 0;
break;
case Opt_offusrjquota:
if (!clear_qf_name(sb, USRQUOTA))
return 0;
break;
case Opt_offgrpjquota:
if (!clear_qf_name(sb, GRPQUOTA))
return 0;
break;
case Opt_jqfmt_vfsold:
qfmt = QFMT_VFS_OLD;
goto set_qf_format;
case Opt_jqfmt_vfsv0:
qfmt = QFMT_VFS_V0;
goto set_qf_format;
case Opt_jqfmt_vfsv1:
qfmt = QFMT_VFS_V1;
set_qf_format:
if (sb_any_quota_loaded(sb) &&
sbi->s_jquota_fmt != qfmt) {
ext4_msg(sb, KERN_ERR, "Cannot change "
"journaled quota options when "
"quota turned on");
return 0;
}
sbi->s_jquota_fmt = qfmt;
break;
case Opt_quota:
case Opt_usrquota:
set_opt(sb, QUOTA);
set_opt(sb, USRQUOTA);
break;
case Opt_grpquota:
set_opt(sb, QUOTA);
set_opt(sb, GRPQUOTA);
break;
case Opt_noquota:
if (sb_any_quota_loaded(sb)) {
ext4_msg(sb, KERN_ERR, "Cannot change quota "
"options when quota turned on");
return 0;
}
clear_opt(sb, QUOTA);
clear_opt(sb, USRQUOTA);
clear_opt(sb, GRPQUOTA);
break;
#else
case Opt_quota:
case Opt_usrquota:
case Opt_grpquota:
ext4_msg(sb, KERN_ERR,
"quota options not supported");
break;
case Opt_usrjquota:
case Opt_grpjquota:
case Opt_offusrjquota:
case Opt_offgrpjquota:
case Opt_jqfmt_vfsold:
case Opt_jqfmt_vfsv0:
case Opt_jqfmt_vfsv1:
ext4_msg(sb, KERN_ERR,
"journaled quota options not supported");
break;
case Opt_noquota:
break;
#endif
case Opt_abort:
sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
break;
case Opt_nobarrier:
clear_opt(sb, BARRIER);
break;
case Opt_barrier:
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = 1; /* No argument, default to 1 */
if (option)
set_opt(sb, BARRIER);
else
clear_opt(sb, BARRIER);
break;
case Opt_ignore:
break;
case Opt_resize:
if (!is_remount) {
ext4_msg(sb, KERN_ERR,
"resize option only available "
"for remount");
return 0;
}
if (match_int(&args[0], &option) != 0)
return 0;
*n_blocks_count = option;
break;
case Opt_nobh:
ext4_msg(sb, KERN_WARNING,
"Ignoring deprecated nobh option");
break;
case Opt_bh:
ext4_msg(sb, KERN_WARNING,
"Ignoring deprecated bh option");
break;
case Opt_i_version:
set_opt(sb, I_VERSION);
sb->s_flags |= MS_I_VERSION;
break;
case Opt_nodelalloc:
clear_opt(sb, DELALLOC);
clear_opt2(sb, EXPLICIT_DELALLOC);
break;
case Opt_mblk_io_submit:
set_opt(sb, MBLK_IO_SUBMIT);
break;
case Opt_nomblk_io_submit:
clear_opt(sb, MBLK_IO_SUBMIT);
break;
case Opt_stripe:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
sbi->s_stripe = option;
break;
case Opt_delalloc:
set_opt(sb, DELALLOC);
set_opt2(sb, EXPLICIT_DELALLOC);
break;
case Opt_block_validity:
set_opt(sb, BLOCK_VALIDITY);
break;
case Opt_noblock_validity:
clear_opt(sb, BLOCK_VALIDITY);
break;
case Opt_inode_readahead_blks:
if (match_int(&args[0], &option))
return 0;
if (option < 0 || option > (1 << 30))
return 0;
if (option && !is_power_of_2(option)) {
ext4_msg(sb, KERN_ERR,
"EXT4-fs: inode_readahead_blks"
" must be a power of 2");
return 0;
}
sbi->s_inode_readahead_blks = option;
break;
case Opt_journal_ioprio:
if (match_int(&args[0], &option))
return 0;
if (option < 0 || option > 7)
break;
*journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
option);
break;
case Opt_noauto_da_alloc:
set_opt(sb, NO_AUTO_DA_ALLOC);
break;
case Opt_auto_da_alloc:
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = 1; /* No argument, default to 1 */
if (option)
clear_opt(sb, NO_AUTO_DA_ALLOC);
else
set_opt(sb, NO_AUTO_DA_ALLOC);
break;
case Opt_discard:
set_opt(sb, DISCARD);
break;
case Opt_nodiscard:
clear_opt(sb, DISCARD);
break;
case Opt_dioread_nolock:
set_opt(sb, DIOREAD_NOLOCK);
break;
case Opt_dioread_lock:
clear_opt(sb, DIOREAD_NOLOCK);
break;
case Opt_init_itable:
set_opt(sb, INIT_INODE_TABLE);
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
} else
option = EXT4_DEF_LI_WAIT_MULT;
if (option < 0)
return 0;
sbi->s_li_wait_mult = option;
break;
case Opt_noinit_itable:
clear_opt(sb, INIT_INODE_TABLE);
break;
default:
ext4_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
"or missing value", p);
return 0;
}
}
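/*
 * Journaled quota sanity checks: the old usrquota/grpquota options must
 * not be mixed with journaled quota files, and a journaled quota format
 * must be set exactly when journaled quota files are in use.
 */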
#ifdef CONFIG_QUOTA
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sb, GRPQUOTA);
if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
ext4_msg(sb, KERN_ERR, "old and new quota "
"format mixing");
return 0;
}
if (!sbi->s_jquota_fmt) {
ext4_msg(sb, KERN_ERR, "journaled quota format "
"not specified");
return 0;
}
} else {
if (sbi->s_jquota_fmt) {
ext4_msg(sb, KERN_ERR, "journaled quota format "
"specified with no journaling "
"enabled");
return 0;
}
}
#endif
return 1;
}
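/*
 * Final superblock setup at mount time: warn when the filesystem is
 * unchecked, carries errors, or has hit its maximal mount count or check
 * interval; then bump the mount count and commit the superblock.
 * Returns MS_RDONLY if the revision level is too high to allow a
 * read-write mount, otherwise 0.
 */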
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
int read_only)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
int res = 0;
if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
ext4_msg(sb, KERN_ERR, "revision level too high, "
"forcing read-only mode");
res = MS_RDONLY;
}
if (read_only)
goto done;
if (!(sbi->s_mount_state & EXT4_VALID_FS))
ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
"running e2fsck is recommended");
else if ((sbi->s_mount_state & EXT4_ERROR_FS))
ext4_msg(sb, KERN_WARNING,
"warning: mounting fs with errors, "
"running e2fsck is recommended");
else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
le16_to_cpu(es->s_mnt_count) >=
(unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
ext4_msg(sb, KERN_WARNING,
"warning: maximal mount count reached, "
"running e2fsck is recommended");
else if (le32_to_cpu(es->s_checkinterval) &&
(le32_to_cpu(es->s_lastcheck) +
le32_to_cpu(es->s_checkinterval) <= get_seconds()))
ext4_msg(sb, KERN_WARNING,
"warning: checktime reached, "
"running e2fsck is recommended");
if (!sbi->s_journal)
es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
es->s_mtime = cpu_to_le32(get_seconds());
ext4_update_dynamic_rev(sb);
if (sbi->s_journal)
EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
ext4_commit_super(sb, 1);
done:
if (test_opt(sb, DEBUG))
printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
sb->s_blocksize,
sbi->s_groups_count,
EXT4_BLOCKS_PER_GROUP(sb),
EXT4_INODES_PER_GROUP(sb),
sbi->s_mount_opt, sbi->s_mount_opt2);
cleancache_init_fs(sb);
return res;
}
static int ext4_fill_flex_info(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_desc *gdp = NULL;
ext4_group_t flex_group_count;
ext4_group_t flex_group;
unsigned int groups_per_flex = 0;
size_t size;
int i;
sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
sbi->s_log_groups_per_flex = 0;
return 1;
}
groups_per_flex = 1 << sbi->s_log_groups_per_flex;
/* We allocate both existing and potentially added groups */
flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
size = flex_group_count * sizeof(struct flex_groups);
sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
if (sbi->s_flex_groups == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
flex_group_count);
goto failed;
}
for (i = 0; i < sbi->s_groups_count; i++) {
gdp = ext4_get_group_desc(sb, i, NULL);
flex_group = ext4_flex_group(sbi, i);
atomic_add(ext4_free_inodes_count(sb, gdp),
&sbi->s_flex_groups[flex_group].free_inodes);
atomic_add(ext4_free_group_clusters(sb, gdp),
&sbi->s_flex_groups[flex_group].free_clusters);
atomic_add(ext4_used_dirs_count(sb, gdp),
&sbi->s_flex_groups[flex_group].used_dirs);
}
return 1;
failed:
return 0;
}
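/*
 * Compute the CRC16 checksum of a group descriptor: the filesystem UUID,
 * the group number and the descriptor itself (skipping the checksum
 * field) are fed into crc16().  Only meaningful when the GDT_CSUM
 * ro-compat feature is set.
 */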
__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
struct ext4_group_desc *gdp)
{
__u16 crc = 0;
if (sbi->s_es->s_feature_ro_compat &
cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
int offset = offsetof(struct ext4_group_desc, bg_checksum);
__le32 le_group = cpu_to_le32(block_group);
crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
crc = crc16(crc, (__u8 *)gdp, offset);
offset += sizeof(gdp->bg_checksum); /* skip checksum */
/* for checksum of struct ext4_group_desc do the rest...*/
if ((sbi->s_es->s_feature_incompat &
cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
offset < le16_to_cpu(sbi->s_es->s_desc_size))
crc = crc16(crc, (__u8 *)gdp + offset,
le16_to_cpu(sbi->s_es->s_desc_size) -
offset);
}
return cpu_to_le16(crc);
}
int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
struct ext4_group_desc *gdp)
{
if ((sbi->s_es->s_feature_ro_compat &
cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) &&
(gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp)))
return 0;
return 1;
}
/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
ext4_group_t *first_not_zeroed)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
ext4_fsblk_t last_block;
ext4_fsblk_t block_bitmap;
ext4_fsblk_t inode_bitmap;
ext4_fsblk_t inode_table;
int flexbg_flag = 0;
ext4_group_t i, grp = sbi->s_groups_count;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
flexbg_flag = 1;
ext4_debug("Checking group descriptors");
for (i = 0; i < sbi->s_groups_count; i++) {
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
if (i == sbi->s_groups_count - 1 || flexbg_flag)
last_block = ext4_blocks_count(sbi->s_es) - 1;
else
last_block = first_block +
(EXT4_BLOCKS_PER_GROUP(sb) - 1);
if ((grp == sbi->s_groups_count) &&
!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
grp = i;
block_bitmap = ext4_block_bitmap(sb, gdp);
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u not in group "
"(block %llu)!", i, block_bitmap);
return 0;
}
inode_bitmap = ext4_inode_bitmap(sb, gdp);
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u not in group "
"(block %llu)!", i, inode_bitmap);
return 0;
}
inode_table = ext4_inode_table(sb, gdp);
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode table for group %u not in group "
"(block %llu)!", i, inode_table);
return 0;
}
ext4_lock_group(sb, i);
if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Checksum for group %u failed (%u!=%u)",
i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
gdp)), le16_to_cpu(gdp->bg_checksum));
if (!(sb->s_flags & MS_RDONLY)) {
ext4_unlock_group(sb, i);
return 0;
}
}
ext4_unlock_group(sb, i);
if (!flexbg_flag)
first_block += EXT4_BLOCKS_PER_GROUP(sb);
}
if (NULL != first_not_zeroed)
*first_not_zeroed = grp;
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, ext4_count_free_clusters(sb)));
sbi->s_es->s_free_inodes_count = cpu_to_le32(ext4_count_free_inodes(sb));
return 1;
}
/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
* the superblock) which were deleted from all directories, but held open by
* a process at the time of a crash. We walk the list and try to delete these
* inodes at recovery time (only with a read-write filesystem).
*
* In order to keep the orphan inode chain consistent during traversal (in
* case of crash during recovery), we link each inode into the superblock
* orphan list_head and handle it the same way as an inode deletion during
* normal operation (which journals the operations for us).
*
* We only do an iget() and an iput() on each inode, which is very safe if we
* accidentally point at an in-use or already deleted inode. The worst that
* can happen in this case is that we get a "bit already cleared" message from
* ext4_free_inode(). The only reason we would point at a wrong inode is if
* e2fsck was run on this filesystem, and it must have already done the orphan
* inode cleanup for us, so we can safely abort without any further action.
*/
static void ext4_orphan_cleanup(struct super_block *sb,
struct ext4_super_block *es)
{
unsigned int s_flags = sb->s_flags;
int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
int i;
#endif
if (!es->s_last_orphan) {
jbd_debug(4, "no orphan inodes to clean up\n");
return;
}
if (bdev_read_only(sb->s_bdev)) {
ext4_msg(sb, KERN_ERR, "write access "
"unavailable, skipping orphan cleanup");
return;
}
/* Check if feature set would not allow a r/w mount */
if (!ext4_feature_set_ok(sb, 0)) {
ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
"unknown ROCOMPAT features");
return;
}
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, "
"clearing orphan list.\n");
es->s_last_orphan = 0;
jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
return;
}
if (s_flags & MS_RDONLY) {
ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
sb->s_flags &= ~MS_RDONLY;
}
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
sb->s_flags |= MS_ACTIVE;
/* Turn on quotas so that they are updated correctly */
for (i = 0; i < MAXQUOTAS; i++) {
if (EXT4_SB(sb)->s_qf_names[i]) {
int ret = ext4_quota_on_mount(sb, i);
if (ret < 0)
ext4_msg(sb, KERN_ERR,
"Cannot turn on journaled "
"quota: error %d", ret);
}
}
#endif
while (es->s_last_orphan) {
struct inode *inode;
inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
if (IS_ERR(inode)) {
es->s_last_orphan = 0;
break;
}
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
dquot_initialize(inode);
if (inode->i_nlink) {
ext4_msg(sb, KERN_DEBUG,
"%s: truncating inode %lu to %lld bytes",
__func__, inode->i_ino, inode->i_size);
jbd_debug(2, "truncating inode %lu to %lld bytes\n",
inode->i_ino, inode->i_size);
ext4_truncate(inode);
nr_truncates++;
} else {
ext4_msg(sb, KERN_DEBUG,
"%s: deleting unreferenced inode %lu",
__func__, inode->i_ino);
jbd_debug(2, "deleting unreferenced inode %lu\n",
inode->i_ino);
nr_orphans++;
}
iput(inode); /* The delete magic happens here! */
}
#define PLURAL(x) (x), ((x) == 1) ? "" : "s"
if (nr_orphans)
ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
PLURAL(nr_orphans));
if (nr_truncates)
ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
/* Turn quotas off */
for (i = 0; i < MAXQUOTAS; i++) {
if (sb_dqopt(sb)->files[i])
dquot_quota_off(sb, i);
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
}
/*
* Maximal extent format file size.
* Resulting logical blkno at s_maxbytes must fit in our on-disk
* extent format containers, within a sector_t, and within i_blocks
* in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
* so that won't be a limiting factor.
*
* However, there is another limiting factor: we store extents as a
* starting block plus a length, so the length of the extent covering the
* maximum file size must fit into the on-disk format containers as well.
* Since an extent covering logical blocks 0..max has length max + 1, we
* have to lower s_maxbytes by one fs block.
*
* Note, this does *not* consider any metadata overhead for vfs i_blocks.
*/
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
loff_t res;
loff_t upper_limit = MAX_LFS_FILESIZE;
/* small i_blocks in vfs inode? */
if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
/*
* !has_huge_files or CONFIG_LBDAF not enabled implies that
* the inode i_blocks field represents the file size in
* 512-byte sectors; 32 == size of vfs inode i_blocks * 8
*/
upper_limit = (1LL << 32) - 1;
/* total blocks in file system block size */
upper_limit >>= (blkbits - 9);
upper_limit <<= blkbits;
}
/*
* 32-bit extent-start container, ee_block. We lower the maxbytes
* by one fs block, so ee_len can cover the extent of maximum file
* size
*/
res = (1LL << 32) - 1;
res <<= blkbits;
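/*
 * For example, with 4KiB blocks (blkbits == 12) this gives
 * ((2^32 - 1) << 12) == 16TiB - 4KiB before the clamp below.
 */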
/* Sanity check against vm- & vfs- imposed limits */
if (res > upper_limit)
res = upper_limit;
return res;
}
/*
* Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
* block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
* We need to be 1 filesystem block less than the 2^48 sector limit.
*/
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
loff_t res = EXT4_NDIR_BLOCKS;
int meta_blocks;
loff_t upper_limit;
/* This is calculated to be the largest file size for a dense, block
* mapped file such that the file's total number of 512-byte sectors,
* including data and all indirect blocks, does not exceed (2^48 - 1).
*
* __u32 i_blocks_lo and __u16 i_blocks_high represent the total
* number of 512-byte sectors of the file.
*/
if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
/*
* !has_huge_files or CONFIG_LBDAF not enabled implies that
* the inode i_block field represents total file blocks in
* 2^32 512-byte sectors == size of vfs inode i_blocks * 8
*/
upper_limit = (1LL << 32) - 1;
/* total blocks in file system block size */
upper_limit >>= (bits - 9);
} else {
/*
* We use the 48-bit ext4_inode i_blocks.
* With EXT4_HUGE_FILE_FL set, i_blocks
* represents the total number of blocks in
* file system block size units.
*/
upper_limit = (1LL << 48) - 1;
}
/* indirect blocks */
meta_blocks = 1;
/* double indirect blocks */
meta_blocks += 1 + (1LL << (bits-2));
/* triple indirect blocks */
meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
upper_limit -= meta_blocks;
upper_limit <<= bits;
res += 1LL << (bits-2);
res += 1LL << (2*(bits-2));
res += 1LL << (3*(bits-2));
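/*
 * e.g. for 4KiB blocks (bits == 12): res = 12 + 2^10 + 2^20 + 2^30
 * blocks, i.e. just over 4TiB once shifted by bits below.
 */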
res <<= bits;
if (res > upper_limit)
res = upper_limit;
if (res > MAX_LFS_FILESIZE)
res = MAX_LFS_FILESIZE;
return res;
}
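/*
 * Return the block number holding group descriptor block @nr.  Without
 * META_BG (or for descriptor blocks before s_first_meta_bg) the
 * descriptors follow the superblock; with META_BG each descriptor block
 * lives in its own block group, after a backup superblock if present.
 */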
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
ext4_fsblk_t logical_sb_block, int nr)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_group_t bg, first_meta_bg;
int has_super = 0;
first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
nr < first_meta_bg)
return logical_sb_block + nr + 1;
bg = sbi->s_desc_per_block * nr;
if (ext4_bg_has_super(sb, bg))
has_super = 1;
return (has_super + ext4_group_first_block_no(sb, bg));
}
/**
* ext4_get_stripe_size: Get the stripe size.
* @sbi: In memory super block info
*
* If the stripe size was specified via a mount option, use that value.
* If the value specified at mount time is greater than the blocks per
* group, use the superblock value.  If the superblock value is also
* greater than blocks per group, return 0.
* The allocator needs it to be no greater than blocks per group.
*
*/
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
unsigned long stripe_width =
le32_to_cpu(sbi->s_es->s_raid_stripe_width);
int ret;
if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
ret = sbi->s_stripe;
else if (stripe_width <= sbi->s_blocks_per_group)
ret = stripe_width;
else if (stride <= sbi->s_blocks_per_group)
ret = stride;
else
ret = 0;
/*
* If the stripe width is 1, this makes no sense and
* we set it to 0 to turn off stripe handling code.
*/
if (ret <= 1)
ret = 0;
return ret;
}
/* sysfs support */
struct ext4_attr {
struct attribute attr;
ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *);
ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
const char *, size_t);
int offset;
};
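/*
 * Parse an unsigned long from a sysfs buffer; reject trailing garbage
 * and values larger than @max.
 */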
static int parse_strtoul(const char *buf,
unsigned long max, unsigned long *value)
{
char *endp;
*value = simple_strtoul(skip_spaces(buf), &endp, 0);
endp = skip_spaces(endp);
if (*endp || *value > max)
return -EINVAL;
return 0;
}
static ssize_t delayed_allocation_blocks_show(struct ext4_attr *a,
struct ext4_sb_info *sbi,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%llu\n",
(s64) EXT4_C2B(sbi,
percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
}
static ssize_t session_write_kbytes_show(struct ext4_attr *a,
struct ext4_sb_info *sbi, char *buf)
{
struct super_block *sb = sbi->s_buddy_cache->i_sb;
if (!sb->s_bdev->bd_part)
return snprintf(buf, PAGE_SIZE, "0\n");
return snprintf(buf, PAGE_SIZE, "%lu\n",
(part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
sbi->s_sectors_written_start) >> 1);
}
static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a,
struct ext4_sb_info *sbi, char *buf)
{
struct super_block *sb = sbi->s_buddy_cache->i_sb;
if (!sb->s_bdev->bd_part)
return snprintf(buf, PAGE_SIZE, "0\n");
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)(sbi->s_kbytes_written +
((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1)));
}
static ssize_t extent_cache_hits_show(struct ext4_attr *a,
struct ext4_sb_info *sbi, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits);
}
static ssize_t extent_cache_misses_show(struct ext4_attr *a,
struct ext4_sb_info *sbi, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses);
}
static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
struct ext4_sb_info *sbi,
const char *buf, size_t count)
{
unsigned long t;
if (parse_strtoul(buf, 0x40000000, &t))
return -EINVAL;
if (t && !is_power_of_2(t))
return -EINVAL;
sbi->s_inode_readahead_blks = t;
return count;
}
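/*
 * Generic show/store for unsigned int fields of struct ext4_sb_info,
 * located via the byte offset recorded in the attribute.
 */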
static ssize_t sbi_ui_show(struct ext4_attr *a,
struct ext4_sb_info *sbi, char *buf)
{
unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
static ssize_t sbi_ui_store(struct ext4_attr *a,
struct ext4_sb_info *sbi,
const char *buf, size_t count)
{
unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
unsigned long t;
if (parse_strtoul(buf, 0xffffffff, &t))
return -EINVAL;
*ui = t;
return count;
}
#define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
static struct ext4_attr ext4_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
.offset = offsetof(struct ext4_sb_info, _elname), \
}
#define EXT4_ATTR(name, mode, show, store) \
static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
#define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 0444, NULL, NULL)
#define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL)
#define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store)
#define EXT4_RW_ATTR_SBI_UI(name, elname) \
EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname)
#define ATTR_LIST(name) &ext4_attr_##name.attr
EXT4_RO_ATTR(delayed_allocation_blocks);
EXT4_RO_ATTR(session_write_kbytes);
EXT4_RO_ATTR(lifetime_write_kbytes);
EXT4_RO_ATTR(extent_cache_hits);
EXT4_RO_ATTR(extent_cache_misses);
EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
inode_readahead_blks_store, s_inode_readahead_blks);
EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
static struct attribute *ext4_attrs[] = {
ATTR_LIST(delayed_allocation_blocks),
ATTR_LIST(session_write_kbytes),
ATTR_LIST(lifetime_write_kbytes),
ATTR_LIST(extent_cache_hits),
ATTR_LIST(extent_cache_misses),
ATTR_LIST(inode_readahead_blks),
ATTR_LIST(inode_goal),
ATTR_LIST(mb_stats),
ATTR_LIST(mb_max_to_scan),
ATTR_LIST(mb_min_to_scan),
ATTR_LIST(mb_order2_req),
ATTR_LIST(mb_stream_req),
ATTR_LIST(mb_group_prealloc),
ATTR_LIST(max_writeback_mb_bump),
NULL,
};
/* Features this copy of ext4 supports */
EXT4_INFO_ATTR(lazy_itable_init);
EXT4_INFO_ATTR(batched_discard);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
NULL,
};
static ssize_t ext4_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
s_kobj);
struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
return a->show ? a->show(a, sbi, buf) : 0;
}
static ssize_t ext4_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t len)
{
struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
s_kobj);
struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void ext4_sb_release(struct kobject *kobj)
{
struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
s_kobj);
complete(&sbi->s_kobj_unregister);
}
static const struct sysfs_ops ext4_attr_ops = {
.show = ext4_attr_show,
.store = ext4_attr_store,
};
static struct kobj_type ext4_ktype = {
.default_attrs = ext4_attrs,
.sysfs_ops = &ext4_attr_ops,
.release = ext4_sb_release,
};
static void ext4_feat_release(struct kobject *kobj)
{
complete(&ext4_feat->f_kobj_unregister);
}
static struct kobj_type ext4_feat_ktype = {
.default_attrs = ext4_feat_attrs,
.sysfs_ops = &ext4_attr_ops,
.release = ext4_feat_release,
};
/*
* Check whether this filesystem can be mounted based on
* the features present and the RDONLY/RDWR mount requested.
* Returns 1 if this filesystem can be mounted as requested,
* 0 if it cannot be.
*/
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP)) {
ext4_msg(sb, KERN_ERR,
"Couldn't mount because of "
"unsupported optional features (%x)",
(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
~EXT4_FEATURE_INCOMPAT_SUPP));
return 0;
}
if (readonly)
return 1;
/* Check that feature set is OK for a read-write mount */
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) {
ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
"unsupported optional features (%x)",
(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
~EXT4_FEATURE_RO_COMPAT_SUPP));
return 0;
}
/*
* A filesystem with the huge_file feature can only be mounted
* read-write on 32-bit systems if the kernel is built with CONFIG_LBDAF
*/
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
if (sizeof(blkcnt_t) < sizeof(u64)) {
ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
"cannot be mounted RDWR without "
"CONFIG_LBDAF");
return 0;
}
}
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC) &&
!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
ext4_msg(sb, KERN_ERR,
"Can't support bigalloc feature without "
"extents feature\n");
return 0;
}
return 1;
}
/*
* This function is called once a day if we have errors logged
* on the file system
*/
static void print_daily_error_info(unsigned long arg)
{
struct super_block *sb = (struct super_block *) arg;
struct ext4_sb_info *sbi;
struct ext4_super_block *es;
sbi = EXT4_SB(sb);
es = sbi->s_es;
if (es->s_error_count)
ext4_msg(sb, KERN_NOTICE, "error count: %u",
le32_to_cpu(es->s_error_count));
if (es->s_first_error_time) {
printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
sb->s_id, le32_to_cpu(es->s_first_error_time),
(int) sizeof(es->s_first_error_func),
es->s_first_error_func,
le32_to_cpu(es->s_first_error_line));
if (es->s_first_error_ino)
printk(": inode %u",
le32_to_cpu(es->s_first_error_ino));
if (es->s_first_error_block)
printk(": block %llu", (unsigned long long)
le64_to_cpu(es->s_first_error_block));
printk("\n");
}
if (es->s_last_error_time) {
printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
sb->s_id, le32_to_cpu(es->s_last_error_time),
(int) sizeof(es->s_last_error_func),
es->s_last_error_func,
le32_to_cpu(es->s_last_error_line));
if (es->s_last_error_ino)
printk(": inode %u",
le32_to_cpu(es->s_last_error_ino));
if (es->s_last_error_block)
printk(": block %llu", (unsigned long long)
le64_to_cpu(es->s_last_error_block));
printk("\n");
}
mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
}
/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
struct ext4_group_desc *gdp = NULL;
ext4_group_t group, ngroups;
struct super_block *sb;
unsigned long timeout = 0;
int ret = 0;
sb = elr->lr_super;
ngroups = EXT4_SB(sb)->s_groups_count;
for (group = elr->lr_next_group; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp) {
ret = 1;
break;
}
if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
break;
}
if (group == ngroups)
ret = 1;
if (!ret) {
timeout = jiffies;
ret = ext4_init_inode_table(sb, group,
elr->lr_timeout ? 0 : 1);
if (elr->lr_timeout == 0) {
timeout = (jiffies - timeout) *
elr->lr_sbi->s_li_wait_mult;
elr->lr_timeout = timeout;
}
elr->lr_next_sched = jiffies + elr->lr_timeout;
elr->lr_next_group = group + 1;
}
return ret;
}
/*
* Remove the request from the request list and free the request
* structure.  Should be called with li_list_mtx held.
*/
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
struct ext4_sb_info *sbi;
if (!elr)
return;
sbi = elr->lr_sbi;
list_del(&elr->lr_request);
sbi->s_li_request = NULL;
kfree(elr);
}
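/*
 * Detach this filesystem's lazy-init request from the global list, if
 * one is registered, and free it.
 */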
static void ext4_unregister_li_request(struct super_block *sb)
{
mutex_lock(&ext4_li_mtx);
if (!ext4_li_info) {
mutex_unlock(&ext4_li_mtx);
return;
}
mutex_lock(&ext4_li_info->li_list_mtx);
ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
mutex_unlock(&ext4_li_info->li_list_mtx);
mutex_unlock(&ext4_li_mtx);
}
static struct task_struct *ext4_lazyinit_task;
/*
* This is the function where the ext4lazyinit thread lives.  It walks
* through the request list searching for the next scheduled filesystem.
* When such an fs is found, it runs the lazy initialization request
* (ext4_run_li_request) and keeps track of the time spent in it.  Based
* on that time we compute the next schedule time of the request.  Once
* the walk through the list is complete, it computes the next wakeup
* time and puts itself to sleep.
*/
static int ext4_lazyinit_thread(void *arg)
{
struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
struct list_head *pos, *n;
struct ext4_li_request *elr;
unsigned long next_wakeup, cur;
BUG_ON(NULL == eli);
cont_thread:
while (true) {
next_wakeup = MAX_JIFFY_OFFSET;
mutex_lock(&eli->li_list_mtx);
if (list_empty(&eli->li_request_list)) {
mutex_unlock(&eli->li_list_mtx);
goto exit_thread;
}
list_for_each_safe(pos, n, &eli->li_request_list) {
elr = list_entry(pos, struct ext4_li_request,
lr_request);
if (time_after_eq(jiffies, elr->lr_next_sched)) {
if (ext4_run_li_request(elr) != 0) {
/* error, remove the lazy_init job */
ext4_remove_li_request(elr);
continue;
}
}
if (time_before(elr->lr_next_sched, next_wakeup))
next_wakeup = elr->lr_next_sched;
}
mutex_unlock(&eli->li_list_mtx);
if (freezing(current))
refrigerator();
cur = jiffies;
if ((time_after_eq(cur, next_wakeup)) ||
(MAX_JIFFY_OFFSET == next_wakeup)) {
cond_resched();
continue;
}
schedule_timeout_interruptible(next_wakeup - cur);
if (kthread_should_stop()) {
ext4_clear_request_list();
goto exit_thread;
}
}
exit_thread:
/*
* It looks like the request list is empty, but we need
* to check it under the li_list_mtx lock, to prevent any
* additions into it, and of course we should lock ext4_li_mtx
* to atomically free the list and ext4_li_info, because at
* this point another ext4 filesystem could be registering a
* new one.
*/
mutex_lock(&ext4_li_mtx);
mutex_lock(&eli->li_list_mtx);
if (!list_empty(&eli->li_request_list)) {
mutex_unlock(&eli->li_list_mtx);
mutex_unlock(&ext4_li_mtx);
goto cont_thread;
}
mutex_unlock(&eli->li_list_mtx);
kfree(ext4_li_info);
ext4_li_info = NULL;
mutex_unlock(&ext4_li_mtx);
return 0;
}
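/* Drop every pending request from the lazy-init request list. */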
static void ext4_clear_request_list(void)
{
struct list_head *pos, *n;
struct ext4_li_request *elr;
mutex_lock(&ext4_li_info->li_list_mtx);
list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
elr = list_entry(pos, struct ext4_li_request,
lr_request);
ext4_remove_li_request(elr);
}
mutex_unlock(&ext4_li_info->li_list_mtx);
}
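/*
 * Start the ext4lazyinit kernel thread; on failure drop all pending
 * requests and free ext4_li_info.
 */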
static int ext4_run_lazyinit_thread(void)
{
ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
ext4_li_info, "ext4lazyinit");
if (IS_ERR(ext4_lazyinit_task)) {
int err = PTR_ERR(ext4_lazyinit_task);
ext4_clear_request_list();
kfree(ext4_li_info);
ext4_li_info = NULL;
printk(KERN_CRIT "EXT4: error %d creating inode table "
"initialization thread\n",
err);
return err;
}
ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
return 0;
}
/*
* Check whether it makes sense to run the itable init thread or not.
* If there is at least one uninitialized inode table, return the
* corresponding group number; otherwise the loop goes through all
* groups and returns the total number of groups.
*/
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
struct ext4_group_desc *gdp = NULL;
for (group = 0; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp)
continue;
if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
break;
}
return group;
}
static int ext4_li_info_new(void)
{
struct ext4_lazy_init *eli = NULL;
eli = kzalloc(sizeof(*eli), GFP_KERNEL);
if (!eli)
return -ENOMEM;
INIT_LIST_HEAD(&eli->li_request_list);
mutex_init(&eli->li_list_mtx);
eli->li_state |= EXT4_LAZYINIT_QUIT;
ext4_li_info = eli;
return 0;
}
static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
ext4_group_t start)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_li_request *elr;
unsigned long rnd;
elr = kzalloc(sizeof(*elr), GFP_KERNEL);
if (!elr)
return NULL;
elr->lr_super = sb;
elr->lr_sbi = sbi;
elr->lr_next_group = start;
/*
* Randomize first schedule time of the request to
* spread the inode table initialization requests
* better.
*/
get_random_bytes(&rnd, sizeof(rnd));
elr->lr_next_sched = jiffies + (unsigned long)rnd %
(EXT4_DEF_LI_MAX_START_DELAY * HZ);
return elr;
}
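/*
 * Register an inode-table zeroing request for this filesystem.  If one
 * is already registered, just reset its timeout; skip registration
 * entirely when every group is zeroed, the fs is read-only, or
 * init_itable is disabled.  Starts the lazyinit thread if it is not
 * already running.
 */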
static int ext4_register_li_request(struct super_block *sb,
ext4_group_t first_not_zeroed)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_li_request *elr;
ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
int ret = 0;
if (sbi->s_li_request != NULL) {
/*
* Reset timeout so it can be computed again, because
* s_li_wait_mult might have changed.
*/
sbi->s_li_request->lr_timeout = 0;
return 0;
}
if (first_not_zeroed == ngroups ||
(sb->s_flags & MS_RDONLY) ||
!test_opt(sb, INIT_INODE_TABLE))
return 0;
elr = ext4_li_request_new(sb, first_not_zeroed);
if (!elr)
return -ENOMEM;
mutex_lock(&ext4_li_mtx);
if (NULL == ext4_li_info) {
ret = ext4_li_info_new();
if (ret)
goto out;
}
mutex_lock(&ext4_li_info->li_list_mtx);
list_add(&elr->lr_request, &ext4_li_info->li_request_list);
mutex_unlock(&ext4_li_info->li_list_mtx);
sbi->s_li_request = elr;
/*
* set elr to NULL here since it has been inserted into
* the request_list and its removal and freeing are
* handled by ext4_clear_request_list from now on.
*/
elr = NULL;
if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
ret = ext4_run_lazyinit_thread();
if (ret)
goto out;
}
out:
mutex_unlock(&ext4_li_mtx);
if (ret)
kfree(elr);
return ret;
}
/*
* We do not need to lock anything since this is called on
* module unload.
*/
static void ext4_destroy_lazyinit_thread(void)
{
/*
* If thread exited earlier
* there's nothing to be done.
*/
if (!ext4_li_info || !ext4_lazyinit_task)
return;
kthread_stop(ext4_lazyinit_task);
}
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
struct ext4_sb_info *sbi;
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
unsigned long offset = 0;
unsigned long journal_devnum = 0;
unsigned long def_mount_opts;
struct inode *root;
char *cp;
const char *descr;
int ret = -ENOMEM;
int blocksize, clustersize;
unsigned int db_count;
unsigned int i;
int needs_recovery, has_huge_files, has_bigalloc;
__u64 blocks_count;
int err;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
goto out_free_orig;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
goto out_free_orig;
}
sb->s_fs_info = sbi;
sbi->s_mount_opt = 0;
sbi->s_resuid = EXT4_DEF_RESUID;
sbi->s_resgid = EXT4_DEF_RESGID;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
sbi->s_sb_block = sb_block;
if (sb->s_bdev->bd_part)
sbi->s_sectors_written_start =
part_stat_read(sb->s_bdev->bd_part, sectors[1]);
/* Cleanup superblock name */
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
*cp = '!';
ret = -EINVAL;
blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
if (!blocksize) {
ext4_msg(sb, KERN_ERR, "unable to set blocksize");
goto out_fail;
}
/*
* The ext4 superblock will not be buffer aligned for other than 1kB
* block sizes. We need to calculate the offset from buffer start.
*/
if (blocksize != EXT4_MIN_BLOCK_SIZE) {
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
} else {
logical_sb_block = sb_block;
}
if (!(bh = sb_bread(sb, logical_sb_block))) {
ext4_msg(sb, KERN_ERR, "unable to read superblock");
goto out_fail;
}
/*
* Note: s_es must be initialized as soon as possible because
* some ext4 macro-instructions depend on its value
*/
es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT4_SUPER_MAGIC)
goto cantfind_ext4;
sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
set_opt(sb, INIT_INODE_TABLE);
if (def_mount_opts & EXT4_DEFM_DEBUG)
set_opt(sb, DEBUG);
if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
"2.6.38");
set_opt(sb, GRPID);
}
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sb, NO_UID32);
/* xattr user namespace & acls are now defaulted on */
#ifdef CONFIG_EXT4_FS_XATTR
set_opt(sb, XATTR_USER);
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
set_opt(sb, POSIX_ACL);
#endif
set_opt(sb, MBLK_IO_SUBMIT);
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
set_opt(sb, JOURNAL_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
set_opt(sb, ORDERED_DATA);
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
set_opt(sb, WRITEBACK_DATA);
if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
set_opt(sb, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
set_opt(sb, ERRORS_CONT);
else
set_opt(sb, ERRORS_RO);
if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
set_opt(sb, BLOCK_VALIDITY);
if (def_mount_opts & EXT4_DEFM_DISCARD)
set_opt(sb, DISCARD);
sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
set_opt(sb, BARRIER);
/*
* enable delayed allocation by default
* Use -o nodelalloc to turn it off
*/
if (!IS_EXT3_SB(sb) &&
((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
set_opt(sb, DELALLOC);
/*
* set default s_li_wait_mult for lazyinit, for the case there is
* no mount option specified.
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
&journal_devnum, &journal_ioprio, NULL, 0)) {
ext4_msg(sb, KERN_WARNING,
"failed to parse options in superblock: %s",
sbi->s_es->s_mount_opts);
}
if (!parse_options((char *) data, sb, &journal_devnum,
&journal_ioprio, NULL, 0))
goto failed_mount;
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
"with data=journal disables delayed "
"allocation and O_DIRECT support!\n");
if (test_opt2(sb, EXPLICIT_DELALLOC)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and delalloc");
goto failed_mount;
}
if (test_opt(sb, DIOREAD_NOLOCK)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and delalloc");
goto failed_mount;
}
if (test_opt(sb, DELALLOC))
clear_opt(sb, DELALLOC);
}
blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (test_opt(sb, DIOREAD_NOLOCK)) {
if (blocksize < PAGE_SIZE) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"dioread_nolock if block size != PAGE_SIZE");
goto failed_mount;
}
}
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U)))
ext4_msg(sb, KERN_WARNING,
"feature flags set on rev 0 fs, "
"running e2fsck is recommended");
if (IS_EXT2_SB(sb)) {
if (ext2_feature_set_ok(sb))
ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
"using the ext4 subsystem");
else {
ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
"to feature incompatibilities");
goto failed_mount;
}
}
if (IS_EXT3_SB(sb)) {
if (ext3_feature_set_ok(sb))
ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
"using the ext4 subsystem");
else {
ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
"to feature incompatibilities");
goto failed_mount;
}
}
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
* so there is a chance incompat flags are set on a rev 0 filesystem.
*/
if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
goto failed_mount;
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
"Unsupported filesystem blocksize %d", blocksize);
goto failed_mount;
}
if (sb->s_blocksize != blocksize) {
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
ext4_msg(sb, KERN_ERR, "bad block size %d",
blocksize);
goto failed_mount;
}
brelse(bh);
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
bh = sb_bread(sb, logical_sb_block);
if (!bh) {
ext4_msg(sb, KERN_ERR,
"Can't read superblock on 2nd try");
goto failed_mount;
}
es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
ext4_msg(sb, KERN_ERR,
"Magic mismatch, very weird!");
goto failed_mount;
}
}
has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
has_huge_files);
sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
ext4_msg(sb, KERN_ERR,
"unsupported inode size: %d",
sbi->s_inode_size);
goto failed_mount;
}
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
}
sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) {
if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
!is_power_of_2(sbi->s_desc_size)) {
ext4_msg(sb, KERN_ERR,
"unsupported descriptor size %lu",
sbi->s_desc_size);
goto failed_mount;
}
} else
sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
goto cantfind_ext4;
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
for (i = 0; i < 4; i++)
sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
sbi->s_def_hash_version = es->s_def_hash_version;
i = le32_to_cpu(es->s_flags);
if (i & EXT2_FLAGS_UNSIGNED_HASH)
sbi->s_hash_unsigned = 3;
else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
sbi->s_hash_unsigned = 3;
#else
es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
sb->s_dirt = 1;
}
/* Handle clustersize */
clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
has_bigalloc = EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_BIGALLOC);
if (has_bigalloc) {
if (clustersize < blocksize) {
ext4_msg(sb, KERN_ERR,
"cluster size (%d) smaller than "
"block size (%d)", clustersize, blocksize);
goto failed_mount;
}
sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
le32_to_cpu(es->s_log_block_size);
sbi->s_clusters_per_group =
le32_to_cpu(es->s_clusters_per_group);
if (sbi->s_clusters_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR,
"#clusters per group too big: %lu",
sbi->s_clusters_per_group);
goto failed_mount;
}
if (sbi->s_blocks_per_group !=
(sbi->s_clusters_per_group * (clustersize / blocksize))) {
ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
"clusters per group (%lu) inconsistent",
sbi->s_blocks_per_group,
sbi->s_clusters_per_group);
goto failed_mount;
}
} else {
if (clustersize != blocksize) {
ext4_warning(sb, "fragment/cluster size (%d) != "
"block size (%d)", clustersize,
blocksize);
clustersize = blocksize;
}
if (sbi->s_blocks_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR,
"#blocks per group too big: %lu",
sbi->s_blocks_per_group);
goto failed_mount;
}
sbi->s_clusters_per_group = sbi->s_blocks_per_group;
sbi->s_cluster_bits = 0;
}
sbi->s_cluster_ratio = clustersize / blocksize;
if (sbi->s_inodes_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR,
"#inodes per group too big: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
/*
* Test whether we have more sectors than will fit in sector_t,
* and whether the max offset is addressable by the page cache.
*/
err = generic_check_addressable(sb->s_blocksize_bits,
ext4_blocks_count(es));
if (err) {
ext4_msg(sb, KERN_ERR, "filesystem"
" too large to mount safely on this system");
if (sizeof(sector_t) < 8)
ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
ret = err;
goto failed_mount;
}
if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext4;
/* check blocks count against device size */
blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
if (blocks_count && ext4_blocks_count(es) > blocks_count) {
ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
"exceeds size of device (%llu blocks)",
ext4_blocks_count(es), blocks_count);
goto failed_mount;
}
/*
* It makes no sense for the first data block to be beyond the end
* of the filesystem.
*/
if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
"block %u is beyond end of filesystem (%llu)",
le32_to_cpu(es->s_first_data_block),
ext4_blocks_count(es));
goto failed_mount;
}
blocks_count = (ext4_blocks_count(es) -
le32_to_cpu(es->s_first_data_block) +
EXT4_BLOCKS_PER_GROUP(sb) - 1);
do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
"(block count %llu, first data block %u, "
"blocks per group %lu)", sbi->s_groups_count,
ext4_blocks_count(es),
le32_to_cpu(es->s_first_data_block),
EXT4_BLOCKS_PER_GROUP(sb));
goto failed_mount;
}
sbi->s_groups_count = blocks_count;
sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
sbi->s_group_desc = ext4_kvmalloc(db_count *
sizeof(struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory");
goto failed_mount;
}
if (ext4_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);
bgl_lock_init(sbi->s_blockgroup_lock);
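/* Read all group descriptor blocks; the buffers stay pinned while mounted. */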
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logical_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
if (!sbi->s_group_desc[i]) {
ext4_msg(sb, KERN_ERR,
"can't read group descriptor %d", i);
db_count = i;
goto failed_mount2;
}
}
if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
goto failed_mount2;
}
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
if (!ext4_fill_flex_info(sb)) {
ext4_msg(sb, KERN_ERR,
"unable to initialize "
"flex_bg meta info!");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
init_timer(&sbi->s_err_report);
sbi->s_err_report.function = print_daily_error_info;
sbi->s_err_report.data = (unsigned long) sb;
err = percpu_counter_init(&sbi->s_freeclusters_counter,
ext4_count_free_clusters(sb));
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
}
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount3;
}
sbi->s_stripe = ext4_get_stripe_size(sbi);
sbi->s_max_writeback_mb_bump = 128;
/*
* set up enough so that it can read an inode
*/
if (!test_opt(sb, NOLOAD) &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
sb->s_op = &ext4_sops;
else
sb->s_op = &ext4_nojournal_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->s_qcop = &ext4_qctl_operations;
sb->dq_op = &ext4_quota_operations;
#endif
memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
sbi->s_resize_flags = 0;
sb->s_root = NULL;
needs_recovery = (es->s_last_orphan != 0 ||
EXT4_HAS_INCOMPAT_FEATURE(sb,
EXT4_FEATURE_INCOMPAT_RECOVER));
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
!(sb->s_flags & MS_RDONLY))
if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
goto failed_mount3;
/*
* The first inode we look at is the journal inode. Don't try
* root first: it may be modified in the journal!
*/
if (!test_opt(sb, NOLOAD) &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext4_load_journal(sb, es, journal_devnum))
goto failed_mount3;
} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
ext4_msg(sb, KERN_ERR, "required journal recovery "
"suppressed and not mounted read-only");
goto failed_mount_wq;
} else {
clear_opt(sb, DATA_FLAGS);
sbi->s_journal = NULL;
needs_recovery = 0;
goto no_journal;
}
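/*
 * The journal is loaded; adjust its feature flags to match the
 * requested mount options and the filesystem size.
 */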
if (ext4_blocks_count(es) > 0xffffffffULL &&
!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_64BIT)) {
ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
goto failed_mount_wq;
}
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
jbd2_journal_set_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
jbd2_journal_set_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
jbd2_journal_clear_features(sbi->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
} else {
jbd2_journal_clear_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
}
/* We have now updated the journal if required, so we can
* validate the data journaling mode. */
switch (test_opt(sb, DATA_FLAGS)) {
case 0:
/* No mode set, assume a default based on the journal
* capabilities: ORDERED_DATA if the journal can
* cope, else JOURNAL_DATA
*/
if (jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
set_opt(sb, ORDERED_DATA);
else
set_opt(sb, JOURNAL_DATA);
break;
case EXT4_MOUNT_ORDERED_DATA:
case EXT4_MOUNT_WRITEBACK_DATA:
if (!jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
ext4_msg(sb, KERN_ERR, "Journal does not support "
"requested data journaling mode");
goto failed_mount_wq;
}
default:
break;
}
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
/*
* The journal may have updated the bg summary counts, so we
* need to update the global counters.
*/
percpu_counter_set(&sbi->s_freeclusters_counter,
ext4_count_free_clusters(sb));
percpu_counter_set(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
percpu_counter_set(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
no_journal:
/*
* The maximum number of concurrent works can be high and
* concurrency isn't really necessary. Limit it to 1.
*/
EXT4_SB(sb)->dio_unwritten_wq =
alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!EXT4_SB(sb)->dio_unwritten_wq) {
printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
goto failed_mount_wq;
}
/*
* The jbd2_journal_load will have done any necessary log recovery,
* so we can safely mount the rest of the filesystem now.
*/
root = ext4_iget(sb, EXT4_ROOT_INO);
if (IS_ERR(root)) {
ext4_msg(sb, KERN_ERR, "get root inode failed");
ret = PTR_ERR(root);
root = NULL;
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
goto failed_mount4;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
ext4_msg(sb, KERN_ERR, "get root dentry failed");
ret = -ENOMEM;
goto failed_mount4;
}
ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
/* determine the minimum size of new large inodes, if present */
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_want_extra_isize);
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_min_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_min_extra_isize);
}
}
/* Check if enough inode space is available */
if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
sbi->s_inode_size) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
ext4_msg(sb, KERN_INFO, "required extra inode space not"
"available");
}
err = ext4_setup_system_zone(sb);
if (err) {
ext4_msg(sb, KERN_ERR, "failed to initialize system "
"zone (%d)", err);
goto failed_mount4;
}
ext4_ext_init(sb);
err = ext4_mb_init(sb, needs_recovery);
if (err) {
ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
err);
goto failed_mount5;
}
err = ext4_register_li_request(sb, first_not_zeroed);
if (err)
goto failed_mount6;
sbi->s_kobj.kset = ext4_kset;
init_completion(&sbi->s_kobj_unregister);
err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL,
"%s", sb->s_id);
if (err)
goto failed_mount7;
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
if (needs_recovery) {
ext4_msg(sb, KERN_INFO, "recovery complete");
ext4_mark_recovery_complete(sb, es);
}
if (EXT4_SB(sb)->s_journal) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
descr = " journalled data mode";
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
descr = " ordered data mode";
else
descr = " writeback data mode";
} else
descr = "out journal";
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
"Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
kfree(orig_data);
return 0;
cantfind_ext4:
if (!silent)
ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
goto failed_mount;
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
ext4_ext_release(sb);
failed_mount5:
ext4_mb_release(sb);
ext4_release_system_zone(sb);
failed_mount4:
iput(root);
sb->s_root = NULL;
ext4_msg(sb, KERN_ERR, "mount failed");
destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
failed_mount_wq:
if (sbi->s_journal) {
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
failed_mount3:
del_timer(&sbi->s_err_report);
if (sbi->s_flex_groups)
ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
ext4_kvfree(sbi->s_group_desc);
failed_mount:
if (sbi->s_proc) {
remove_proc_entry(sb->s_id, ext4_proc_root);
}
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
ext4_blkdev_remove(sbi);
brelse(bh);
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
out_free_orig:
kfree(orig_data);
return ret;
}
/*
* Setup any per-fs journal parameters now. We'll do this both on
* initial mount, once the journal has been initialised but before we've
* done any recovery; and again on any subsequent remount.
*/
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
journal->j_commit_interval = sbi->s_commit_interval;
journal->j_min_batch_time = sbi->s_min_batch_time;
journal->j_max_batch_time = sbi->s_max_batch_time;
write_lock(&journal->j_state_lock);
if (test_opt(sb, BARRIER))
journal->j_flags |= JBD2_BARRIER;
else
journal->j_flags &= ~JBD2_BARRIER;
if (test_opt(sb, DATA_ERR_ABORT))
journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
else
journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
write_unlock(&journal->j_state_lock);
}
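/*
 * Open the journal stored in inode @journal_inum and set up a jbd2
 * journal on top of it.
 */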
static journal_t *ext4_get_journal(struct super_block *sb,
unsigned int journal_inum)
{
struct inode *journal_inode;
journal_t *journal;
BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
/* First, test for the existence of a valid inode on disk. Bad
* things happen if we iget() an unused inode, as the subsequent
* iput() will try to delete it. */
journal_inode = ext4_iget(sb, journal_inum);
if (IS_ERR(journal_inode)) {
ext4_msg(sb, KERN_ERR, "no journal found");
return NULL;
}
if (!journal_inode->i_nlink) {
make_bad_inode(journal_inode);
iput(journal_inode);
ext4_msg(sb, KERN_ERR, "journal inode is deleted");
return NULL;
}
jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
journal_inode, journal_inode->i_size);
if (!S_ISREG(journal_inode->i_mode)) {
ext4_msg(sb, KERN_ERR, "invalid journal inode");
iput(journal_inode);
return NULL;
}
journal = jbd2_journal_init_inode(journal_inode);
if (!journal) {
ext4_msg(sb, KERN_ERR, "Could not load journal inode");
iput(journal_inode);
return NULL;
}
journal->j_private = sb;
ext4_init_journal_params(sb, journal);
return journal;
}
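/*
 * Open an external journal device: validate its superblock magic,
 * JOURNAL_DEV feature and UUID against the filesystem, then set up a
 * jbd2 journal covering the whole device.
 */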
static journal_t *ext4_get_dev_journal(struct super_block *sb,
dev_t j_dev)
{
struct buffer_head *bh;
journal_t *journal;
ext4_fsblk_t start;
ext4_fsblk_t len;
int hblock, blocksize;
ext4_fsblk_t sb_block;
unsigned long offset;
struct ext4_super_block *es;
struct block_device *bdev;
BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
bdev = ext4_blkdev_get(j_dev, sb);
if (bdev == NULL)
return NULL;
blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
ext4_msg(sb, KERN_ERR,
"blocksize too small for journal device");
goto out_bdev;
}
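/*
* The external journal's ext4 superblock lives at byte offset
* EXT4_MIN_BLOCK_SIZE (1024) on the journal device; work out which
* blocksize-sized block that byte falls in and the offset within it.
*/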
sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
offset = EXT4_MIN_BLOCK_SIZE % blocksize;
set_blocksize(bdev, blocksize);
if (!(bh = __bread(bdev, sb_block, blocksize))) {
ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
"external journal");
goto out_bdev;
}
es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
!(le32_to_cpu(es->s_feature_incompat) &
EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
ext4_msg(sb, KERN_ERR, "external journal has "
"bad superblock");
brelse(bh);
goto out_bdev;
}
if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
ext4_msg(sb, KERN_ERR, "journal UUID does not match");
brelse(bh);
goto out_bdev;
}
len = ext4_blocks_count(es);
start = sb_block + 1;
brelse(bh); /* we're done with the superblock */
journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
start, len, blocksize);
if (!journal) {
ext4_msg(sb, KERN_ERR, "failed to create device journal");
goto out_bdev;
}
journal->j_private = sb;
ll_rw_block(READ, 1, &journal->j_sb_buffer);
wait_on_buffer(journal->j_sb_buffer);
if (!buffer_uptodate(journal->j_sb_buffer)) {
ext4_msg(sb, KERN_ERR, "I/O error on journal device");
goto out_journal;
}
if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
ext4_msg(sb, KERN_ERR, "External journal has more than one "
"user (unsupported) - %d",
be32_to_cpu(journal->j_superblock->s_nr_users));
goto out_journal;
}
EXT4_SB(sb)->journal_bdev = bdev;
ext4_init_journal_params(sb, journal);
return journal;
out_journal:
jbd2_journal_destroy(journal);
out_bdev:
ext4_blkdev_put(bdev);
return NULL;
}
static int ext4_load_journal(struct super_block *sb,
struct ext4_super_block *es,
unsigned long journal_devnum)
{
journal_t *journal;
unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
dev_t journal_dev;
int err = 0;
int really_read_only;
BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
if (journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
ext4_msg(sb, KERN_INFO, "external journal device major/minor "
"numbers have changed");
journal_dev = new_decode_dev(journal_devnum);
} else
journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
really_read_only = bdev_read_only(sb->s_bdev);
/*
* Are we loading a blank journal or performing recovery after a
* crash? For recovery, we need to check in advance whether we
* can get read-write access to the device.
*/
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
if (sb->s_flags & MS_RDONLY) {
ext4_msg(sb, KERN_INFO, "INFO: recovery "
"required on readonly filesystem");
if (really_read_only) {
ext4_msg(sb, KERN_ERR, "write access "
"unavailable, cannot proceed");
return -EROFS;
}
ext4_msg(sb, KERN_INFO, "write access will "
"be enabled during recovery");
}
}
if (journal_inum && journal_dev) {
ext4_msg(sb, KERN_ERR, "filesystem has both journal "
"and inode journals!");
return -EINVAL;
}
if (journal_inum) {
if (!(journal = ext4_get_journal(sb, journal_inum)))
return -EINVAL;
} else {
if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
return -EINVAL;
}
if (!(journal->j_flags & JBD2_BARRIER))
ext4_msg(sb, KERN_INFO, "barriers disabled");
if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
err = jbd2_journal_update_format(journal);
if (err) {
ext4_msg(sb, KERN_ERR, "error updating journal");
jbd2_journal_destroy(journal);
return err;
}
}
if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER))
err = jbd2_journal_wipe(journal, !really_read_only);
if (!err) {
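/*
* Journal recovery may replay an older copy of the superblock over
* *es, so preserve the error-reporting area across
* jbd2_journal_load() and restore it afterwards.
*/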
char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
if (save)
memcpy(save, ((char *) es) +
EXT4_S_ERR_START, EXT4_S_ERR_LEN);
err = jbd2_journal_load(journal);
if (save)
memcpy(((char *) es) + EXT4_S_ERR_START,
save, EXT4_S_ERR_LEN);
kfree(save);
}
if (err) {
ext4_msg(sb, KERN_ERR, "error loading journal");
jbd2_journal_destroy(journal);
return err;
}
EXT4_SB(sb)->s_journal = journal;
ext4_clear_journal_err(sb, es);
if (!really_read_only && journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
es->s_journal_dev = cpu_to_le32(journal_devnum);
/* Make sure we flush the recovery flag to disk. */
ext4_commit_super(sb, 1);
}
return 0;
}
static int ext4_commit_super(struct super_block *sb, int sync)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
int error = 0;
if (!sbh || block_device_ejected(sb))
return error;
if (buffer_write_io_error(sbh)) {
/*
* Oh, dear. A previous attempt to write the
* superblock failed. This could happen because the
* USB device was yanked out. Or it could happen to
* be a transient write error and maybe the block will
* be remapped. Nothing we can do but to retry the
* write and hope for the best.
*/
ext4_msg(sb, KERN_ERR, "previous I/O error to "
"superblock detected");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
/*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
* write time when we are mounting the root file system
* read/only but we need to replay the journal; at that point,
* for people who are east of GMT and who make their clock
* tick in localtime for Windows bug-for-bug compatibility,
* the clock is set in the future, and this will cause e2fsck
* to complain and force a full file system check.
*/
if (!(sb->s_flags & MS_RDONLY))
es->s_wtime = cpu_to_le32(get_seconds());
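/*
* s_kbytes_written is the lifetime KiB written to the device: the
* value accumulated before this mount plus the 512-byte sectors
* written since mount, shifted right by one to convert to KiB.
*/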
if (sb->s_bdev->bd_part)
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1));
else
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
ext4_free_blocks_count_set(es,
EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeclusters_counter)));
es->s_free_inodes_count =
cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
sb->s_dirt = 0;
BUFFER_TRACE(sbh, "marking dirty");
mark_buffer_dirty(sbh);
if (sync) {
error = sync_dirty_buffer(sbh);
if (error)
return error;
error = buffer_write_io_error(sbh);
if (error) {
ext4_msg(sb, KERN_ERR, "I/O error while writing "
"superblock");
clear_buffer_write_io_error(sbh);
set_buffer_uptodate(sbh);
}
}
return error;
}
/*
* Have we just finished recovery? If so, and if we are mounting (or
* remounting) the filesystem readonly, then we will end up with a
* consistent fs on disk. Record that fact.
*/
static void ext4_mark_recovery_complete(struct super_block *sb,
struct ext4_super_block *es)
{
journal_t *journal = EXT4_SB(sb)->s_journal;
if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
BUG_ON(journal != NULL);
return;
}
jbd2_journal_lock_updates(journal);
if (jbd2_journal_flush(journal) < 0)
goto out;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER) &&
sb->s_flags & MS_RDONLY) {
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
ext4_commit_super(sb, 1);
}
out:
jbd2_journal_unlock_updates(journal);
}
/*
* If we are mounting (or read-write remounting) a filesystem whose journal
* has recorded an error from a previous lifetime, move that error to the
* main filesystem now.
*/
static void ext4_clear_journal_err(struct super_block *sb,
struct ext4_super_block *es)
{
journal_t *journal;
int j_errno;
const char *errstr;
BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
journal = EXT4_SB(sb)->s_journal;
/*
* Now check for any error status which may have been recorded in the
* journal by a prior ext4_error() or ext4_abort()
*/
j_errno = jbd2_journal_errno(journal);
if (j_errno) {
char nbuf[16];
errstr = ext4_decode_error(sb, j_errno, nbuf);
ext4_warning(sb, "Filesystem error recorded "
"from previous mount: %s", errstr);
ext4_warning(sb, "Marking fs in need of filesystem check.");
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
ext4_commit_super(sb, 1);
jbd2_journal_clear_err(journal);
}
}
/*
* Force the running and committing transactions to commit,
* and wait on the commit.
*/
int ext4_force_commit(struct super_block *sb)
{
journal_t *journal;
int ret = 0;
if (sb->s_flags & MS_RDONLY)
return 0;
journal = EXT4_SB(sb)->s_journal;
if (journal) {
vfs_check_frozen(sb, SB_FREEZE_TRANS);
ret = ext4_journal_force_commit(journal);
}
return ret;
}
static void ext4_write_super(struct super_block *sb)
{
lock_super(sb);
ext4_commit_super(sb, 1);
unlock_super(sb);
}
static int ext4_sync_fs(struct super_block *sb, int wait)
{
int ret = 0;
tid_t target;
struct ext4_sb_info *sbi = EXT4_SB(sb);
trace_ext4_sync_fs(sb, wait);
flush_workqueue(sbi->dio_unwritten_wq);
if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
if (wait)
jbd2_log_wait_commit(sbi->s_journal, target);
}
return ret;
}
/*
* LVM calls this function before a (read-only) snapshot is created. This
* gives us a chance to flush the journal completely and mark the fs clean.
*
* Note that this function alone cannot bring the filesystem to a clean
* state, because ext4 prevents new handles from being started via
* @sb->s_frozen, which is maintained by an upper layer. It thus needs
* help from that upper layer.
*/
static int ext4_freeze(struct super_block *sb)
{
int error = 0;
journal_t *journal;
if (sb->s_flags & MS_RDONLY)
return 0;
journal = EXT4_SB(sb)->s_journal;
/* Now we set up the journal barrier. */
jbd2_journal_lock_updates(journal);
/*
* Don't clear the needs_recovery flag if we failed to flush
* the journal.
*/
error = jbd2_journal_flush(journal);
if (error < 0)
goto out;
/* Journal blocked and flushed, clear needs_recovery flag. */
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
error = ext4_commit_super(sb, 1);
out:
/* we rely on s_frozen to stop further updates */
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
return error;
}
/*
* Called by LVM after the snapshot is done. We need to reset the RECOVER
* flag here, even though the filesystem is not technically dirty yet.
*/
static int ext4_unfreeze(struct super_block *sb)
{
if (sb->s_flags & MS_RDONLY)
return 0;
lock_super(sb);
/* Reset the needs_recovery flag before the fs is unlocked. */
EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
ext4_commit_super(sb, 1);
unlock_super(sb);
return 0;
}
/*
* Structure to save mount options for ext4_remount's benefit
*/
struct ext4_mount_options {
unsigned long s_mount_opt;
unsigned long s_mount_opt2;
uid_t s_resuid;
gid_t s_resgid;
unsigned long s_commit_interval;
u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
int s_jquota_fmt;
char *s_qf_names[MAXQUOTAS];
#endif
};
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
struct ext4_super_block *es;
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t n_blocks_count = 0;
unsigned long old_sb_flags;
struct ext4_mount_options old_opts;
int enable_quota = 0;
ext4_group_t g;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
int err = 0;
#ifdef CONFIG_QUOTA
int i;
#endif
char *orig_data = kstrdup(data, GFP_KERNEL);
/* Store the original options */
lock_super(sb);
old_sb_flags = sb->s_flags;
old_opts.s_mount_opt = sbi->s_mount_opt;
old_opts.s_mount_opt2 = sbi->s_mount_opt2;
old_opts.s_resuid = sbi->s_resuid;
old_opts.s_resgid = sbi->s_resgid;
old_opts.s_commit_interval = sbi->s_commit_interval;
old_opts.s_min_batch_time = sbi->s_min_batch_time;
old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++)
old_opts.s_qf_names[i] = sbi->s_qf_names[i];
#endif
if (sbi->s_journal && sbi->s_journal->j_task->io_context)
journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
/*
* Allow the "check" option to be passed as a remount option.
*/
if (!parse_options(data, sb, NULL, &journal_ioprio,
&n_blocks_count, 1)) {
err = -EINVAL;
goto restore_opts;
}
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
es = sbi->s_es;
if (sbi->s_journal) {
ext4_init_journal_params(sb, sbi->s_journal);
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
}
if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
n_blocks_count > ext4_blocks_count(es)) {
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
err = -EROFS;
goto restore_opts;
}
if (*flags & MS_RDONLY) {
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
/*
* First of all, the unconditional stuff we have to do
* to disable replay of the journal when we next remount
*/
sb->s_flags |= MS_RDONLY;
/*
* OK, test if we are remounting a valid rw partition
* readonly, and if so set the rdonly flag and then
* mark the partition as valid again.
*/
if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
(sbi->s_mount_state & EXT4_VALID_FS))
es->s_state = cpu_to_le16(sbi->s_mount_state);
if (sbi->s_journal)
ext4_mark_recovery_complete(sb, es);
} else {
/* Make sure we can mount this feature set readwrite */
if (!ext4_feature_set_ok(sb, 0)) {
err = -EROFS;
goto restore_opts;
}
/*
* Make sure the group descriptor checksums
* are sane. If they aren't, refuse to remount r/w.
*/
for (g = 0; g < sbi->s_groups_count; g++) {
struct ext4_group_desc *gdp =
ext4_get_group_desc(sb, g, NULL);
if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
ext4_msg(sb, KERN_ERR,
"ext4_remount: Checksum for group %u failed (%u!=%u)",
g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
le16_to_cpu(gdp->bg_checksum));
err = -EINVAL;
goto restore_opts;
}
}
/*
* If we have an unprocessed orphan list hanging
* around from a previously readonly bdev mount,
* require a full umount/remount for now.
*/
if (es->s_last_orphan) {
ext4_msg(sb, KERN_WARNING, "Couldn't "
"remount RDWR because of unprocessed "
"orphan inode list. Please "
"umount/remount instead");
err = -EINVAL;
goto restore_opts;
}
/*
* Mounting a RDONLY partition read-write, so reread
* and store the current valid flag. (It may have
* been changed by e2fsck since we originally mounted
* the partition.)
*/
if (sbi->s_journal)
ext4_clear_journal_err(sb, es);
sbi->s_mount_state = le16_to_cpu(es->s_state);
if ((err = ext4_group_extend(sb, es, n_blocks_count)))
goto restore_opts;
if (!ext4_setup_super(sb, es, 0))
sb->s_flags &= ~MS_RDONLY;
if (EXT4_HAS_INCOMPAT_FEATURE(sb,
EXT4_FEATURE_INCOMPAT_MMP))
if (ext4_multi_mount_protect(sb,
le64_to_cpu(es->s_mmp_block))) {
err = -EROFS;
goto restore_opts;
}
enable_quota = 1;
}
}
/*
* Reinitialize lazy itable initialization thread based on
* current settings
*/
if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
ext4_unregister_li_request(sb);
else {
ext4_group_t first_not_zeroed;
first_not_zeroed = ext4_has_uninit_itable(sb);
ext4_register_li_request(sb, first_not_zeroed);
}
ext4_setup_system_zone(sb);
if (sbi->s_journal == NULL)
ext4_commit_super(sb, 1);
#ifdef CONFIG_QUOTA
/* Release old quota file names */
for (i = 0; i < MAXQUOTAS; i++)
if (old_opts.s_qf_names[i] &&
old_opts.s_qf_names[i] != sbi->s_qf_names[i])
kfree(old_opts.s_qf_names[i]);
#endif
unlock_super(sb);
if (enable_quota)
dquot_resume(sb, -1);
ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
kfree(orig_data);
return 0;
restore_opts:
sb->s_flags = old_sb_flags;
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_mount_opt2 = old_opts.s_mount_opt2;
sbi->s_resuid = old_opts.s_resuid;
sbi->s_resgid = old_opts.s_resgid;
sbi->s_commit_interval = old_opts.s_commit_interval;
sbi->s_min_batch_time = old_opts.s_min_batch_time;
sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
for (i = 0; i < MAXQUOTAS; i++) {
if (sbi->s_qf_names[i] &&
old_opts.s_qf_names[i] != sbi->s_qf_names[i])
kfree(sbi->s_qf_names[i]);
sbi->s_qf_names[i] = old_opts.s_qf_names[i];
}
#endif
unlock_super(sb);
kfree(orig_data);
return err;
}
/*
* Note: calculating the overhead so we can be compatible with
* historical BSD practice is quite difficult in the face of
* clusters/bigalloc. This is because multiple metadata blocks from
* different block groups can end up in the same allocation cluster.
* Calculating the exact overhead in the face of clustered allocation
* requires either O(all block bitmaps) in memory or O(number of block
* groups**2) in time. We will still calculate the overhead for
* older file systems --- and if we come across a bigalloc file
* system with zero in s_overhead_clusters the estimate will be close to
* correct especially for very large cluster sizes --- but for newer
* file systems, it's better to calculate this figure once at mkfs
* time, and store it in the superblock. If the superblock value is
* present (even for non-bigalloc file systems), we will use it.
*/
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
struct ext4_group_desc *gdp;
u64 fsid;
s64 bfree;
if (test_opt(sb, MINIX_DF)) {
sbi->s_overhead_last = 0;
} else if (es->s_overhead_clusters) {
sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
} else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
ext4_fsblk_t overhead = 0;
/*
* Compute the overhead (FS structures). This is constant
* for a given filesystem unless the number of block groups
* changes so we cache the previous value until it does.
*/
/*
* All of the blocks before first_data_block are
* overhead
*/
overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
/*
* Add the overhead found in each block group
*/
for (i = 0; i < ngroups; i++) {
gdp = ext4_get_group_desc(sb, i, NULL);
overhead += ext4_num_overhead_clusters(sb, i, gdp);
cond_resched();
}
sbi->s_overhead_last = overhead;
smp_wmb();
sbi->s_blocks_last = ext4_blocks_count(es);
}
buf->f_type = EXT4_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (ext4_blocks_count(es) -
EXT4_C2B(sbi, sbi->s_overhead_last));
bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
/* prevent underflow in case little free space is available */
buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
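/* f_bavail is what unprivileged users can allocate: free blocks
* minus the reserved blocks, clamped at zero below. */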
buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
if (buf->f_bfree < ext4_r_blocks_count(es))
buf->f_bavail = 0;
buf->f_files = le32_to_cpu(es->s_inodes_count);
buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
buf->f_namelen = EXT4_NAME_LEN;
fsid = le64_to_cpup((void *)es->s_uuid) ^
le64_to_cpup((void *)es->s_uuid + sizeof(u64));
buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
return 0;
}
/* Helper function for writing quotas on sync - we need to start transaction
* before the quota file is locked for write. Otherwise deadlocks are possible:
* Process 1 Process 2
* ext4_create() quota_sync()
* jbd2_journal_start() write_dquot()
* dquot_initialize() down(dqio_mutex)
* down(dqio_mutex) jbd2_journal_start()
*
*/
#ifdef CONFIG_QUOTA
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
static int ext4_write_dquot(struct dquot *dquot)
{
int ret, err;
handle_t *handle;
struct inode *inode;
inode = dquot_to_inode(dquot);
handle = ext4_journal_start(inode,
EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit(dquot);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
static int ext4_acquire_dquot(struct dquot *dquot)
{
int ret, err;
handle_t *handle;
handle = ext4_journal_start(dquot_to_inode(dquot),
EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_acquire(dquot);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
static int ext4_release_dquot(struct dquot *dquot)
{
int ret, err;
handle_t *handle;
handle = ext4_journal_start(dquot_to_inode(dquot),
EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle)) {
/* Release dquot anyway to avoid endless cycle in dqput() */
dquot_release(dquot);
return PTR_ERR(handle);
}
ret = dquot_release(dquot);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
/* Are we journaling quotas? */
if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
dquot_mark_dquot_dirty(dquot);
return ext4_write_dquot(dquot);
} else {
return dquot_mark_dquot_dirty(dquot);
}
}
static int ext4_write_info(struct super_block *sb, int type)
{
int ret, err;
handle_t *handle;
/* Data block + inode block */
handle = ext4_journal_start(sb->s_root->d_inode, 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit_info(sb, type);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
/*
* Turn on quotas during mount time - we need to find
* the quota file and such...
*/
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
EXT4_SB(sb)->s_jquota_fmt, type);
}
/*
* Standard function to be called on quota_on
*/
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
struct path *path)
{
int err;
if (!test_opt(sb, QUOTA))
return -EINVAL;
/* Quotafile not on the same filesystem? */
if (path->mnt->mnt_sb != sb)
return -EXDEV;
/* Journaling quota? */
if (EXT4_SB(sb)->s_qf_names[type]) {
/* Quotafile not in fs root? */
if (path->dentry->d_parent != sb->s_root)
ext4_msg(sb, KERN_WARNING,
"Quota file not on filesystem root. "
"Journaled quota will not work");
}
/*
* When we journal data on the quota file, we have to flush the journal to see
* all updates to the file when we bypass the pagecache...
*/
if (EXT4_SB(sb)->s_journal &&
ext4_should_journal_data(path->dentry->d_inode)) {
/*
* We don't need to lock updates but journal_flush() could
* otherwise be livelocked...
*/
jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
if (err)
return err;
}
return dquot_quota_on(sb, type, format_id, path);
}
static int ext4_quota_off(struct super_block *sb, int type)
{
struct inode *inode = sb_dqopt(sb)->files[type];
handle_t *handle;
/* Force all delayed allocation blocks to be allocated.
* Caller already holds s_umount sem */
if (test_opt(sb, DELALLOC))
sync_filesystem(sb);
if (!inode)
goto out;
/* Update modification times of quota files when userspace can
* start looking at them */
handle = ext4_journal_start(inode, 1);
if (IS_ERR(handle))
goto out;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out:
return dquot_quota_off(sb, type);
}
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
* itself serializes the operations (and no one else should touch the files)
* we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
struct inode *inode = sb_dqopt(sb)->files[type];
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
int tocopy;
size_t toread;
struct buffer_head *bh;
loff_t i_size = i_size_read(inode);
if (off > i_size)
return 0;
if (off+len > i_size)
len = i_size-off;
toread = len;
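/*
* Copy one block at a time; only the first block may start at a
* non-zero offset, and a hole (bh == NULL) reads back as zeroes.
*/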
while (toread > 0) {
tocopy = sb->s_blocksize - offset < toread ?
sb->s_blocksize - offset : toread;
bh = ext4_bread(NULL, inode, blk, 0, &err);
if (err)
return err;
if (!bh) /* A hole? */
memset(data, 0, tocopy);
else
memcpy(data, bh->b_data+offset, tocopy);
brelse(bh);
offset = 0;
toread -= tocopy;
data += tocopy;
blk++;
}
return len;
}
/* Write to quotafile (we know the transaction is already started and has
* enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
struct inode *inode = sb_dqopt(sb)->files[type];
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
if (EXT4_SB(sb)->s_journal && !handle) {
ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
" cancelled because transaction is not started",
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
/*
* Since we account for only one data block in the transaction credits,
* the write must not cross a block boundary.
*/
if (sb->s_blocksize - offset < len) {
ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
" cancelled because not block aligned",
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
bh = ext4_bread(handle, inode, blk, 1, &err);
if (!bh)
goto out;
err = ext4_journal_get_write_access(handle, bh);
if (err) {
brelse(bh);
goto out;
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, len);
flush_dcache_page(bh->b_page);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
out:
if (err) {
mutex_unlock(&inode->i_mutex);
return err;
}
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
ext4_mark_inode_dirty(handle, inode);
}
mutex_unlock(&inode->i_mutex);
return len;
}
#endif
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static inline void register_as_ext2(void)
{
int err = register_filesystem(&ext2_fs_type);
if (err)
printk(KERN_WARNING
"EXT4-fs: Unable to register as ext2 (%d)\n", err);
}
static inline void unregister_as_ext2(void)
{
unregister_filesystem(&ext2_fs_type);
}
static inline int ext2_feature_set_ok(struct super_block *sb)
{
if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP))
return 0;
if (sb->s_flags & MS_RDONLY)
return 1;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))
return 0;
return 1;
}
MODULE_ALIAS("ext2");
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif
#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static inline void register_as_ext3(void)
{
int err = register_filesystem(&ext3_fs_type);
if (err)
printk(KERN_WARNING
"EXT4-fs: Unable to register as ext3 (%d)\n", err);
}
static inline void unregister_as_ext3(void)
{
unregister_filesystem(&ext3_fs_type);
}
static inline int ext3_feature_set_ok(struct super_block *sb)
{
if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP))
return 0;
if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
return 0;
if (sb->s_flags & MS_RDONLY)
return 1;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP))
return 0;
return 1;
}
MODULE_ALIAS("ext3");
#else
static inline void register_as_ext3(void) { }
static inline void unregister_as_ext3(void) { }
static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; }
#endif
static struct file_system_type ext4_fs_type = {
.owner = THIS_MODULE,
.name = "ext4",
.mount = ext4_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
static int __init ext4_init_feat_adverts(void)
{
struct ext4_features *ef;
int ret = -ENOMEM;
ef = kzalloc(sizeof(struct ext4_features), GFP_KERNEL);
if (!ef)
goto out;
ef->f_kobj.kset = ext4_kset;
init_completion(&ef->f_kobj_unregister);
ret = kobject_init_and_add(&ef->f_kobj, &ext4_feat_ktype, NULL,
"features");
if (ret) {
kfree(ef);
goto out;
}
ext4_feat = ef;
ret = 0;
out:
return ret;
}
static void ext4_exit_feat_adverts(void)
{
kobject_put(&ext4_feat->f_kobj);
wait_for_completion(&ext4_feat->f_kobj_unregister);
kfree(ext4_feat);
}
/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
static int __init ext4_init_fs(void)
{
int i, err;
ext4_check_flag_values();
for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
mutex_init(&ext4__aio_mutex[i]);
init_waitqueue_head(&ext4__ioend_wq[i]);
}
err = ext4_init_pageio();
if (err)
return err;
err = ext4_init_system_zone();
if (err)
goto out6;
ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
if (!ext4_kset)
goto out5;
ext4_proc_root = proc_mkdir("fs/ext4", NULL);
err = ext4_init_feat_adverts();
if (err)
goto out4;
err = ext4_init_mballoc();
if (err)
goto out3;
err = ext4_init_xattr();
if (err)
goto out2;
err = init_inodecache();
if (err)
goto out1;
register_as_ext3();
register_as_ext2();
err = register_filesystem(&ext4_fs_type);
if (err)
goto out;
ext4_li_info = NULL;
mutex_init(&ext4_li_mtx);
return 0;
out:
unregister_as_ext2();
unregister_as_ext3();
destroy_inodecache();
out1:
ext4_exit_xattr();
out2:
ext4_exit_mballoc();
out3:
ext4_exit_feat_adverts();
out4:
if (ext4_proc_root)
remove_proc_entry("fs/ext4", NULL);
kset_unregister(ext4_kset);
out5:
ext4_exit_system_zone();
out6:
ext4_exit_pageio();
return err;
}
static void __exit ext4_exit_fs(void)
{
ext4_destroy_lazyinit_thread();
unregister_as_ext2();
unregister_as_ext3();
unregister_filesystem(&ext4_fs_type);
destroy_inodecache();
ext4_exit_xattr();
ext4_exit_mballoc();
ext4_exit_feat_adverts();
remove_proc_entry("fs/ext4", NULL);
kset_unregister(ext4_kset);
ext4_exit_system_zone();
ext4_exit_pageio();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_3631_0 |
crossvul-cpp_data_good_5797_0 |
/*
* Local APIC virtualization
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright (C) 2007 Novell
* Copyright (C) 2007 Intel
* Copyright 2009 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Dor Laor <dor.laor@qumranet.com>
* Gregory Haskins <ghaskins@novell.com>
* Yaozu (Eddie) Dong <eddie.dong@intel.com>
*
* Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
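/*
* 32-bit kernels have no native 64-bit modulo, so derive it from
* div64_u64(); on 64-bit kernels the plain % operator is fine.
*/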
#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"
#define APIC_BUS_CYCLE_NS 1
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium CPUs (SDM 8.4.8) */
#define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH (1 << 12)
/* the following defines are not in apicdef.h */
#define APIC_SHORT_MASK 0xc0000
#define APIC_DEST_NOSHORT 0x0
#define APIC_DEST_MASK 0x800
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
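/*
* The 256 vectors are spread across eight 32-bit registers spaced 16
* bytes apart. VEC_POS() gives the bit within a register and
* REG_POS() the register's byte offset, e.g. vector 0x31 maps to bit
* 17 of the register at offset 0x10.
*/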
static unsigned int min_timer_period_us = 500;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
*((u32 *) (apic->regs + reg_off)) = val;
}
static inline int apic_test_vector(int vec, void *bitmap)
{
return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
struct kvm_lapic *apic = vcpu->arch.apic;
return apic_test_vector(vector, apic->regs + APIC_ISR) ||
apic_test_vector(vector, apic->regs + APIC_IRR);
}
static inline void apic_set_vector(int vec, void *bitmap)
{
set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline void apic_clear_vector(int vec, void *bitmap)
{
clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
if ((kvm_apic_get_reg(apic, APIC_SPIV) ^ val) & APIC_SPIV_APIC_ENABLED) {
if (val & APIC_SPIV_APIC_ENABLED)
static_key_slow_dec_deferred(&apic_sw_disabled);
else
static_key_slow_inc(&apic_sw_disabled.key);
}
apic_set_reg(apic, APIC_SPIV, val);
}
static inline int apic_enabled(struct kvm_lapic *apic)
{
return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}
#define LVT_MASK \
(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
#define LINT_MASK \
(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
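/* In xAPIC mode the 8-bit APIC ID is held in bits 31:24 of APIC_ID. */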
static inline int kvm_apic_id(struct kvm_lapic *apic)
{
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
#define KVM_X2APIC_CID_BITS 0
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
mutex_lock(&kvm->arch.apic_map_lock);
if (!new)
goto out;
new->ldr_bits = 8;
/* flat mode is default */
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
u32 ldr;
if (!kvm_apic_present(vcpu))
continue;
/*
* All APICs have to be configured in the same mode by an OS.
* We take advantage of this while building the logical id lookup
* table. After reset APICs are in xapic/flat mode, so if we
* find an apic with a different setting we assume this is the mode
* the OS wants all apics to be in; build the lookup table accordingly.
*/
if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
new->cid_shift = 4;
new->cid_mask = 0xf;
new->lid_mask = 0xf;
}
new->phys_map[kvm_apic_id(apic)] = apic;
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
lid = apic_logical_id(new, ldr);
if (lid)
new->logical_map[cid][ffs(lid) - 1] = apic;
}
out:
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
kfree_rcu(old, rcu);
kvm_vcpu_request_scan_ioapic(kvm);
}
static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
{
apic_set_reg(apic, APIC_ID, id << 24);
recalculate_apic_map(apic->vcpu->kvm);
}
static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
apic_set_reg(apic, APIC_LDR, id);
recalculate_apic_map(apic->vcpu->kvm);
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}
static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}
static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
}
static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
}
static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
return ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) ==
APIC_LVT_TIMER_TSCDEADLINE);
}
static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}
void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *feat;
u32 v = APIC_VERSION;
if (!kvm_vcpu_has_lapic(vcpu))
return;
feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
v |= APIC_LVR_DIRECTED_EOI;
apic_set_reg(apic, APIC_LVR, v);
}
static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
LVT_MASK, /* part LVTT mask, timer mode mask added at runtime */
LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
LVT_MASK | APIC_MODE_MASK, /* LVTPC */
LINT_MASK, LINT_MASK, /* LVT0-1 */
LVT_MASK /* LVTERR */
};
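/*
* Scan the eight 32-bit registers that make up an IRR/ISR-style
* bitmap from the highest register down; fls() then picks out the
* highest set bit. Returns -1 if no vector is pending.
*/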
static int find_highest_vector(void *bitmap)
{
int vec;
u32 *reg;
for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
vec >= 0; vec -= APIC_VECTORS_PER_REG) {
reg = bitmap + REG_POS(vec);
if (*reg)
return fls(*reg) - 1 + vec;
}
return -1;
}
static u8 count_vectors(void *bitmap)
{
int vec;
u32 *reg;
u8 count = 0;
for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
reg = bitmap + REG_POS(vec);
count += hweight32(*reg);
}
return count;
}
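/*
* Merge the posted-interrupt request bitmap into the IRR: atomically
* fetch-and-clear each of the eight PIR dwords and OR it into the
* matching IRR register.
*/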
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
u32 i, pir_val;
struct kvm_lapic *apic = vcpu->arch.apic;
for (i = 0; i <= 7; i++) {
pir_val = xchg(&pir[i], 0);
if (pir_val)
*((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
}
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = true;
apic_set_vector(vec, apic->regs + APIC_IRR);
}
static inline int apic_search_irr(struct kvm_lapic *apic)
{
return find_highest_vector(apic->regs + APIC_IRR);
}
static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
int result;
/*
* Note that irr_pending is just a hint. It will always be
* true with virtual interrupt delivery enabled.
*/
if (!apic->irr_pending)
return -1;
kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
result = apic_search_irr(apic);
ASSERT(result == -1 || result >= 16);
return result;
}
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = false;
apic_clear_vector(vec, apic->regs + APIC_IRR);
if (apic_search_irr(apic) != -1)
apic->irr_pending = true;
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
/*
* ISR (in service register) bit is set when injecting an interrupt.
* The highest vector is injected. Thus the latest bit set matches
* the highest bit in ISR.
*/
apic->highest_isr_cache = vec;
}
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
--apic->isr_count;
BUG_ON(apic->isr_count < 0);
apic->highest_isr_cache = -1;
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
int highest_irr;
/* This may race with setting of irr in __apic_accept_irq() and
* the value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq()
* will cause vmexit immediately and the value will be recalculated
* on the next vmentry.
*/
if (!kvm_vcpu_has_lapic(vcpu))
return 0;
highest_irr = apic_find_highest_irr(vcpu->arch.apic);
return highest_irr;
}
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
unsigned long *dest_map);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
unsigned long *dest_map)
{
struct kvm_lapic *apic = vcpu->arch.apic;
return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
irq->level, irq->trig_mode, dest_map);
}
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
sizeof(val));
}
static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
sizeof(*val));
}
static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
u8 val;
if (pv_eoi_get_user(vcpu, &val) < 0) {
apic_debug("Can't read EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return false;
}
return val & 0x1;
}
static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
apic_debug("Can't set EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return;
}
__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
apic_debug("Can't clear EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return;
}
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
int result;
/* Note that isr_count is always 1 with vid enabled */
if (!apic->isr_count)
return -1;
if (likely(apic->highest_isr_cache != -1))
return apic->highest_isr_cache;
result = find_highest_vector(apic->regs + APIC_ISR);
ASSERT(result == -1 || result >= 16);
return result;
}
void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int i;
for (i = 0; i < 8; i++)
apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]);
}
static void apic_update_ppr(struct kvm_lapic *apic)
{
u32 tpr, isrv, ppr, old_ppr;
int isr;
old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
isr = apic_find_highest_isr(apic);
isrv = (isr != -1) ? isr : 0;
if ((tpr & 0xf0) >= (isrv & 0xf0))
ppr = tpr & 0xff;
else
ppr = isrv & 0xf0;
apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
apic, ppr, isr, isrv);
if (old_ppr != ppr) {
apic_set_reg(apic, APIC_PROCPRI, ppr);
if (ppr < old_ppr)
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
}
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
apic_set_reg(apic, APIC_TASKPRI, tpr);
apic_update_ppr(apic);
}
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
return dest == 0xff || kvm_apic_id(apic) == dest;
}
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
int result = 0;
u32 logical_id;
if (apic_x2apic_mode(apic)) {
logical_id = kvm_apic_get_reg(apic, APIC_LDR);
return logical_id & mda;
}
logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR));
switch (kvm_apic_get_reg(apic, APIC_DFR)) {
case APIC_DFR_FLAT:
if (logical_id & mda)
result = 1;
break;
case APIC_DFR_CLUSTER:
if (((logical_id >> 4) == (mda >> 0x4))
&& (logical_id & mda & 0xf))
result = 1;
break;
default:
apic_debug("Bad DFR vcpu %d: %08x\n",
apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
break;
}
return result;
}
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, int dest, int dest_mode)
{
int result = 0;
struct kvm_lapic *target = vcpu->arch.apic;
apic_debug("target %p, source %p, dest 0x%x, "
"dest_mode 0x%x, short_hand 0x%x\n",
target, source, dest, dest_mode, short_hand);
ASSERT(target);
switch (short_hand) {
case APIC_DEST_NOSHORT:
if (dest_mode == 0)
/* Physical mode. */
result = kvm_apic_match_physical_addr(target, dest);
else
/* Logical mode. */
result = kvm_apic_match_logical_addr(target, dest);
break;
case APIC_DEST_SELF:
result = (target == source);
break;
case APIC_DEST_ALLINC:
result = 1;
break;
case APIC_DEST_ALLBUT:
result = (target != source);
break;
default:
apic_debug("kvm: apic: Bad dest shorthand value %x\n",
short_hand);
break;
}
return result;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
{
struct kvm_apic_map *map;
unsigned long bitmap = 1;
struct kvm_lapic **dst;
int i;
bool ret = false;
*r = -1;
if (irq->shorthand == APIC_DEST_SELF) {
*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
return true;
}
if (irq->shorthand)
return false;
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
if (!map)
goto out;
if (irq->dest_mode == 0) { /* physical mode */
if (irq->delivery_mode == APIC_DM_LOWEST ||
irq->dest_id == 0xff)
goto out;
dst = &map->phys_map[irq->dest_id & 0xff];
} else {
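/* Logical mode: align dest_id with the top ldr_bits of the LDR
* before extracting the cluster and logical ids. */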
u32 mda = irq->dest_id << (32 - map->ldr_bits);
dst = map->logical_map[apic_cluster_id(map, mda)];
bitmap = apic_logical_id(map, mda);
if (irq->delivery_mode == APIC_DM_LOWEST) {
int l = -1;
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
if (l < 0)
l = i;
else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
l = i;
}
bitmap = (l >= 0) ? 1 << l : 0;
}
}
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
if (*r < 0)
*r = 0;
*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
}
ret = true;
out:
rcu_read_unlock();
return ret;
}
/*
* Add a pending IRQ into lapic.
* Return 1 if successfully added and 0 if discarded.
*/
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
unsigned long *dest_map)
{
int result = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
switch (delivery_mode) {
case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++;
case APIC_DM_FIXED:
/* FIXME add logic for vcpu on reset */
if (unlikely(!apic_enabled(apic)))
break;
result = 1;
if (dest_map)
__set_bit(vcpu->vcpu_id, dest_map);
if (kvm_x86_ops->deliver_posted_interrupt)
kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
else {
apic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}
trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
trig_mode, vector, false);
break;
case APIC_DM_REMRD:
result = 1;
vcpu->arch.pv.pv_unhalted = 1;
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_SMI:
apic_debug("Ignoring guest SMI\n");
break;
case APIC_DM_NMI:
result = 1;
kvm_inject_nmi(vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_INIT:
if (!trig_mode || level) {
result = 1;
/* assumes that there are only KVM_APIC_INIT/SIPI */
apic->pending_events = (1UL << KVM_APIC_INIT);
/* make sure pending_events is visible before sending
* the request */
smp_wmb();
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
} else {
apic_debug("Ignoring de-assert INIT to vcpu %d\n",
vcpu->vcpu_id);
}
break;
case APIC_DM_STARTUP:
apic_debug("SIPI to vcpu %d vector 0x%02x\n",
vcpu->vcpu_id, vector);
result = 1;
apic->sipi_vector = vector;
/* make sure sipi_vector is visible for the receiver */
smp_wmb();
set_bit(KVM_APIC_SIPI, &apic->pending_events);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_EXTINT:
/*
* Should only be called by kvm_apic_local_deliver() with LVT0,
* before NMI watchdog was enabled. Already handled by
* kvm_apic_accept_pic_intr().
*/
break;
default:
printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
delivery_mode);
break;
}
return result;
}
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
int trigger_mode;
if (apic_test_vector(vector, apic->regs + APIC_TMR))
trigger_mode = IOAPIC_LEVEL_TRIG;
else
trigger_mode = IOAPIC_EDGE_TRIG;
kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}
}
static int apic_set_eoi(struct kvm_lapic *apic)
{
int vector = apic_find_highest_isr(apic);
trace_kvm_eoi(apic, vector);
/*
* Not every EOI write has a corresponding ISR bit set;
* one example is when the kernel checks the timer in setup_IO_APIC().
*/
if (vector == -1)
return vector;
apic_clear_isr(vector, apic);
apic_update_ppr(apic);
kvm_ioapic_send_eoi(apic, vector);
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
return vector;
}
/*
* this interface assumes a trap-like exit, which has already finished
* desired side effect including vISR and vPPR update.
*/
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
struct kvm_lapic *apic = vcpu->arch.apic;
trace_kvm_eoi(apic, vector);
kvm_ioapic_send_eoi(apic, vector);
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
static void apic_send_ipi(struct kvm_lapic *apic)
{
u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
struct kvm_lapic_irq irq;
irq.vector = icr_low & APIC_VECTOR_MASK;
irq.delivery_mode = icr_low & APIC_MODE_MASK;
irq.dest_mode = icr_low & APIC_DEST_MASK;
irq.level = icr_low & APIC_INT_ASSERT;
irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
irq.shorthand = icr_low & APIC_SHORT_MASK;
if (apic_x2apic_mode(apic))
irq.dest_id = icr_high;
else
irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
trace_kvm_apic_ipi(icr_low, irq.dest_id);
apic_debug("icr_high 0x%x, icr_low 0x%x, "
"short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
"dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
icr_high, icr_low, irq.shorthand, irq.dest_id,
irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
irq.vector);
kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
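/*
* Derive the current-count register from the hrtimer: the remaining
* time, taken modulo the period so periodic timers wrap, divided by
* the bus cycle length scaled by the divide configuration.
*/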
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
ktime_t remaining;
s64 ns;
u32 tmcct;
ASSERT(apic != NULL);
/* if initial count is 0, current count should also be 0 */
if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
apic->lapic_timer.period == 0)
return 0;
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
if (ktime_to_ns(remaining) < 0)
remaining = ktime_set(0, 0);
ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
tmcct = div64_u64(ns,
(APIC_BUS_CYCLE_NS * apic->divide_count));
return tmcct;
}
static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
struct kvm_vcpu *vcpu = apic->vcpu;
struct kvm_run *run = vcpu->run;
kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
run->tpr_access.rip = kvm_rip_read(vcpu);
run->tpr_access.is_write = write;
}
static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
if (apic->vcpu->arch.tpr_access_reporting)
__report_tpr_access(apic, write);
}
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
u32 val = 0;
if (offset >= LAPIC_MMIO_LENGTH)
return 0;
switch (offset) {
case APIC_ID:
if (apic_x2apic_mode(apic))
val = kvm_apic_id(apic);
else
val = kvm_apic_id(apic) << 24;
break;
case APIC_ARBPRI:
apic_debug("Access APIC ARBPRI register which is for P6\n");
break;
case APIC_TMCCT: /* Timer CCR */
if (apic_lvtt_tscdeadline(apic))
return 0;
val = apic_get_tmcct(apic);
break;
case APIC_PROCPRI:
apic_update_ppr(apic);
val = kvm_apic_get_reg(apic, offset);
break;
case APIC_TASKPRI:
report_tpr_access(apic, false);
/* fall thru */
default:
val = kvm_apic_get_reg(apic, offset);
break;
}
return val;
}
static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
return container_of(dev, struct kvm_lapic, dev);
}
static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
void *data)
{
unsigned char alignment = offset & 0xf;
u32 result;
/* this bitmask has a bit cleared for each reserved register */
static const u64 rmask = 0x43ff01ffffffe70cULL;
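/* e.g. APIC_TASKPRI (offset 0x80) maps to bit 8, which is set. */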
if ((alignment + len) > 4) {
apic_debug("KVM_APIC_READ: alignment error %x %d\n",
offset, len);
return 1;
}
if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
apic_debug("KVM_APIC_READ: read reserved register %x\n",
offset);
return 1;
}
result = __apic_read(apic, offset & ~0xf);
trace_kvm_apic_read(offset, result);
switch (len) {
case 1:
case 2:
case 4:
memcpy(data, (char *)&result + alignment, len);
break;
default:
printk(KERN_ERR "Local APIC read with len = %x, "
"should be 1,2, or 4 instead\n", len);
break;
}
return 0;
}
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
return kvm_apic_hw_enabled(apic) &&
addr >= apic->base_address &&
addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
static int apic_mmio_read(struct kvm_io_device *this,
gpa_t address, int len, void *data)
{
struct kvm_lapic *apic = to_lapic(this);
u32 offset = address - apic->base_address;
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
apic_reg_read(apic, offset, len, data);
return 0;
}
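/*
* APIC_TDCR bits 0, 1 and 3 encode the divide configuration as
* 2^(n+1), with the all-ones encoding wrapping (via & 0x7) to a
* divide of 1. E.g. tdcr == 0xb gives tmp2 == 8 and divide_count == 1.
*/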
static void update_divide_count(struct kvm_lapic *apic)
{
u32 tmp1, tmp2, tdcr;
tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
tmp1 = tdcr & 0xf;
tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
apic->divide_count = 0x1 << (tmp2 & 0x7);
apic_debug("timer divide count is 0x%x\n",
apic->divide_count);
}
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
atomic_set(&apic->lapic_timer.pending, 0);
if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
/* lapic timer in oneshot or periodic mode */
now = apic->lapic_timer.timer.base->get_time();
apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
* APIC_BUS_CYCLE_NS * apic->divide_count;
if (!apic->lapic_timer.period)
return;
/*
* Do not allow the guest to program periodic timers with small
* interval, since the hrtimers are not throttled by the host
* scheduler.
*/
if (apic_lvtt_period(apic)) {
s64 min_period = min_timer_period_us * 1000LL;
if (apic->lapic_timer.period < min_period) {
pr_info_ratelimited(
"kvm: vcpu %i: requested %lld ns "
"lapic timer period limited to %lld ns\n",
apic->vcpu->vcpu_id,
apic->lapic_timer.period, min_period);
apic->lapic_timer.period = min_period;
}
}
hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, apic->lapic_timer.period),
HRTIMER_MODE_ABS);
apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
PRIx64 ", "
"timer initial count 0x%x, period %lldns, "
"expire @ 0x%016" PRIx64 ".\n", __func__,
APIC_BUS_CYCLE_NS, ktime_to_ns(now),
kvm_apic_get_reg(apic, APIC_TMICT),
apic->lapic_timer.period,
ktime_to_ns(ktime_add_ns(now,
apic->lapic_timer.period)));
} else if (apic_lvtt_tscdeadline(apic)) {
/* lapic timer in tsc deadline mode */
u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
u64 ns = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
unsigned long flags;
if (unlikely(!tscdeadline || !this_tsc_khz))
return;
local_irq_save(flags);
now = apic->lapic_timer.timer.base->get_time();
guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
}
hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
local_irq_restore(flags);
}
}
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));
if (apic_lvt_nmi_mode(lvt0_val)) {
if (!nmi_wd_enabled) {
apic_debug("Receive NMI setting on APIC_LVT0 "
"for cpu %d\n", apic->vcpu->vcpu_id);
apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
}
} else if (nmi_wd_enabled)
apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
}
static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
int ret = 0;
trace_kvm_apic_write(reg, val);
switch (reg) {
case APIC_ID: /* Local APIC ID */
if (!apic_x2apic_mode(apic))
kvm_apic_set_id(apic, val >> 24);
else
ret = 1;
break;
case APIC_TASKPRI:
report_tpr_access(apic, true);
apic_set_tpr(apic, val & 0xff);
break;
case APIC_EOI:
apic_set_eoi(apic);
break;
case APIC_LDR:
if (!apic_x2apic_mode(apic))
kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
else
ret = 1;
break;
case APIC_DFR:
if (!apic_x2apic_mode(apic)) {
apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
recalculate_apic_map(apic->vcpu->kvm);
} else
ret = 1;
break;
case APIC_SPIV: {
u32 mask = 0x3ff;
if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
mask |= APIC_SPIV_DIRECTED_EOI;
apic_set_spiv(apic, val & mask);
if (!(val & APIC_SPIV_APIC_ENABLED)) {
int i;
u32 lvt_val;
for (i = 0; i < APIC_LVT_NUM; i++) {
lvt_val = kvm_apic_get_reg(apic,
APIC_LVTT + 0x10 * i);
apic_set_reg(apic, APIC_LVTT + 0x10 * i,
lvt_val | APIC_LVT_MASKED);
}
atomic_set(&apic->lapic_timer.pending, 0);
}
break;
}
case APIC_ICR:
/* No delay here, so we always clear the pending bit */
apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
apic_send_ipi(apic);
break;
case APIC_ICR2:
if (!apic_x2apic_mode(apic))
val &= 0xff000000;
apic_set_reg(apic, APIC_ICR2, val);
break;
case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val);
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
case APIC_LVTERR:
/* TODO: Check vector */
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
apic_set_reg(apic, reg, val);
break;
case APIC_LVTT:
if ((kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask) !=
(val & apic->lapic_timer.timer_mode_mask))
hrtimer_cancel(&apic->lapic_timer.timer);
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
apic_set_reg(apic, APIC_LVTT, val);
break;
case APIC_TMICT:
if (apic_lvtt_tscdeadline(apic))
break;
hrtimer_cancel(&apic->lapic_timer.timer);
apic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
break;
case APIC_TDCR:
if (val & 4)
apic_debug("KVM_WRITE:TDCR %x\n", val);
apic_set_reg(apic, APIC_TDCR, val);
update_divide_count(apic);
break;
case APIC_ESR:
if (apic_x2apic_mode(apic) && val != 0) {
apic_debug("KVM_WRITE:ESR not zero %x\n", val);
ret = 1;
}
break;
case APIC_SELF_IPI:
if (apic_x2apic_mode(apic)) {
apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
} else
ret = 1;
break;
default:
ret = 1;
break;
}
if (ret)
apic_debug("Local APIC Write to read-only register %x\n", reg);
return ret;
}
static int apic_mmio_write(struct kvm_io_device *this,
gpa_t address, int len, const void *data)
{
struct kvm_lapic *apic = to_lapic(this);
unsigned int offset = address - apic->base_address;
u32 val;
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
/*
 * APIC registers must be aligned on a 128-bit boundary.
 * 32/64/128-bit registers must be accessed through 32-bit loads
 * and stores. Refer to SDM 8.4.1.
 */
if (len != 4 || (offset & 0xf)) {
/* Don't shout loud, $infamous_os would cause only noise. */
apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
return 0;
}
val = *(u32*)data;
/* too common printing */
if (offset != APIC_EOI)
apic_debug("%s: offset 0x%x with length 0x%x, and value is "
"0x%x\n", __func__, offset, len, val);
apic_reg_write(apic, offset & 0xff0, val);
return 0;
}
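/*
 * Example of the decode above (illustrative; the addresses are
 * hypothetical). A 4-byte guest write to 0xfee00380 with
 * base_address = 0xfee00000 yields:
 *
 *	offset = 0xfee00380 - 0xfee00000;	// 0x380
 *	offset & 0xf;				// 0 -> aligned, accepted
 *	offset & 0xff0;				// 0x380 == APIC_TMICT
 *
 * whereas a 2-byte write, or a 4-byte write to 0xfee00384, fails the
 * (len != 4 || (offset & 0xf)) check and is silently dropped.
 */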
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_has_lapic(vcpu))
apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
u32 val = 0;
/* hw has done the conditional check and inst decode */
offset &= 0xff0;
apic_reg_read(vcpu->arch.apic, offset, 4, &val);
/* TODO: optimize to just emulate side effect w/o one more write */
apic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!vcpu->arch.apic)
return;
hrtimer_cancel(&apic->lapic_timer.timer);
if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
static_key_slow_dec_deferred(&apic_hw_disabled);
if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
static_key_slow_dec_deferred(&apic_sw_disabled);
if (apic->regs)
free_page((unsigned long)apic->regs);
kfree(apic);
}
/*
*----------------------------------------------------------------------
* LAPIC interface
*----------------------------------------------------------------------
*/
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return 0;
return apic->lapic_timer.tscdeadline;
}
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;
hrtimer_cancel(&apic->lapic_timer.timer);
apic->lapic_timer.tscdeadline = data;
start_apic_timer(apic);
}
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return;
apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
| (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
}
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
u64 tpr;
if (!kvm_vcpu_has_lapic(vcpu))
return 0;
tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
return (tpr & 0xf0) >> 4;
}
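/*
 * Example (illustrative): CR8 carries only the upper nibble of the
 * task priority, so the two helpers above round-trip as follows for
 * a guest TPR of 0xa5:
 *
 *	cr8 = (0xa5 & 0xf0) >> 4;			// 0xa
 *	tpr = ((0xa & 0x0f) << 4) | (old_tpr & 4);	// 0xa0 (or 0xa4)
 *
 * bits 0-3 of the original TPR, except for the preserved bit 2, are
 * not representable through CR8.
 */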
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
u64 old_value = vcpu->arch.apic_base;
struct kvm_lapic *apic = vcpu->arch.apic;
if (!apic) {
value |= MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
return;
}
/* update jump label if enable bit changes */
if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
static_key_slow_dec_deferred(&apic_hw_disabled);
else
static_key_slow_inc(&apic_hw_disabled.key);
recalculate_apic_map(vcpu->kvm);
}
if (!kvm_vcpu_is_bsp(apic->vcpu))
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
if ((old_value ^ value) & X2APIC_ENABLE) {
if (value & X2APIC_ENABLE) {
u32 id = kvm_apic_id(apic);
u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
kvm_apic_set_ldr(apic, ldr);
kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
} else
kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
}
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
/* with FSB delivery interrupt, we can restart APIC functionality */
apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
"0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
}
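/*
 * Example (illustrative): the x2APIC logical destination computed
 * above packs a 16-bit cluster id and a one-hot intra-cluster bit.
 * For APIC id 0x23:
 *
 *	u32 ldr = ((0x23 >> 4) << 16) | (1 << (0x23 & 0xf));
 *	// cluster 0x2, bit 3 -> ldr == 0x00020008
 *
 * so ids 0x20..0x2f share cluster 2, each owning one bit of the low
 * half-word.
 */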
void kvm_lapic_reset(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
int i;
apic_debug("%s\n", __func__);
ASSERT(vcpu);
apic = vcpu->arch.apic;
ASSERT(apic != NULL);
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
kvm_apic_set_id(apic, vcpu->vcpu_id);
kvm_apic_set_version(apic->vcpu);
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_set_reg(apic, APIC_DFR, 0xffffffffU);
apic_set_spiv(apic, 0xff);
apic_set_reg(apic, APIC_TASKPRI, 0);
kvm_apic_set_ldr(apic, 0);
apic_set_reg(apic, APIC_ESR, 0);
apic_set_reg(apic, APIC_ICR, 0);
apic_set_reg(apic, APIC_ICR2, 0);
apic_set_reg(apic, APIC_TDCR, 0);
apic_set_reg(apic, APIC_TMICT, 0);
for (i = 0; i < 8; i++) {
apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
}
apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
apic->highest_isr_cache = -1;
update_divide_count(apic);
atomic_set(&apic->lapic_timer.pending, 0);
if (kvm_vcpu_is_bsp(vcpu))
kvm_lapic_set_base(vcpu,
vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
"0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
vcpu, kvm_apic_id(apic),
vcpu->arch.apic_base, apic->base_address);
}
/*
*----------------------------------------------------------------------
* timer interface
*----------------------------------------------------------------------
*/
static bool lapic_is_periodic(struct kvm_lapic *apic)
{
return apic_lvtt_period(apic);
}
int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
apic_lvt_enabled(apic, APIC_LVTT))
return atomic_read(&apic->lapic_timer.pending);
return 0;
}
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = kvm_apic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
vector = reg & APIC_VECTOR_MASK;
mode = reg & APIC_MODE_MASK;
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
NULL);
}
return 0;
}
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (apic)
kvm_apic_local_deliver(apic, APIC_LVT0);
}
static const struct kvm_io_device_ops apic_mmio_ops = {
.read = apic_mmio_read,
.write = apic_mmio_write,
};
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
struct kvm_vcpu *vcpu = apic->vcpu;
wait_queue_head_t *q = &vcpu->wq;
/*
* There is a race window between reading and incrementing, but we do
* not care about potentially losing timer events in the !reinject
* case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
* in vcpu_enter_guest.
*/
if (!atomic_read(&ktimer->pending)) {
atomic_inc(&ktimer->pending);
/* FIXME: this code should not know anything about vcpus */
kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
}
if (waitqueue_active(q))
wake_up_interruptible(q);
if (lapic_is_periodic(apic)) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
return HRTIMER_RESTART;
} else
return HRTIMER_NORESTART;
}
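/*
 * Example (illustrative): for a periodic LVTT timer with a 1 ms
 * period, each expiry above re-arms relative to the previous
 * expiration time rather than "now":
 *
 *	hrtimer_add_expires_ns(&ktimer->timer, 1000000);
 *	return HRTIMER_RESTART;
 *
 * which keeps the long-run rate accurate even when a callback runs
 * late; one-shot and tsc-deadline modes return HRTIMER_NORESTART.
 */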
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic;
ASSERT(vcpu != NULL);
apic_debug("apic_init %d\n", vcpu->vcpu_id);
apic = kzalloc(sizeof(*apic), GFP_KERNEL);
if (!apic)
goto nomem;
vcpu->arch.apic = apic;
apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
if (!apic->regs) {
printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
vcpu->vcpu_id);
goto nomem_free_apic;
}
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
apic->lapic_timer.timer.function = apic_timer_fn;
/*
* APIC is created enabled. This will prevent kvm_lapic_set_base from
* thinking that APIC state has changed.
*/
vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
kvm_lapic_set_base(vcpu,
APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);
static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
kvm_lapic_reset(vcpu);
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
return 0;
nomem_free_apic:
kfree(apic);
nomem:
return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int highest_irr;
if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
return -1;
apic_update_ppr(apic);
highest_irr = apic_find_highest_irr(apic);
if ((highest_irr == -1) ||
((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
return -1;
return highest_irr;
}
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
int r = 0;
if (!kvm_apic_hw_enabled(vcpu->arch.apic))
r = 1;
if ((lvt0 & APIC_LVT_MASKED) == 0 &&
GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
r = 1;
return r;
}
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return;
if (atomic_read(&apic->lapic_timer.pending) > 0) {
kvm_apic_local_deliver(apic, APIC_LVTT);
atomic_set(&apic->lapic_timer.pending, 0);
}
}
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
int vector = kvm_apic_has_interrupt(vcpu);
struct kvm_lapic *apic = vcpu->arch.apic;
if (vector == -1)
return -1;
apic_set_isr(vector, apic);
apic_update_ppr(apic);
apic_clear_irr(vector, apic);
return vector;
}
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
struct kvm_lapic *apic = vcpu->arch.apic;
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
/* call kvm_apic_set_id() to put apic into apic_map */
kvm_apic_set_id(apic, kvm_apic_id(apic));
kvm_apic_set_version(vcpu);
apic_update_ppr(apic);
hrtimer_cancel(&apic->lapic_timer.timer);
update_divide_count(apic);
start_apic_timer(apic);
apic->irr_pending = true;
apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
1 : count_vectors(apic->regs + APIC_ISR);
apic->highest_isr_cache = -1;
kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic));
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_rtc_eoi_tracking_restore_one(vcpu);
}
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
struct hrtimer *timer;
if (!kvm_vcpu_has_lapic(vcpu))
return;
timer = &vcpu->arch.apic->lapic_timer.timer;
if (hrtimer_cancel(timer))
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/*
* apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
*
* Detect whether guest triggered PV EOI since the
* last entry. If yes, set EOI on guest's behalf.
* Clear PV EOI in guest memory in any case.
*/
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
struct kvm_lapic *apic)
{
bool pending;
int vector;
/*
* PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
* and KVM_PV_EOI_ENABLED in guest memory as follows:
*
* KVM_APIC_PV_EOI_PENDING is unset:
* -> host disabled PV EOI.
* KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
* -> host enabled PV EOI, guest did not execute EOI yet.
* KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
* -> host enabled PV EOI, guest executed EOI.
*/
BUG_ON(!pv_eoi_enabled(vcpu));
pending = pv_eoi_get_pending(vcpu);
/*
* Clear pending bit in any case: it will be set again on vmentry.
* While this might not be ideal from performance point of view,
* this makes sure pv eoi is only enabled when we know it's safe.
*/
pv_eoi_clr_pending(vcpu);
if (pending)
return;
vector = apic_set_eoi(apic);
trace_kvm_pv_eoi(apic, vector);
}
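/*
 * Example (illustrative) of the protocol above on a vmexit where the
 * host had set KVM_APIC_PV_EOI_PENDING at the previous vmentry and
 * the guest EOI'd in the meantime (clearing its in-memory flag):
 *
 *	pending = pv_eoi_get_pending(vcpu);	// false: guest EOI'd
 *	pv_eoi_clr_pending(vcpu);		// reset for next entry
 *	vector = apic_set_eoi(apic);		// EOI on the guest's behalf
 *
 * Had the guest not EOI'd (pending == true), the function returns
 * right after clearing the bit and no EOI is synthesized.
 */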
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
sizeof(u32));
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
/*
* apic_sync_pv_eoi_to_guest - called before vmentry
*
* Detect whether it's safe to enable PV EOI and
* if yes do so.
*/
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
struct kvm_lapic *apic)
{
if (!pv_eoi_enabled(vcpu) ||
/* IRR set or many bits in ISR: could be nested. */
apic->irr_pending ||
/* Cache not set: could be safe but we don't bother. */
apic->highest_isr_cache == -1 ||
/* Need EOI to update ioapic. */
kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
/*
* PV EOI was disabled by apic_sync_pv_eoi_from_guest
* so we need not do anything here.
*/
return;
}
pv_eoi_set_pending(apic->vcpu);
}
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
u32 data, tpr;
int max_irr, max_isr;
struct kvm_lapic *apic = vcpu->arch.apic;
apic_sync_pv_eoi_to_guest(vcpu, apic);
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
max_irr = apic_find_highest_irr(apic);
if (max_irr < 0)
max_irr = 0;
max_isr = apic_find_highest_isr(apic);
if (max_isr < 0)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
sizeof(u32));
}
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
if (vapic_addr) {
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.apic->vapic_cache,
vapic_addr, sizeof(u32)))
return -EINVAL;
__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
} else {
__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
}
vcpu->arch.apic->vapic_addr = vapic_addr;
return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 reg = (msr - APIC_BASE_MSR) << 4;
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
/* if this is ICR write vector before command */
if (msr == 0x830)
apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
return apic_reg_write(apic, reg, (u32)data);
}
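/*
 * Example (illustrative): x2APIC MSRs map onto APIC register offsets
 * via (msr - APIC_BASE_MSR) << 4, with APIC_BASE_MSR == 0x800. The
 * ICR MSR therefore decodes as:
 *
 *	u32 reg = (0x830 - 0x800) << 4;		// 0x300 == APIC_ICR
 *
 * and since the 64-bit ICR spans two 32-bit registers, the write path
 * above stores data >> 32 into APIC_ICR2 (the destination field)
 * before the low word triggers apic_send_ipi() inside apic_reg_write().
 */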
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
return 1;
if (apic_reg_read(apic, reg, 4, &low))
return 1;
if (msr == 0x830)
apic_reg_read(apic, APIC_ICR2, 4, &high);
*data = (((u64)high) << 32) | low;
return 0;
}
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (!kvm_vcpu_has_lapic(vcpu))
return 1;
/* if this is ICR write vector before command */
if (reg == APIC_ICR)
apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
return apic_reg_write(apic, reg, (u32)data);
}
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u32 low, high = 0;
if (!kvm_vcpu_has_lapic(vcpu))
return 1;
if (apic_reg_read(apic, reg, 4, &low))
return 1;
if (reg == APIC_ICR)
apic_reg_read(apic, APIC_ICR2, 4, &high);
*data = (((u64)high) << 32) | low;
return 0;
}
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
u64 addr = data & ~KVM_MSR_ENABLED;
if (!IS_ALIGNED(addr, 4))
return 1;
vcpu->arch.pv_eoi.msr_val = data;
if (!pv_eoi_enabled(vcpu))
return 0;
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
addr, sizeof(u8));
}
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
unsigned int sipi_vector;
unsigned long pe;
if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
return;
pe = xchg(&apic->pending_events, 0);
if (test_bit(KVM_APIC_INIT, &pe)) {
kvm_lapic_reset(vcpu);
kvm_vcpu_reset(vcpu);
if (kvm_vcpu_is_bsp(apic->vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
}
if (test_bit(KVM_APIC_SIPI, &pe) &&
vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
/* evaluate pending_events before reading the vector */
smp_rmb();
sipi_vector = apic->sipi_vector;
pr_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, sipi_vector);
kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
}
void kvm_lapic_init(void)
{
/* do not patch jump label more than once per second */
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5797_0 |
crossvul-cpp_data_bad_3527_0 | /*
* fs/nfs/nfs4proc.c
*
* Client-side procedure declarations for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
* Andy Adamson <andros@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PROC
#define NFS4_POLL_RETRY_MIN (HZ/10)
#define NFS4_POLL_RETRY_MAX (15*HZ)
#define NFS4_MAX_LOOP_ON_RECOVER (10)
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, struct nfs4_state *);
static int nfs41_free_stateid(struct nfs_server *, struct nfs4_state *);
#endif
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
if (err >= -1000)
return err;
switch (err) {
case -NFS4ERR_RESOURCE:
return -EREMOTEIO;
case -NFS4ERR_WRONGSEC:
return -EPERM;
case -NFS4ERR_BADOWNER:
case -NFS4ERR_BADNAME:
return -EINVAL;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
break;
}
return -EIO;
}
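/*
 * Example (illustrative) of the mapping above: ordinary errnos pass
 * through the err >= -1000 check, while protocol-level errors are
 * either translated or collapsed to -EIO:
 *
 *	nfs4_map_errors(-EACCES);		// -13, returned unchanged
 *	nfs4_map_errors(-NFS4ERR_WRONGSEC);	// -> -EPERM
 *	nfs4_map_errors(-NFS4ERR_MOVED);	// unhandled -> -EIO
 */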
/*
* This is our standard bitmap for GETATTR requests.
*/
const u32 nfs4_fattr_bitmap[2] = {
FATTR4_WORD0_TYPE
| FATTR4_WORD0_CHANGE
| FATTR4_WORD0_SIZE
| FATTR4_WORD0_FSID
| FATTR4_WORD0_FILEID,
FATTR4_WORD1_MODE
| FATTR4_WORD1_NUMLINKS
| FATTR4_WORD1_OWNER
| FATTR4_WORD1_OWNER_GROUP
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY
};
const u32 nfs4_statfs_bitmap[2] = {
FATTR4_WORD0_FILES_AVAIL
| FATTR4_WORD0_FILES_FREE
| FATTR4_WORD0_FILES_TOTAL,
FATTR4_WORD1_SPACE_AVAIL
| FATTR4_WORD1_SPACE_FREE
| FATTR4_WORD1_SPACE_TOTAL
};
const u32 nfs4_pathconf_bitmap[2] = {
FATTR4_WORD0_MAXLINK
| FATTR4_WORD0_MAXNAME,
0
};
const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
| FATTR4_WORD0_MAXREAD
| FATTR4_WORD0_MAXWRITE
| FATTR4_WORD0_LEASE_TIME,
FATTR4_WORD1_TIME_DELTA
| FATTR4_WORD1_FS_LAYOUT_TYPES,
FATTR4_WORD2_LAYOUT_BLKSIZE
};
const u32 nfs4_fs_locations_bitmap[2] = {
FATTR4_WORD0_TYPE
| FATTR4_WORD0_CHANGE
| FATTR4_WORD0_SIZE
| FATTR4_WORD0_FSID
| FATTR4_WORD0_FILEID
| FATTR4_WORD0_FS_LOCATIONS,
FATTR4_WORD1_MODE
| FATTR4_WORD1_NUMLINKS
| FATTR4_WORD1_OWNER
| FATTR4_WORD1_OWNER_GROUP
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
| FATTR4_WORD1_TIME_ACCESS
| FATTR4_WORD1_TIME_METADATA
| FATTR4_WORD1_TIME_MODIFY
| FATTR4_WORD1_MOUNTED_ON_FILEID
};
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
struct nfs4_readdir_arg *readdir)
{
__be32 *start, *p;
BUG_ON(readdir->count < 80);
if (cookie > 2) {
readdir->cookie = cookie;
memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
return;
}
readdir->cookie = 0;
memset(&readdir->verifier, 0, sizeof(readdir->verifier));
if (cookie == 2)
return;
/*
* NFSv4 servers do not return entries for '.' and '..'
* Therefore, we fake these entries here. We let '.'
* have cookie 0 and '..' have cookie 1. Note that
* when talking to the server, we always send cookie 0
* instead of 1 or 2.
*/
start = p = kmap_atomic(*readdir->pages, KM_USER0);
if (cookie == 0) {
*p++ = xdr_one; /* next */
*p++ = xdr_zero; /* cookie, first word */
*p++ = xdr_one; /* cookie, second word */
*p++ = xdr_one; /* entry len */
memcpy(p, ".\0\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
*p++ = htonl(8); /* attribute buffer length */
p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
}
*p++ = xdr_one; /* next */
*p++ = xdr_zero; /* cookie, first word */
*p++ = xdr_two; /* cookie, second word */
*p++ = xdr_two; /* entry len */
memcpy(p, "..\0\0", 4); /* entry */
p++;
*p++ = xdr_one; /* bitmap length */
*p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
*p++ = htonl(8); /* attribute buffer length */
p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
readdir->pgbase = (char *)p - (char *)start;
readdir->count -= readdir->pgbase;
kunmap_atomic(start, KM_USER0);
}
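/*
 * Example (illustrative): the fabricated "." entry above occupies
 * 10 XDR words (40 bytes) on the first page:
 *
 *	next(1) + cookie(2) + namelen(1) + name(1, padded) +
 *	bitmap len(1) + bitmap(1) + attr len(1) + fileid(2)
 *
 * readdir->pgbase is then set past both fake entries so the server's
 * real entries are decoded immediately after "..".
 */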
static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
int res;
might_sleep();
res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
nfs_wait_bit_killable, TASK_KILLABLE);
return res;
}
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
int res = 0;
might_sleep();
if (*timeout <= 0)
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
schedule_timeout_killable(*timeout);
if (fatal_signal_pending(current))
res = -ERESTARTSYS;
*timeout <<= 1;
return res;
}
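/*
 * Example (illustrative): successive nfs4_delay() calls back off
 * exponentially between NFS4_POLL_RETRY_MIN and NFS4_POLL_RETRY_MAX.
 * With HZ == 100 the sleep lengths (in jiffies) are:
 *
 *	10 (clamped up from 0), 20, 40, 80, ..., 1280,
 *	then 1500 (15*HZ) on every further call
 *
 * since *timeout is doubled after each sleep and re-clamped on entry.
 */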
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state *state = exception->state;
int ret = errorcode;
exception->retry = 0;
switch(errorcode) {
case 0:
return 0;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
if (state == NULL)
break;
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
if (state != NULL)
nfs4_schedule_stateid_recovery(server, state);
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
nfs4_schedule_session_recovery(clp->cl_session);
exception->retry = 1;
break;
#endif /* defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
if (exception->timeout > HZ) {
/* We have retried a decent amount, time to
* fail
*/
ret = -EBUSY;
break;
}
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
case -EKEYEXPIRED:
ret = nfs4_delay(server->client, &exception->timeout);
if (ret != 0)
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
exception->retry = 1;
break;
case -NFS4ERR_BADOWNER:
/* The following works around a Linux server bug! */
case -NFS4ERR_BADNAME:
if (server->caps & NFS_CAP_UIDGID_NOMAP) {
server->caps &= ~NFS_CAP_UIDGID_NOMAP;
exception->retry = 1;
printk(KERN_WARNING "NFS: v4 server %s "
"does not accept raw "
"uid/gids. "
"Reenabling the idmapper.\n",
server->nfs_client->cl_hostname);
}
}
/* We failed to handle the error */
return nfs4_map_errors(ret);
wait_on_recovery:
ret = nfs4_wait_clnt_recover(clp);
if (ret == 0)
exception->retry = 1;
return ret;
}
static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
spin_lock(&clp->cl_lock);
if (time_before(clp->cl_last_renewal,timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
}
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
do_renew_lease(server->nfs_client, timestamp);
}
#if defined(CONFIG_NFS_V4_1)
/*
* nfs4_free_slot - free a slot and efficiently update slot table.
*
* freeing a slot is trivially done by clearing its respective bit
* in the bitmap.
* If the freed slotid equals highest_used_slotid we want to update it
* so that the server would be able to size down the slot table if needed,
* otherwise we know that the highest_used_slotid is still in use.
* When updating highest_used_slotid there may be "holes" in the bitmap
* so we need to scan down from highest_used_slotid to 0 looking for the now
* highest slotid in use.
* If none found, highest_used_slotid is set to -1.
*
* Must be called while holding tbl->slot_tbl_lock
*/
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
{
int slotid = free_slotid;
BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
/* clear used bit in bitmap */
__clear_bit(slotid, tbl->used_slots);
/* update highest_used_slotid when it is freed */
if (slotid == tbl->highest_used_slotid) {
slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
if (slotid < tbl->max_slots)
tbl->highest_used_slotid = slotid;
else
tbl->highest_used_slotid = -1;
}
dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
free_slotid, tbl->highest_used_slotid);
}
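/*
 * Example (illustrative): freeing the highest slot when the bitmap
 * has holes. Suppose used_slots = 0b10110 (slots 1, 2 and 4 in use)
 * and slot 4 is freed:
 *
 *	__clear_bit(4, tbl->used_slots);	// used_slots = 0b00110
 *	// slotid == highest_used_slotid, so rescan:
 *	// find_last_bit() returns 2 -> highest_used_slotid = 2
 *
 * Freeing slot 1 instead would leave highest_used_slotid at 4, since
 * only releasing the top slot triggers the rescan.
 */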
/*
* Signal state manager thread if session fore channel is drained
*/
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
struct rpc_task *task;
if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
if (task)
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
return;
}
if (ses->fc_slot_table.highest_used_slotid != -1)
return;
dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
complete(&ses->fc_slot_table.complete);
}
/*
* Signal state manager thread if session back channel is drained
*/
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
{
if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
ses->bc_slot_table.highest_used_slotid != -1)
return;
dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
complete(&ses->bc_slot_table.complete);
}
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
struct nfs4_slot_table *tbl;
tbl = &res->sr_session->fc_slot_table;
if (!res->sr_slot) {
/* just wake up the next guy waiting since
 * we may not have consumed a slot after all */
dprintk("%s: No slot\n", __func__);
return;
}
spin_lock(&tbl->slot_tbl_lock);
nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
nfs4_check_drain_fc_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
}
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
unsigned long timestamp;
struct nfs_client *clp;
/*
* sr_status remains 1 if an RPC level error occurred. The server
* may or may not have processed the sequence operation.
* Proceed as if the server received and processed the sequence
* operation.
*/
if (res->sr_status == 1)
res->sr_status = NFS_OK;
/* don't increment the sequence number if the task wasn't sent */
if (!RPC_WAS_SENT(task))
goto out;
/* Check the SEQUENCE operation status */
switch (res->sr_status) {
case 0:
/* Update the slot's sequence and clientid lease timer */
++res->sr_slot->seq_nr;
timestamp = res->sr_renewal_time;
clp = res->sr_session->clp;
do_renew_lease(clp, timestamp);
/* Check sequence flags */
if (res->sr_status_flags != 0)
nfs4_schedule_lease_recovery(clp);
break;
case -NFS4ERR_DELAY:
/* The server detected a resend of the RPC call and
* returned NFS4ERR_DELAY as per Section 2.10.6.2
* of RFC5661.
*/
dprintk("%s: slot=%td seq=%d: Operation in progress\n",
__func__,
res->sr_slot - res->sr_session->fc_slot_table.slots,
res->sr_slot->seq_nr);
goto out_retry;
default:
/* Just update the slot sequence no. */
++res->sr_slot->seq_nr;
}
out:
/* The session may be reset by one of the error handlers. */
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
nfs41_sequence_free_slot(res);
return 1;
out_retry:
if (!rpc_restart_call(task))
goto out;
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return 0;
}
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
if (res->sr_session == NULL)
return 1;
return nfs41_sequence_done(task, res);
}
/*
* nfs4_find_slot - efficiently look for a free slot
*
* nfs4_find_slot looks for an unset bit in the used_slots bitmap.
* If found, we mark the slot as used, update the highest_used_slotid,
* and respectively set up the sequence operation args.
* The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
*
* Note: must be called while holding the slot_tbl_lock.
*/
static u8
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
int slotid;
u8 ret_id = NFS4_MAX_SLOT_TABLE;
BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);
dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
__func__, tbl->used_slots[0], tbl->highest_used_slotid,
tbl->max_slots);
slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
if (slotid >= tbl->max_slots)
goto out;
__set_bit(slotid, tbl->used_slots);
if (slotid > tbl->highest_used_slotid)
tbl->highest_used_slotid = slotid;
ret_id = slotid;
out:
dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
return ret_id;
}
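/*
 * Example (illustrative): allocating from the same bitmap. With
 * used_slots = 0b00110 and max_slots = 10:
 *
 *	slotid = find_first_zero_bit(...);	// 0
 *	__set_bit(0, tbl->used_slots);		// used_slots = 0b00111
 *	// 0 < highest_used_slotid (2), so the high-water mark is kept
 *
 * Only a slotid above highest_used_slotid grows the high-water mark;
 * a full bitmap makes the function return NFS4_MAX_SLOT_TABLE.
 */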
int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply,
struct rpc_task *task)
{
struct nfs4_slot *slot;
struct nfs4_slot_table *tbl;
u8 slotid;
dprintk("--> %s\n", __func__);
/* slot already allocated? */
if (res->sr_slot != NULL)
return 0;
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
/* The state manager will wait until the slot table is empty */
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s session is draining\n", __func__);
return -EAGAIN;
}
if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s enforce FIFO order\n", __func__);
return -EAGAIN;
}
slotid = nfs4_find_slot(tbl);
if (slotid == NFS4_MAX_SLOT_TABLE) {
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("<-- %s: no free slots\n", __func__);
return -EAGAIN;
}
spin_unlock(&tbl->slot_tbl_lock);
rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
slot = tbl->slots + slotid;
args->sa_session = session;
args->sa_slotid = slotid;
args->sa_cache_this = cache_reply;
dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
res->sr_session = session;
res->sr_slot = slot;
res->sr_renewal_time = jiffies;
res->sr_status_flags = 0;
/*
* sr_status is only set in decode_sequence, and so will remain
* set to 1 if an rpc level failure occurs.
*/
res->sr_status = 1;
return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply,
struct rpc_task *task)
{
struct nfs4_session *session = nfs4_get_session(server);
int ret = 0;
if (session == NULL) {
args->sa_session = NULL;
res->sr_session = NULL;
goto out;
}
dprintk("--> %s clp %p session %p sr_slot %td\n",
__func__, session->clp, session, res->sr_slot ?
res->sr_slot - session->fc_slot_table.slots : -1);
ret = nfs41_setup_sequence(session, args, res, cache_reply,
task);
out:
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
struct nfs41_call_sync_data {
const struct nfs_server *seq_server;
struct nfs4_sequence_args *seq_args;
struct nfs4_sequence_res *seq_res;
int cache_reply;
};
static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
if (nfs4_setup_sequence(data->seq_server, data->seq_args,
data->seq_res, data->cache_reply, task))
return;
rpc_call_start(task);
}
static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs41_call_sync_prepare(task, calldata);
}
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
nfs41_sequence_done(task, data->seq_res);
}
struct rpc_call_ops nfs41_call_sync_ops = {
.rpc_call_prepare = nfs41_call_sync_prepare,
.rpc_call_done = nfs41_call_sync_done,
};
struct rpc_call_ops nfs41_call_priv_sync_ops = {
.rpc_call_prepare = nfs41_call_priv_sync_prepare,
.rpc_call_done = nfs41_call_sync_done,
};
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply,
int privileged)
{
int ret;
struct rpc_task *task;
struct nfs41_call_sync_data data = {
.seq_server = server,
.seq_args = args,
.seq_res = res,
.cache_reply = cache_reply,
};
struct rpc_task_setup task_setup = {
.rpc_client = clnt,
.rpc_message = msg,
.callback_ops = &nfs41_call_sync_ops,
.callback_data = &data
};
res->sr_slot = NULL;
if (privileged)
task_setup.callback_ops = &nfs41_call_priv_sync_ops;
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
ret = PTR_ERR(task);
else {
ret = task->tk_status;
rpc_put_task(task);
}
return ret;
}
int _nfs4_call_sync_session(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
return nfs4_call_sync_sequence(clnt, server, msg, args, res, cache_reply, 0);
}
#else
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
return 1;
}
#endif /* CONFIG_NFS_V4_1 */
int _nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
args->sa_session = res->sr_session = NULL;
return rpc_call_sync(clnt, msg, 0);
}
static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply)
{
return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
args, res, cache_reply);
}
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
if (!cinfo->atomic || cinfo->before != dir->i_version)
nfs_force_lookup_revalidate(dir);
dir->i_version = cinfo->after;
spin_unlock(&dir->i_lock);
}
struct nfs4_opendata {
struct kref kref;
struct nfs_openargs o_arg;
struct nfs_openres o_res;
struct nfs_open_confirmargs c_arg;
struct nfs_open_confirmres c_res;
struct nfs_fattr f_attr;
struct nfs_fattr dir_attr;
struct dentry *dir;
struct dentry *dentry;
struct nfs4_state_owner *owner;
struct nfs4_state *state;
struct iattr attrs;
unsigned long timestamp;
unsigned int rpc_done : 1;
int rpc_status;
int cancelled;
};
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
p->o_res.f_attr = &p->f_attr;
p->o_res.dir_attr = &p->dir_attr;
p->o_res.seqid = p->o_arg.seqid;
p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct nfs4_state_owner *sp, fmode_t fmode, int flags,
const struct iattr *attrs,
gfp_t gfp_mask)
{
struct dentry *parent = dget_parent(dentry);
struct inode *dir = parent->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_opendata *p;
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
goto err;
p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
if (p->o_arg.seqid == NULL)
goto err_free;
nfs_sb_active(dentry->d_sb);
p->dentry = dget(dentry);
p->dir = parent;
p->owner = sp;
atomic_inc(&sp->so_count);
p->o_arg.fh = NFS_FH(dir);
p->o_arg.open_flags = flags;
p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
p->o_arg.clientid = server->nfs_client->cl_clientid;
p->o_arg.id = sp->so_owner_id.id;
p->o_arg.name = &dentry->d_name;
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (flags & O_CREAT) {
u32 *s;
p->o_arg.u.attrs = &p->attrs;
memcpy(&p->attrs, attrs, sizeof(p->attrs));
s = (u32 *) p->o_arg.u.verifier.data;
s[0] = jiffies;
s[1] = current->pid;
}
p->c_arg.fh = &p->o_res.fh;
p->c_arg.stateid = &p->o_res.stateid;
p->c_arg.seqid = p->o_arg.seqid;
nfs4_init_opendata_res(p);
kref_init(&p->kref);
return p;
err_free:
kfree(p);
err:
dput(parent);
return NULL;
}
static void nfs4_opendata_free(struct kref *kref)
{
struct nfs4_opendata *p = container_of(kref,
struct nfs4_opendata, kref);
struct super_block *sb = p->dentry->d_sb;
nfs_free_seqid(p->o_arg.seqid);
if (p->state != NULL)
nfs4_put_open_state(p->state);
nfs4_put_state_owner(p->owner);
dput(p->dir);
dput(p->dentry);
nfs_sb_deactive(sb);
kfree(p);
}
static void nfs4_opendata_put(struct nfs4_opendata *p)
{
if (p != NULL)
kref_put(&p->kref, nfs4_opendata_free);
}
static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
int ret;
ret = rpc_wait_for_completion_task(task);
return ret;
}
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
int ret = 0;
if (open_mode & O_EXCL)
goto out;
switch (mode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
&& state->n_rdonly != 0;
break;
case FMODE_WRITE:
ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
&& state->n_wronly != 0;
break;
case FMODE_READ|FMODE_WRITE:
ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
&& state->n_rdwr != 0;
}
out:
return ret;
}
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
if (delegation == NULL)
return 0;
if ((delegation->type & fmode) != fmode)
return 0;
if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
return 0;
nfs_mark_delegation_referenced(delegation);
return 1;
}
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
switch (fmode) {
case FMODE_WRITE:
state->n_wronly++;
break;
case FMODE_READ:
state->n_rdonly++;
break;
case FMODE_READ|FMODE_WRITE:
state->n_rdwr++;
}
nfs4_state_set_mode_locked(state, state->state | fmode);
}
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
switch (fmode) {
case FMODE_READ:
set_bit(NFS_O_RDONLY_STATE, &state->flags);
break;
case FMODE_WRITE:
set_bit(NFS_O_WRONLY_STATE, &state->flags);
break;
case FMODE_READ|FMODE_WRITE:
set_bit(NFS_O_RDWR_STATE, &state->flags);
}
}
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
write_seqlock(&state->seqlock);
nfs_set_open_stateid_locked(state, stateid, fmode);
write_sequnlock(&state->seqlock);
}
static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
/*
* Protect the call to nfs4_state_set_mode_locked and
* serialise the stateid update
*/
write_seqlock(&state->seqlock);
if (deleg_stateid != NULL) {
memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data));
set_bit(NFS_DELEGATED_STATE, &state->flags);
}
if (open_stateid != NULL)
nfs_set_open_stateid_locked(state, open_stateid, fmode);
write_sequnlock(&state->seqlock);
spin_lock(&state->owner->so_lock);
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
}
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *deleg_cur;
int ret = 0;
fmode &= (FMODE_READ|FMODE_WRITE);
rcu_read_lock();
deleg_cur = rcu_dereference(nfsi->delegation);
if (deleg_cur == NULL)
goto no_delegation;
spin_lock(&deleg_cur->lock);
if (nfsi->delegation != deleg_cur ||
(deleg_cur->type & fmode) != fmode)
goto no_delegation_unlock;
if (delegation == NULL)
delegation = &deleg_cur->stateid;
else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
goto no_delegation_unlock;
nfs_mark_delegation_referenced(deleg_cur);
__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
ret = 1;
no_delegation_unlock:
spin_unlock(&deleg_cur->lock);
no_delegation:
rcu_read_unlock();
if (!ret && open_stateid != NULL) {
__update_open_stateid(state, open_stateid, NULL, fmode);
ret = 1;
}
return ret;
}
static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation == NULL || (delegation->type & fmode) == fmode) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
nfs_inode_return_delegation(inode);
}
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
struct nfs4_state *state = opendata->state;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
int open_mode = opendata->o_arg.open_flags & O_EXCL;
fmode_t fmode = opendata->o_arg.fmode;
nfs4_stateid stateid;
int ret = -EAGAIN;
for (;;) {
if (can_open_cached(state, fmode, open_mode)) {
spin_lock(&state->owner->so_lock);
if (can_open_cached(state, fmode, open_mode)) {
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
goto out_return_state;
}
spin_unlock(&state->owner->so_lock);
}
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
if (!can_open_delegated(delegation, fmode)) {
rcu_read_unlock();
break;
}
/* Save the delegation */
memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
rcu_read_unlock();
ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
if (ret != 0)
goto out;
ret = -EAGAIN;
/* Try to update the stateid using the delegation */
if (update_open_stateid(state, NULL, &stateid, fmode))
goto out_return_state;
}
out:
return ERR_PTR(ret);
out_return_state:
atomic_inc(&state->count);
return state;
}
static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
struct inode *inode;
struct nfs4_state *state = NULL;
struct nfs_delegation *delegation;
int ret;
if (!data->rpc_done) {
state = nfs4_try_open_cached(data);
goto out;
}
ret = -EAGAIN;
if (!(data->f_attr.valid & NFS_ATTR_FATTR))
goto err;
inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
ret = PTR_ERR(inode);
if (IS_ERR(inode))
goto err;
ret = -ENOMEM;
state = nfs4_get_open_state(inode, data->owner);
if (state == NULL)
goto err_put_inode;
if (data->o_res.delegation_type != 0) {
int delegation_flags = 0;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation)
delegation_flags = delegation->flags;
rcu_read_unlock();
if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
"returning a delegation for "
"OPEN(CLAIM_DELEGATE_CUR)\n",
NFS_CLIENT(inode)->cl_server);
} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
nfs_inode_set_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
else
nfs_inode_reclaim_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
}
update_open_stateid(state, &data->o_res.stateid, NULL,
data->o_arg.fmode);
iput(inode);
out:
return state;
err_put_inode:
iput(inode);
err:
return ERR_PTR(ret);
}
static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_open_context *ctx;
spin_lock(&state->inode->i_lock);
list_for_each_entry(ctx, &nfsi->open_files, list) {
if (ctx->state != state)
continue;
get_nfs_open_context(ctx);
spin_unlock(&state->inode->i_lock);
return ctx;
}
spin_unlock(&state->inode->i_lock);
return ERR_PTR(-ENOENT);
}
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs4_opendata *opendata;
opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
if (opendata == NULL)
return ERR_PTR(-ENOMEM);
opendata->state = state;
atomic_inc(&state->count);
return opendata;
}
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
struct nfs4_state *newstate;
int ret;
opendata->o_arg.open_flags = 0;
opendata->o_arg.fmode = fmode;
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
nfs4_init_opendata_res(opendata);
ret = _nfs4_recover_proc_open(opendata);
if (ret != 0)
return ret;
newstate = nfs4_opendata_to_nfs4_state(opendata);
if (IS_ERR(newstate))
return PTR_ERR(newstate);
nfs4_close_state(newstate, fmode);
*res = newstate;
return 0;
}
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
struct nfs4_state *newstate;
int ret;
/* memory barrier prior to reading state->n_* */
clear_bit(NFS_DELEGATED_STATE, &state->flags);
smp_rmb();
if (state->n_rdwr != 0) {
clear_bit(NFS_O_RDWR_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
if (state->n_wronly != 0) {
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
if (state->n_rdonly != 0) {
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
if (ret != 0)
return ret;
if (newstate != state)
return -ESTALE;
}
/*
* We may have performed cached opens for all three recoveries.
* Check if we need to update the current stateid.
*/
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
write_seqlock(&state->seqlock);
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
write_sequnlock(&state->seqlock);
}
return 0;
}
/*
* OPEN_RECLAIM:
* reclaim state on the server after a reboot.
*/
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_delegation *delegation;
struct nfs4_opendata *opendata;
fmode_t delegation_type = 0;
int status;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
opendata->o_arg.fh = NFS_FH(state->inode);
rcu_read_lock();
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
delegation_type = delegation->type;
rcu_read_unlock();
opendata->o_arg.u.delegation_type = delegation_type;
status = nfs4_open_recover(opendata, state);
nfs4_opendata_put(opendata);
return status;
}
static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_do_open_reclaim(ctx, state);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
struct nfs_open_context *ctx;
int ret;
ctx = nfs4_state_find_open_context(state);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = nfs4_do_open_reclaim(ctx, state);
put_nfs_open_context(ctx);
return ret;
}
static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs4_opendata *opendata;
int ret;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
memcpy(opendata->o_arg.u.delegation.data, stateid->data,
sizeof(opendata->o_arg.u.delegation.data));
ret = nfs4_open_recover(opendata, state);
nfs4_opendata_put(opendata);
return ret;
}
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
struct nfs_server *server = NFS_SERVER(state->inode);
int err;
do {
err = _nfs4_open_delegation_recall(ctx, state, stateid);
switch (err) {
case 0:
case -ENOENT:
case -ESTALE:
goto out;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
/* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client);
goto out;
case -ERESTARTSYS:
/*
* The show must go on: exit, but mark the
* stateid as needing recovery.
*/
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
nfs4_schedule_stateid_recovery(server, state);
case -EKEYEXPIRED:
/*
* User RPCSEC_GSS context has expired.
* We cannot recover this stateid now, so
* skip it and allow recovery thread to
* proceed.
*/
case -ENOMEM:
err = 0;
goto out;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
if (data->rpc_status == 0) {
memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
sizeof(data->o_res.stateid.data));
nfs_confirm_seqid(&data->owner->so_seqid, 0);
renew_lease(data->o_res.server, data->timestamp);
data->rpc_done = 1;
}
}
static void nfs4_open_confirm_release(void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state *state = NULL;
/* If this request hasn't been cancelled, do nothing */
if (data->cancelled == 0)
goto out_free;
/* In case of error, no cleanup! */
if (!data->rpc_done)
goto out_free;
state = nfs4_opendata_to_nfs4_state(data);
if (!IS_ERR(state))
nfs4_close_state(state, data->o_arg.fmode);
out_free:
nfs4_opendata_put(data);
}
static const struct rpc_call_ops nfs4_open_confirm_ops = {
.rpc_call_done = nfs4_open_confirm_done,
.rpc_release = nfs4_open_confirm_release,
};
/*
* Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
*/
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
.rpc_argp = &data->c_arg,
.rpc_resp = &data->c_res,
.rpc_cred = data->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_open_confirm_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
kref_get(&data->kref);
data->rpc_done = 0;
data->rpc_status = 0;
data->timestamp = jiffies;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0) {
data->cancelled = 1;
smp_wmb();
} else
status = data->rpc_status;
rpc_put_task(task);
return status;
}
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state_owner *sp = data->owner;
if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
return;
/*
* Check if we still need to send an OPEN call, or if we can use
* a delegation instead.
*/
if (data->state != NULL) {
struct nfs_delegation *delegation;
if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
goto out_no_action;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
can_open_delegated(delegation, data->o_arg.fmode))
goto unlock_no_action;
rcu_read_unlock();
}
/* Update sequence id. */
data->o_arg.id = sp->so_owner_id.id;
data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
}
data->timestamp = jiffies;
if (nfs4_setup_sequence(data->o_arg.server,
&data->o_arg.seq_args,
&data->o_res.seq_res, 1, task))
return;
rpc_call_start(task);
return;
unlock_no_action:
rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
}
static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs4_open_prepare(task, calldata);
}
static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
if (!nfs4_sequence_done(task, &data->o_res.seq_res))
return;
if (task->tk_status == 0) {
switch (data->o_res.f_attr->mode & S_IFMT) {
case S_IFREG:
break;
case S_IFLNK:
data->rpc_status = -ELOOP;
break;
case S_IFDIR:
data->rpc_status = -EISDIR;
break;
default:
data->rpc_status = -ENOTDIR;
}
renew_lease(data->o_res.server, data->timestamp);
if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
nfs_confirm_seqid(&data->owner->so_seqid, 0);
}
data->rpc_done = 1;
}
static void nfs4_open_release(void *calldata)
{
struct nfs4_opendata *data = calldata;
struct nfs4_state *state = NULL;
/* If this request hasn't been cancelled, do nothing */
if (data->cancelled == 0)
goto out_free;
/* In case of error, no cleanup! */
if (data->rpc_status != 0 || !data->rpc_done)
goto out_free;
/* In case we need an open_confirm, no cleanup! */
if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
goto out_free;
state = nfs4_opendata_to_nfs4_state(data);
if (!IS_ERR(state))
nfs4_close_state(state, data->o_arg.fmode);
out_free:
nfs4_opendata_put(data);
}
static const struct rpc_call_ops nfs4_open_ops = {
.rpc_call_prepare = nfs4_open_prepare,
.rpc_call_done = nfs4_open_done,
.rpc_release = nfs4_open_release,
};
static const struct rpc_call_ops nfs4_recover_open_ops = {
.rpc_call_prepare = nfs4_recover_open_prepare,
.rpc_call_done = nfs4_open_done,
.rpc_release = nfs4_open_release,
};
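/*
 * Run an OPEN RPC asynchronously and wait for it to complete. If the
 * wait is interrupted, the request is marked as cancelled so that the
 * rpc_release callback can clean up any state the server handed out.
 */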
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
struct inode *dir = data->dir->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_openargs *o_arg = &data->o_arg;
struct nfs_openres *o_res = &data->o_res;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
.rpc_argp = o_arg,
.rpc_resp = o_res,
.rpc_cred = data->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_open_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
kref_get(&data->kref);
data->rpc_done = 0;
data->rpc_status = 0;
data->cancelled = 0;
if (isrecover)
task_setup_data.callback_ops = &nfs4_recover_open_ops;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0) {
data->cancelled = 1;
smp_wmb();
} else
status = data->rpc_status;
rpc_put_task(task);
return status;
}
static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
struct inode *dir = data->dir->d_inode;
struct nfs_openres *o_res = &data->o_res;
int status;
status = nfs4_run_open_task(data, 1);
if (status != 0 || !data->rpc_done)
return status;
nfs_refresh_inode(dir, o_res->dir_attr);
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
return status;
}
return status;
}
/*
* Note: On error, nfs4_proc_open will free the struct nfs4_opendata
*/
static int _nfs4_proc_open(struct nfs4_opendata *data)
{
struct inode *dir = data->dir->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_openargs *o_arg = &data->o_arg;
struct nfs_openres *o_res = &data->o_res;
int status;
status = nfs4_run_open_task(data, 0);
if (!data->rpc_done)
return status;
if (status != 0) {
if (status == -NFS4ERR_BADNAME &&
!(o_arg->open_flags & O_CREAT))
return -ENOENT;
return status;
}
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
nfs_post_op_update_inode(dir, o_res->dir_attr);
} else
nfs_refresh_inode(dir, o_res->dir_attr);
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
server->caps &= ~NFS_CAP_POSIX_LOCK;
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
return status;
}
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
return 0;
}
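/*
 * Wait for lease recovery to complete, kicking the state manager for
 * as long as the lease is marked expired or in need of a check. Gives
 * up with -EIO after NFS4_MAX_LOOP_ON_RECOVER attempts.
 */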
static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
unsigned int loop;
int ret;
for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
ret = nfs4_wait_clnt_recover(clp);
if (ret != 0)
break;
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
!test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
break;
nfs4_schedule_state_manager(clp);
ret = -EIO;
}
return ret;
}
static int nfs4_recover_expired_lease(struct nfs_server *server)
{
return nfs4_client_recover_expired_lease(server->nfs_client);
}
/*
* OPEN_EXPIRED:
* reclaim state on the server after a network partition.
* Assumes caller holds the appropriate lock
*/
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs4_opendata *opendata;
int ret;
opendata = nfs4_open_recoverdata_alloc(ctx, state);
if (IS_ERR(opendata))
return PTR_ERR(opendata);
ret = nfs4_open_recover(opendata, state);
if (ret == -ESTALE)
d_drop(ctx->dentry);
nfs4_opendata_put(opendata);
return ret;
}
static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_open_expired(ctx, state);
switch (err) {
default:
goto out;
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
nfs4_handle_exception(server, err, &exception);
err = 0;
}
} while (exception.retry);
out:
return err;
}
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
struct nfs_open_context *ctx;
int ret;
ctx = nfs4_state_find_open_context(state);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = nfs4_do_open_expired(ctx, state);
put_nfs_open_context(ctx);
return ret;
}
#if defined(CONFIG_NFS_V4_1)
static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
int status;
struct nfs_server *server = NFS_SERVER(state->inode);
status = nfs41_test_stateid(server, state);
if (status == NFS_OK)
return 0;
nfs41_free_stateid(server, state);
return nfs4_open_expired(sp, state);
}
#endif
/*
* on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
* fields corresponding to attributes that were used to store the verifier.
* Make sure we clobber those fields in the later setattr call
*/
static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
{
if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
!(sattr->ia_valid & ATTR_ATIME_SET))
sattr->ia_valid |= ATTR_ATIME;
if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
!(sattr->ia_valid & ATTR_MTIME_SET))
sattr->ia_valid |= ATTR_MTIME;
}
/*
* Returns a referenced nfs4_state
*/
static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
{
struct nfs4_state_owner *sp;
struct nfs4_state *state = NULL;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_opendata *opendata;
int status;
/* Protect against reboot recovery conflicts */
status = -ENOMEM;
sp = nfs4_get_state_owner(server, cred);
if (sp == NULL) {
dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
goto out_err;
}
status = nfs4_recover_expired_lease(server);
if (status != 0)
goto err_put_state_owner;
if (dentry->d_inode != NULL)
nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
status = -ENOMEM;
opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
if (opendata == NULL)
goto err_put_state_owner;
if (dentry->d_inode != NULL)
opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
status = _nfs4_proc_open(opendata);
if (status != 0)
goto err_opendata_put;
state = nfs4_opendata_to_nfs4_state(opendata);
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
if (opendata->o_arg.open_flags & O_EXCL) {
nfs4_exclusive_attrset(opendata, sattr);
nfs_fattr_init(opendata->o_res.f_attr);
status = nfs4_do_setattr(state->inode, cred,
opendata->o_res.f_attr, sattr,
state);
if (status == 0)
nfs_setattr_update_inode(state->inode, sattr);
nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
}
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
return 0;
err_opendata_put:
nfs4_opendata_put(opendata);
err_put_state_owner:
nfs4_put_state_owner(sp);
out_err:
*res = NULL;
return status;
}
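/*
 * Retry wrapper around _nfs4_do_open(): BAD_SEQID, BAD_STATEID and
 * -EAGAIN (a delegation recall raced with us) are retried directly;
 * everything else goes through the generic exception handler.
 */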
static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
{
struct nfs4_exception exception = { };
struct nfs4_state *res;
int status;
do {
status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
if (status == 0)
break;
/* NOTE: BAD_SEQID means the server and client disagree about the
* book-keeping w.r.t. state-changing operations
* (OPEN/CLOSE/LOCK/LOCKU...)
* It is actually a sign of a bug on the client or on the server.
*
* If we receive a BAD_SEQID error in the particular case of
* doing an OPEN, we assume that nfs_increment_open_seqid() will
* have unhashed the old state_owner for us, and that we can
* therefore safely retry using a new one. We should still warn
* the user though...
*/
if (status == -NFS4ERR_BAD_SEQID) {
printk(KERN_WARNING "NFS: v4 server %s "
" returned a bad sequence-id error!\n",
NFS_SERVER(dir)->nfs_client->cl_hostname);
exception.retry = 1;
continue;
}
/*
* BAD_STATEID on OPEN means that the server cancelled our
* state before it received the OPEN_CONFIRM.
* Recover by retrying the request as per the discussion
* on Page 181 of RFC3530.
*/
if (status == -NFS4ERR_BAD_STATEID) {
exception.retry = 1;
continue;
}
if (status == -EAGAIN) {
/* We must have found a delegation */
exception.retry = 1;
continue;
}
res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
status, &exception));
} while (exception.retry);
return res;
}
static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_setattrargs arg = {
.fh = NFS_FH(inode),
.iap = sattr,
.server = server,
.bitmask = server->attr_bitmask,
};
struct nfs_setattrres res = {
.fattr = fattr,
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
.rpc_argp = &arg,
.rpc_resp = &res,
.rpc_cred = cred,
};
unsigned long timestamp = jiffies;
int status;
nfs_fattr_init(fattr);
if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
/* Use that stateid */
} else if (state != NULL) {
nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
return status;
}
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_do_setattr(inode, cred, fattr, sattr, state),
&exception);
} while (exception.retry);
return err;
}
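/*
 * Per-call bookkeeping for an asynchronous CLOSE or OPEN_DOWNGRADE:
 * the target state, the RPC arguments and results, and the pNFS
 * return-on-close (roc) information.
 */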
struct nfs4_closedata {
struct inode *inode;
struct nfs4_state *state;
struct nfs_closeargs arg;
struct nfs_closeres res;
struct nfs_fattr fattr;
unsigned long timestamp;
bool roc;
u32 roc_barrier;
};
static void nfs4_free_closedata(void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state_owner *sp = calldata->state->owner;
struct super_block *sb = calldata->state->inode->i_sb;
if (calldata->roc)
pnfs_roc_release(calldata->state->inode);
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_state_owner(sp);
nfs_sb_deactive(sb);
kfree(calldata);
}
static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
fmode_t fmode)
{
spin_lock(&state->owner->so_lock);
if (!(fmode & FMODE_READ))
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
if (!(fmode & FMODE_WRITE))
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
clear_bit(NFS_O_RDWR_STATE, &state->flags);
spin_unlock(&state->owner->so_lock);
}
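/*
 * Process the reply to a CLOSE or OPEN_DOWNGRADE call. On success,
 * record the new open stateid and clear the share-mode bits that were
 * given up. A stale, old, bad or expired stateid is ignored if we were
 * closing the file completely; anything else goes through the generic
 * async error handler and may be retried.
 */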
static void nfs4_close_done(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
*/
switch (task->tk_status) {
case 0:
if (calldata->roc)
pnfs_roc_set_barrier(state->inode,
calldata->roc_barrier);
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
nfs4_close_clear_stateid_flags(state,
calldata->arg.fmode);
break;
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_EXPIRED:
if (calldata->arg.fmode == 0)
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
rpc_restart_call_prepare(task);
}
nfs_release_seqid(calldata->arg.seqid);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
int call_close = 0;
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
spin_lock(&state->owner->so_lock);
/* Calculate the change in open mode */
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0) {
call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
calldata->arg.fmode &= ~FMODE_READ;
}
if (state->n_wronly == 0) {
call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
calldata->arg.fmode &= ~FMODE_WRITE;
}
}
spin_unlock(&state->owner->so_lock);
if (!call_close) {
/* Note: exit _without_ calling nfs4_close_done */
task->tk_action = NULL;
return;
}
if (calldata->arg.fmode == 0) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
if (calldata->roc &&
pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
task, NULL);
return;
}
}
nfs_fattr_init(calldata->res.fattr);
calldata->timestamp = jiffies;
if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
&calldata->arg.seq_args, &calldata->res.seq_res,
1, task))
return;
rpc_call_start(task);
}
static const struct rpc_call_ops nfs4_close_ops = {
.rpc_call_prepare = nfs4_close_prepare,
.rpc_call_done = nfs4_close_done,
.rpc_release = nfs4_free_closedata,
};
/*
* It is possible for data to be read/written from a mem-mapped file
* after the sys_close call (which hits the vfs layer as a flush).
* This means that we can't safely call nfsv4 close on a file until
* the inode is cleared. This in turn means that we are not good
* NFSv4 citizens - we do not indicate to the server to update the file's
* share state even when we are done with one of the three share
* stateids in the inode.
*
* NOTE: Caller must be holding the sp->so_owner semaphore!
*/
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_closedata *calldata;
struct nfs4_state_owner *sp = state->owner;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
.rpc_cred = state->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_close_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status = -ENOMEM;
calldata = kzalloc(sizeof(*calldata), gfp_mask);
if (calldata == NULL)
goto out;
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
calldata->arg.stateid = &state->open_stateid;
/* Serialization for the sequence id */
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
if (calldata->arg.seqid == NULL)
goto out_free_calldata;
calldata->arg.fmode = 0;
calldata->arg.bitmask = server->cache_consistency_bitmask;
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->roc = roc;
nfs_sb_active(calldata->inode->i_sb);
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = 0;
if (wait)
status = rpc_wait_for_completion_task(task);
rpc_put_task(task);
return status;
out_free_calldata:
kfree(calldata);
out:
if (roc)
pnfs_roc_release(state->inode);
nfs4_put_open_state(state);
nfs4_put_state_owner(sp);
return status;
}
static struct inode *
nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
{
struct nfs4_state *state;
/* Protect against concurrent sillydeletes */
state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
if (IS_ERR(state))
return ERR_CAST(state);
ctx->state = state;
return igrab(state->inode);
}
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
if (ctx->state == NULL)
return;
if (is_sync)
nfs4_close_sync(ctx->state, ctx->mode);
else
nfs4_close_state(ctx->state, ctx->mode);
}
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
struct nfs4_server_caps_arg args = {
.fhandle = fhandle,
};
struct nfs4_server_caps_res res = {};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
NFS_CAP_CTIME|NFS_CAP_MTIME);
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
server->caps |= NFS_CAP_ACLS;
if (res.has_links != 0)
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
server->caps |= NFS_CAP_FILEID;
if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
server->caps |= NFS_CAP_MODE;
if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
server->caps |= NFS_CAP_NLINK;
if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
server->caps |= NFS_CAP_OWNER;
if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
server->caps |= NFS_CAP_OWNER_GROUP;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
server->caps |= NFS_CAP_ATIME;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
server->caps |= NFS_CAP_CTIME;
if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
server->caps |= NFS_CAP_MTIME;
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
server->acl_bitmask = res.acl_bitmask;
}
return status;
}
int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_server_capabilities(server, fhandle),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
struct nfs4_lookup_root_arg args = {
.bitmask = nfs4_fattr_bitmap,
};
struct nfs4_lookup_res res = {
.server = server,
.fattr = info->fattr,
.fh = fhandle,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(info->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_lookup_root(server, fhandle, info);
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
break;
default:
err = nfs4_handle_exception(server, err, &exception);
}
} while (exception.retry);
return err;
}
static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, rpc_authflavor_t flavor)
{
struct rpc_auth *auth;
int ret;
auth = rpcauth_create(flavor, server->client);
if (IS_ERR(auth)) {
ret = -EIO;
goto out;
}
ret = nfs4_lookup_root(server, fhandle, info);
out:
return ret;
}
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int i, len, status = 0;
rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
len = gss_mech_list_pseudoflavors(&flav_array[0]);
flav_array[len] = RPC_AUTH_NULL;
len += 1;
for (i = 0; i < len; i++) {
status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
continue;
break;
}
/*
* -EACCES could mean that the user doesn't have correct permissions
* to access the mount. It could also mean that we tried to mount
* with a gss auth flavor, but rpc.gssd isn't running. Either way,
* existing mount programs don't handle -EACCES very well so it should
* be mapped to -EPERM instead.
*/
if (status == -EACCES)
status = -EPERM;
return status;
}
/*
* get the file handle for the "/" directory on the server
*/
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int minor_version = server->nfs_client->cl_minorversion;
int status = nfs4_lookup_root(server, fhandle, info);
if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
/*
* A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
* by nfs4_map_errors() as this function exits.
*/
status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
if (status == 0)
status = nfs4_server_capabilities(server, fhandle);
if (status == 0)
status = nfs4_do_fsinfo(server, fhandle, info);
return nfs4_map_errors(status);
}
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
/*
* Get locations and (maybe) other attributes of a referral.
* Note that we'll actually follow the referral later when
* we detect fsid mismatch in inode revalidation
*/
static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
int status = -ENOMEM;
struct page *page = NULL;
struct nfs4_fs_locations *locations = NULL;
page = alloc_page(GFP_KERNEL);
if (page == NULL)
goto out;
locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
if (locations == NULL)
goto out;
status = nfs4_proc_fs_locations(dir, name, locations, page);
if (status != 0)
goto out;
/* Make sure server returned a different fsid for the referral */
if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
dprintk("%s: server did not return a different fsid for"
" a referral at %s\n", __func__, name->name);
status = -EIO;
goto out;
}
/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
nfs_fixup_referral_attributes(&locations->fattr);
/* replace the lookup nfs_fattr with the locations nfs_fattr */
memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
memset(fhandle, 0, sizeof(struct nfs_fh));
out:
if (page)
__free_page(page);
kfree(locations);
return status;
}
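/*
 * Plain GETATTR: request the server's supported attribute bitmask for
 * the given filehandle and decode the reply into the passed-in fattr.
 */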
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_getattr_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_getattr_res res = {
.fattr = fattr,
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_getattr(server, fhandle, fattr),
&exception);
} while (exception.retry);
return err;
}
/*
* The file is not closed if it is opened due to a request to change
* the size of the file. The open call will not be needed once the
* VFS layer lookup-intents are implemented.
*
* Close is called when the inode is destroyed.
* If we haven't opened the file for O_WRONLY, we
* need to in the size_change case to obtain a stateid.
*
* Got race?
* Because OPEN is always done by name in nfsv4, it is
* possible that we opened a different file by the same
* name. We can recognize this race condition, but we
* can't do anything about it besides returning an error.
*
* This will be fixed with VFS changes (lookup-intent).
*/
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct iattr *sattr)
{
struct inode *inode = dentry->d_inode;
struct rpc_cred *cred = NULL;
struct nfs4_state *state = NULL;
int status;
if (pnfs_ld_layoutret_on_setattr(inode))
pnfs_return_layout(inode);
nfs_fattr_init(fattr);
/* Search for an existing open(O_WRITE) file */
if (sattr->ia_valid & ATTR_FILE) {
struct nfs_open_context *ctx;
ctx = nfs_file_open_context(sattr->ia_file);
if (ctx) {
cred = ctx->cred;
state = ctx->state;
}
}
status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
if (status == 0)
nfs_setattr_update_inode(inode, sattr);
return status;
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
struct nfs_server *server = NFS_SERVER(dir);
int status;
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
.dir_fh = NFS_FH(dir),
.name = name,
};
struct nfs4_lookup_res res = {
.server = server,
.fattr = fattr,
.fh = fhandle,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fattr);
dprintk("NFS call lookup %s\n", name->name);
status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
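/*
 * A lookup that fails with NFS4ERR_WRONGSEC still has to produce
 * something usable for the VFS: fabricate minimal directory attributes
 * and mark the entry as a mountpoint, so that crossing it can trigger
 * a submount with the appropriate security flavor.
 */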
void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh)
{
memset(fh, 0, sizeof(struct nfs_fh));
fattr->fsid.major = 1;
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT;
fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
fattr->nlink = 2;
}
static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
int err;
do {
int status;
status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr);
switch (status) {
case -NFS4ERR_BADNAME:
return -ENOENT;
case -NFS4ERR_MOVED:
return nfs4_get_referral(dir, name, fattr, fhandle);
case -NFS4ERR_WRONGSEC:
nfs_fixup_secinfo_attributes(fattr, fhandle);
}
err = nfs4_handle_exception(NFS_SERVER(dir),
status, &exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_accessargs args = {
.fh = NFS_FH(inode),
.bitmask = server->attr_bitmask,
};
struct nfs4_accessres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = entry->cred,
};
int mode = entry->mask;
int status;
/*
* Determine which access bits we want to ask for...
*/
if (mode & MAY_READ)
args.access |= NFS4_ACCESS_READ;
if (S_ISDIR(inode->i_mode)) {
if (mode & MAY_WRITE)
args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
if (mode & MAY_EXEC)
args.access |= NFS4_ACCESS_LOOKUP;
} else {
if (mode & MAY_WRITE)
args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
if (mode & MAY_EXEC)
args.access |= NFS4_ACCESS_EXECUTE;
}
res.fattr = nfs_alloc_fattr();
if (res.fattr == NULL)
return -ENOMEM;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (!status) {
entry->mask = 0;
if (res.access & NFS4_ACCESS_READ)
entry->mask |= MAY_READ;
if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
entry->mask |= MAY_WRITE;
if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
entry->mask |= MAY_EXEC;
nfs_refresh_inode(inode, res.fattr);
}
nfs_free_fattr(res.fattr);
return status;
}
static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_access(inode, entry),
&exception);
} while (exception.retry);
return err;
}
/*
* TODO: For the time being, we don't try to get any attributes
* along with any of the zero-copy operations READ, READDIR,
* READLINK, WRITE.
*
* In the case of the first three, we want to put the GETATTR
* after the read-type operation -- this is because it is hard
* to predict the length of a GETATTR response in v4, and thus
* align the READ data correctly. This means that the GETATTR
* may end up partially falling into the page cache, and we should
* shift it into the 'tail' of the xdr_buf before processing.
* To do this efficiently, we need to know the total length
* of data received, which doesn't seem to be available outside
* of the RPC layer.
*
* In the case of WRITE, we also want to put the GETATTR after
* the operation -- in this case because we want to make sure
* we get the post-operation mtime and size. This means that
* we can't use xdr_encode_pages() as written: we need a variant
* of it which would leave room in the 'tail' iovec.
*
* Both of these changes to the XDR layer would in fact be quite
* minor, but I decided to leave them for a subsequent patch.
*/
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
struct nfs4_readlink args = {
.fh = NFS_FH(inode),
.pgbase = pgbase,
.pglen = pglen,
.pages = &page,
};
struct nfs4_readlink_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
unsigned int pgbase, unsigned int pglen)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_readlink(inode, page, pgbase, pglen),
&exception);
} while (exception.retry);
return err;
}
/*
* Got race?
* We will need to arrange for the VFS layer to provide an atomic open.
* Until then, this create/open method is prone to inefficiency and race
* conditions due to the lookup, create, and open VFS calls from sys_open()
* placed on the wire.
*
* Given the above sorry state of affairs, I'm simply sending an OPEN.
* The file will be opened again in the subsequent VFS open call
* (nfs4_proc_file_open).
*
* The open for read will just hang around to be used by any process that
* opens the file O_RDONLY. This will all be resolved with the VFS changes.
*/
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nfs_open_context *ctx)
{
struct dentry *de = dentry;
struct nfs4_state *state;
struct rpc_cred *cred = NULL;
fmode_t fmode = 0;
int status = 0;
if (ctx != NULL) {
cred = ctx->cred;
de = ctx->dentry;
fmode = ctx->mode;
}
sattr->ia_mode &= ~current_umask();
state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
d_drop(dentry);
if (IS_ERR(state)) {
status = PTR_ERR(state);
goto out;
}
d_add(dentry, igrab(state->inode));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
if (ctx != NULL)
ctx->state = state;
else
nfs4_close_sync(state, fmode);
out:
return status;
}
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs args = {
.fh = NFS_FH(dir),
.name.len = name->len,
.name.name = name->name,
.bitmask = server->attr_bitmask,
};
struct nfs_removeres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.dir_attr = nfs_alloc_fattr();
if (res.dir_attr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
}
nfs_free_fattr(res.dir_attr);
out:
return status;
}
static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_remove(dir, name),
&exception);
} while (exception.retry);
return err;
}
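/*
 * Set up an asynchronous REMOVE, as used by the async unlink
 * (sillyrename) path: request the cache consistency attributes and
 * reset the session slot before the message is handed to the RPC layer.
 */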
static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs *args = msg->rpc_argp;
struct nfs_removeres *res = msg->rpc_resp;
args->bitmask = server->cache_consistency_bitmask;
res->server = server;
res->seq_res.sr_slot = NULL;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
}
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
struct nfs_removeres *res = task->tk_msg.rpc_resp;
if (!nfs4_sequence_done(task, &res->seq_res))
return 0;
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_renameargs *arg = msg->rpc_argp;
struct nfs_renameres *res = msg->rpc_resp;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
arg->bitmask = server->attr_bitmask;
res->server = server;
}
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
struct inode *new_dir)
{
struct nfs_renameres *res = task->tk_msg.rpc_resp;
if (!nfs4_sequence_done(task, &res->seq_res))
return 0;
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(old_dir, &res->old_cinfo);
nfs_post_op_update_inode(old_dir, res->old_fattr);
update_changeattr(new_dir, &res->new_cinfo);
nfs_post_op_update_inode(new_dir, res->new_fattr);
return 1;
}
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
struct nfs_server *server = NFS_SERVER(old_dir);
struct nfs_renameargs arg = {
.old_dir = NFS_FH(old_dir),
.new_dir = NFS_FH(new_dir),
.old_name = old_name,
.new_name = new_name,
.bitmask = server->attr_bitmask,
};
struct nfs_renameres res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.old_fattr = nfs_alloc_fattr();
res.new_fattr = nfs_alloc_fattr();
if (res.old_fattr == NULL || res.new_fattr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
nfs_post_op_update_inode(old_dir, res.old_fattr);
update_changeattr(new_dir, &res.new_cinfo);
nfs_post_op_update_inode(new_dir, res.new_fattr);
}
out:
nfs_free_fattr(res.new_fattr);
nfs_free_fattr(res.old_fattr);
return status;
}
static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(old_dir),
_nfs4_proc_rename(old_dir, old_name,
new_dir, new_name),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_link_arg arg = {
.fh = NFS_FH(inode),
.dir_fh = NFS_FH(dir),
.name = name,
.bitmask = server->attr_bitmask,
};
struct nfs4_link_res res = {
.server = server,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int status = -ENOMEM;
res.fattr = nfs_alloc_fattr();
res.dir_attr = nfs_alloc_fattr();
if (res.fattr == NULL || res.dir_attr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
nfs_post_op_update_inode(inode, res.fattr);
}
out:
nfs_free_fattr(res.dir_attr);
nfs_free_fattr(res.fattr);
return status;
}
static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
_nfs4_proc_link(inode, dir, name),
&exception);
} while (exception.retry);
return err;
}
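/*
 * Bundles everything needed for a CREATE-style call (CREATE, SYMLINK,
 * MKDIR, MKNOD): the rpc_message plus argument, result and attribute
 * storage, allocated and freed as a single unit.
 */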
struct nfs4_createdata {
struct rpc_message msg;
struct nfs4_create_arg arg;
struct nfs4_create_res res;
struct nfs_fh fh;
struct nfs_fattr fattr;
struct nfs_fattr dir_fattr;
};
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
struct qstr *name, struct iattr *sattr, u32 ftype)
{
struct nfs4_createdata *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data != NULL) {
struct nfs_server *server = NFS_SERVER(dir);
data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
data->msg.rpc_argp = &data->arg;
data->msg.rpc_resp = &data->res;
data->arg.dir_fh = NFS_FH(dir);
data->arg.server = server;
data->arg.name = name;
data->arg.attrs = sattr;
data->arg.ftype = ftype;
data->arg.bitmask = server->attr_bitmask;
data->res.server = server;
data->res.fh = &data->fh;
data->res.fattr = &data->fattr;
data->res.dir_fattr = &data->dir_fattr;
nfs_fattr_init(data->res.fattr);
nfs_fattr_init(data->res.dir_fattr);
}
return data;
}
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
nfs_post_op_update_inode(dir, data->res.dir_fattr);
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
}
return status;
}
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
kfree(data);
}
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
struct page *page, unsigned int len, struct iattr *sattr)
{
struct nfs4_createdata *data;
int status = -ENAMETOOLONG;
if (len > NFS4_MAXPATHLEN)
goto out;
status = -ENOMEM;
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
if (data == NULL)
goto out;
data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
data->arg.u.symlink.pages = &page;
data->arg.u.symlink.len = len;
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
struct page *page, unsigned int len, struct iattr *sattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_symlink(dir, dentry, page,
len, sattr),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
struct iattr *sattr)
{
struct nfs4_createdata *data;
int status = -ENOMEM;
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
if (data == NULL)
goto out;
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
struct iattr *sattr)
{
struct nfs4_exception exception = { };
int err;
sattr->ia_mode &= ~current_umask();
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_mkdir(dir, dentry, sattr),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
struct nfs4_readdir_arg args = {
.fh = NFS_FH(dir),
.pages = pages,
.pgbase = 0,
.count = count,
.bitmask = NFS_SERVER(dir)->attr_bitmask,
.plus = plus,
};
struct nfs4_readdir_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = cred,
};
int status;
dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
dentry->d_parent->d_name.name,
dentry->d_name.name,
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
if (status >= 0) {
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
status += args.pgbase;
}
nfs_invalidate_atime(dir);
dprintk("%s: returns %d\n", __func__, status);
return status;
}
static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
_nfs4_proc_readdir(dentry, cred, cookie,
pages, count, plus),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, dev_t rdev)
{
struct nfs4_createdata *data;
int mode = sattr->ia_mode;
int status = -ENOMEM;
BUG_ON(!(sattr->ia_valid & ATTR_MODE));
BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
if (data == NULL)
goto out;
if (S_ISFIFO(mode))
data->arg.ftype = NF4FIFO;
else if (S_ISBLK(mode)) {
data->arg.ftype = NF4BLK;
data->arg.u.device.specdata1 = MAJOR(rdev);
data->arg.u.device.specdata2 = MINOR(rdev);
}
else if (S_ISCHR(mode)) {
data->arg.ftype = NF4CHR;
data->arg.u.device.specdata1 = MAJOR(rdev);
data->arg.u.device.specdata2 = MINOR(rdev);
}
status = nfs4_do_create(dir, dentry, data);
nfs4_free_createdata(data);
out:
return status;
}
static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, dev_t rdev)
{
struct nfs4_exception exception = { };
int err;
sattr->ia_mode &= ~current_umask();
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_mknod(dir, dentry, sattr, rdev),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsstat *fsstat)
{
struct nfs4_statfs_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_statfs_res res = {
.fsstat = fsstat,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
.rpc_argp = &args,
.rpc_resp = &res,
};
nfs_fattr_init(fsstat->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_statfs(server, fhandle, fsstat),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *fsinfo)
{
struct nfs4_fsinfo_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_fsinfo_res res = {
.fsinfo = fsinfo,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_do_fsinfo(server, fhandle, fsinfo),
&exception);
} while (exception.retry);
return err;
}
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
nfs_fattr_init(fsinfo->fattr);
return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_pathconf *pathconf)
{
struct nfs4_pathconf_arg args = {
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
struct nfs4_pathconf_res res = {
.pathconf = pathconf,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
.rpc_argp = &args,
.rpc_resp = &res,
};
/* None of the pathconf attributes are mandatory to implement */
if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
memset(pathconf, 0, sizeof(*pathconf));
return 0;
}
nfs_fattr_init(pathconf->fattr);
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_pathconf *pathconf)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_pathconf(server, fhandle, pathconf),
&exception);
} while (exception.retry);
return err;
}
void __nfs4_read_done_cb(struct nfs_read_data *data)
{
nfs_invalidate_atime(data->inode);
}
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
__nfs4_read_done_cb(data);
if (task->tk_status > 0)
renew_lease(server, data->timestamp);
return 0;
}
static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->read_done_cb ? data->read_done_cb(task, data) :
nfs4_read_done_cb(task, data);
}
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
data->timestamp = jiffies;
data->read_done_cb = nfs4_read_done_cb;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
}
/* Reset the nfs_read_data to send the read to the MDS. */
void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
{
dprintk("%s Reset task for i/o through\n", __func__);
put_lseg(data->lseg);
data->lseg = NULL;
/* offsets will differ in the dense stripe case */
data->args.offset = data->mds_offset;
data->ds_clp = NULL;
data->args.fh = NFS_FH(data->inode);
data->read_done_cb = nfs4_read_done_cb;
task->tk_ops = data->mds_ops;
rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_read);
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
if (task->tk_status >= 0) {
renew_lease(NFS_SERVER(inode), data->timestamp);
nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
}
return 0;
}
static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->write_done_cb ? data->write_done_cb(task, data) :
nfs4_write_done_cb(task, data);
}
/* Reset the nfs_write_data to send the write to the MDS. */
void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
{
dprintk("%s Reset task for i/o through\n", __func__);
put_lseg(data->lseg);
data->lseg = NULL;
data->ds_clp = NULL;
data->write_done_cb = nfs4_write_done_cb;
data->args.fh = NFS_FH(data->inode);
data->args.bitmask = data->res.server->cache_consistency_bitmask;
data->args.offset = data->mds_offset;
data->res.fattr = &data->fattr;
task->tk_ops = data->mds_ops;
rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_write);
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (data->lseg) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
} else
data->args.bitmask = server->cache_consistency_bitmask;
if (!data->write_done_cb)
data->write_done_cb = nfs4_write_done_cb;
data->res.server = server;
data->timestamp = jiffies;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
}
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return -EAGAIN;
}
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
return data->write_done_cb(task, data);
}
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
if (data->lseg) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
} else
data->args.bitmask = server->cache_consistency_bitmask;
if (!data->write_done_cb)
data->write_done_cb = nfs4_commit_done_cb;
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
}
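/*
 * Callback data for an asynchronous RENEW: the client whose lease is
 * being renewed, and the time at which the request was sent.
 */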
struct nfs4_renewdata {
struct nfs_client *client;
unsigned long timestamp;
};
/*
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
* standalone procedure for queueing an asynchronous RENEW.
*/
static void nfs4_renew_release(void *calldata)
{
struct nfs4_renewdata *data = calldata;
struct nfs_client *clp = data->client;
if (atomic_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
kfree(data);
}
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
struct nfs4_renewdata *data = calldata;
struct nfs_client *clp = data->client;
unsigned long timestamp = data->timestamp;
if (task->tk_status < 0) {
/* Unless we're shutting down, schedule state recovery! */
if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
return;
if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
nfs4_schedule_lease_recovery(clp);
return;
}
nfs4_schedule_path_down_recovery(clp);
}
do_renew_lease(clp, timestamp);
}
static const struct rpc_call_ops nfs4_renew_ops = {
.rpc_call_done = nfs4_renew_done,
.rpc_release = nfs4_renew_release,
};
static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
.rpc_cred = cred,
};
struct nfs4_renewdata *data;
if (renew_flags == 0)
return 0;
if (!atomic_inc_not_zero(&clp->cl_count))
return -EIO;
data = kmalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
data->client = clp;
data->timestamp = jiffies;
return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
&nfs4_renew_ops, data);
}
static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
.rpc_cred = cred,
};
unsigned long now = jiffies;
int status;
status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
if (status < 0)
return status;
do_renew_lease(clp, now);
return 0;
}
static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
return (server->caps & NFS_CAP_ACLS)
&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
* it's OK to put sizeof(struct page *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
* the stack.
*/
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
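/*
 * Map a kernel-virtual buffer onto an array of page pointers for the
 * xdr layer. buf_to_pages_noslab() below is a copying variant for
 * buffers that cannot be handed to the network stack directly.
 */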
static void buf_to_pages(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
const void *p = buf;
*pgbase = offset_in_page(buf);
p -= *pgbase;
while (p < buf + buflen) {
*(pages++) = virt_to_page(p);
p += PAGE_CACHE_SIZE;
}
}
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
struct page *newpage, **spages;
int rc = 0;
size_t len;
spages = pages;
do {
len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
newpage = alloc_page(GFP_KERNEL);
if (newpage == NULL)
goto unwind;
memcpy(page_address(newpage), buf, len);
buf += len;
buflen -= len;
*pages++ = newpage;
rc++;
} while (buflen != 0);
return rc;
unwind:
for (; rc > 0; rc--)
__free_page(spages[rc-1]);
return -ENOMEM;
}
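/*
 * Cached ACL data, hung off the nfs_inode. When the ACL is too large
 * to cache we remember only its length (cached == 0), which is still
 * enough to answer a zero-length getxattr probe.
 */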
struct nfs4_cached_acl {
int cached;
size_t len;
char data[0];
};
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
kfree(nfsi->nfs4_acl);
nfsi->nfs4_acl = acl;
spin_unlock(&inode->i_lock);
}
static void nfs4_zap_acl_attr(struct inode *inode)
{
nfs4_set_cached_acl(inode, NULL);
}
static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs4_cached_acl *acl;
int ret = -ENOENT;
spin_lock(&inode->i_lock);
acl = nfsi->nfs4_acl;
if (acl == NULL)
goto out;
if (buf == NULL) /* user is just asking for length */
goto out_len;
if (acl->cached == 0)
goto out;
ret = -ERANGE; /* see getxattr(2) man page */
if (acl->len > buflen)
goto out;
memcpy(buf, acl->data, acl->len);
out_len:
ret = acl->len;
out:
spin_unlock(&inode->i_lock);
return ret;
}
static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
{
struct nfs4_cached_acl *acl;
if (buf && acl_len <= PAGE_SIZE) {
acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 1;
memcpy(acl->data, buf, acl_len);
} else {
acl = kmalloc(sizeof(*acl), GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 0;
}
acl->len = acl_len;
out:
nfs4_set_cached_acl(inode, acl);
}
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
struct page *localpage = NULL;
int ret;
if (buflen < PAGE_SIZE) {
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
localpage = alloc_page(GFP_KERNEL);
if (localpage == NULL)
return -ENOMEM;
resp_buf = page_address(localpage);
args.acl_pages[0] = localpage;
args.acl_pgbase = 0;
args.acl_len = PAGE_SIZE;
} else {
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
}
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
if (res.acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, res.acl_len);
else
nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
if (buf) {
ret = -ERANGE;
if (res.acl_len > buflen)
goto out_free;
if (localpage)
memcpy(buf, resp_buf, res.acl_len);
}
ret = res.acl_len;
out_free:
if (localpage)
__free_page(localpage);
return ret;
}
static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct nfs4_exception exception = { };
ssize_t ret;
do {
ret = __nfs4_get_acl_uncached(inode, buf, buflen);
if (ret >= 0)
break;
ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
} while (exception.retry);
return ret;
}
static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
int ret;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
ret = nfs_revalidate_inode(server, inode);
if (ret < 0)
return ret;
if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
return ret;
return nfs4_get_acl_uncached(inode, buf, buflen);
}
static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_setaclargs arg = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_setaclres res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
.rpc_argp = &arg,
.rpc_resp = &res,
};
int ret, i;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
if (i < 0)
return i;
nfs_inode_return_delegation(inode);
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
/*
* Free each page after tx, so the only ref left is
* held by the network stack
*/
for (; i > 0; i--)
put_page(pages[i-1]);
/*
* Acl update can result in inode attribute update.
* so mark the attribute cache invalid.
*/
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
spin_unlock(&inode->i_lock);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
return ret;
}
static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(inode),
__nfs4_proc_set_acl(inode, buf, buflen),
&exception);
} while (exception.retry);
return err;
}
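/*
 * Handle errors from an asynchronous RPC call: returns -EAGAIN (with
 * tk_status cleared) if the task should be restarted after recovery or
 * a delay, and 0 if the error should be passed back to the caller.
 */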
static int
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
struct nfs_client *clp = server->nfs_client;
if (task->tk_status >= 0)
return 0;
	switch (task->tk_status) {
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
if (state == NULL)
break;
nfs4_schedule_stateid_recovery(server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
if (state != NULL)
nfs4_schedule_stateid_recovery(server, state);
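			/* fall through */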
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_STALE_CLIENTID:
nfs4_schedule_lease_recovery(clp);
goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
nfs4_schedule_session_recovery(clp->cl_session);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
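			/* fall through */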
case -NFS4ERR_GRACE:
case -EKEYEXPIRED:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
task->tk_status = 0;
return -EAGAIN;
case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
task->tk_status = 0;
return -EAGAIN;
}
task->tk_status = nfs4_map_errors(task->tk_status);
return 0;
wait_on_recovery:
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
}
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
unsigned short port, struct rpc_cred *cred,
struct nfs4_setclientid_res *res)
{
nfs4_verifier sc_verifier;
struct nfs4_setclientid setclientid = {
.sc_verifier = &sc_verifier,
.sc_prog = program,
.sc_cb_ident = clp->cl_cb_ident,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
.rpc_argp = &setclientid,
.rpc_resp = res,
.rpc_cred = cred,
};
__be32 *p;
int loop = 0;
int status;
	p = (__be32 *)sc_verifier.data;
	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
	for (;;) {
setclientid.sc_name_len = scnprintf(setclientid.sc_name,
sizeof(setclientid.sc_name), "%s/%s %s %s %u",
clp->cl_ipaddr,
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_PROTO),
clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_id_uniquifier);
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
sizeof(setclientid.sc_netid),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_NETID));
setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
sizeof(setclientid.sc_uaddr), "%s.%u.%u",
clp->cl_ipaddr, port >> 8, port & 255);
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status != -NFS4ERR_CLID_INUSE)
break;
if (loop != 0) {
++clp->cl_id_uniquifier;
break;
}
++loop;
ssleep(clp->cl_lease_time / HZ + 1);
}
return status;
}
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
struct nfs4_setclientid_res *arg,
struct rpc_cred *cred)
{
struct nfs_fsinfo fsinfo;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
.rpc_argp = arg,
.rpc_resp = &fsinfo,
.rpc_cred = cred,
};
unsigned long now;
int status;
now = jiffies;
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status == 0) {
spin_lock(&clp->cl_lock);
clp->cl_lease_time = fsinfo.lease_time * HZ;
clp->cl_last_renewal = now;
spin_unlock(&clp->cl_lock);
}
return status;
}
struct nfs4_delegreturndata {
struct nfs4_delegreturnargs args;
struct nfs4_delegreturnres res;
struct nfs_fh fh;
nfs4_stateid stateid;
unsigned long timestamp;
struct nfs_fattr fattr;
int rpc_status;
};
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
switch (task->tk_status) {
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
case 0:
renew_lease(data->res.server, data->timestamp);
break;
default:
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
-EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
data->rpc_status = task->tk_status;
}
static void nfs4_delegreturn_release(void *calldata)
{
kfree(calldata);
}
#if defined(CONFIG_NFS_V4_1)
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
struct nfs4_delegreturndata *d_data;
d_data = (struct nfs4_delegreturndata *)data;
if (nfs4_setup_sequence(d_data->res.server,
&d_data->args.seq_args,
&d_data->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
static const struct rpc_call_ops nfs4_delegreturn_ops = {
#if defined(CONFIG_NFS_V4_1)
.rpc_call_prepare = nfs4_delegreturn_prepare,
#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs4_delegreturn_done,
.rpc_release = nfs4_delegreturn_release,
};
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
struct nfs4_delegreturndata *data;
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
.rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_delegreturn_ops,
.flags = RPC_TASK_ASYNC,
};
int status = 0;
data = kzalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
data->args.fhandle = &data->fh;
data->args.stateid = &data->stateid;
data->args.bitmask = server->attr_bitmask;
nfs_copy_fh(&data->fh, NFS_FH(inode));
memcpy(&data->stateid, stateid, sizeof(data->stateid));
data->res.fattr = &data->fattr;
data->res.server = server;
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
if (!issync)
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0)
goto out;
status = data->rpc_status;
if (status != 0)
goto out;
nfs_refresh_inode(inode, &data->fattr);
out:
rpc_put_task(task);
return status;
}
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
switch (err) {
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
case 0:
return 0;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
/*
* sleep, with exponential backoff, and retry the LOCK operation.
*/
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
schedule_timeout_killable(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
return timeout;
}
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct inode *inode = state->inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
struct nfs_lockt_args arg = {
.fh = NFS_FH(inode),
.fl = request,
};
struct nfs_lockt_res res = {
.denied = request,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
.rpc_argp = &arg,
.rpc_resp = &res,
.rpc_cred = state->owner->so_cred,
};
struct nfs4_lock_state *lsp;
int status;
arg.lock_owner.clientid = clp->cl_clientid;
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_id.id;
arg.lock_owner.s_dev = server->s_dev;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
switch (status) {
case 0:
request->fl_type = F_UNLCK;
break;
case -NFS4ERR_DENIED:
status = 0;
}
request->fl_ops->fl_release_private(request);
out:
return status;
}
static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(state->inode),
_nfs4_proc_getlk(state, cmd, request),
&exception);
} while (exception.retry);
return err;
}
static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
int res = 0;
switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
case FL_POSIX:
res = posix_lock_file_wait(file, fl);
break;
case FL_FLOCK:
res = flock_lock_file_wait(file, fl);
break;
default:
BUG();
}
return res;
}
struct nfs4_unlockdata {
struct nfs_locku_args arg;
struct nfs_locku_res res;
struct nfs4_lock_state *lsp;
struct nfs_open_context *ctx;
struct file_lock fl;
const struct nfs_server *server;
unsigned long timestamp;
};
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs_open_context *ctx,
struct nfs4_lock_state *lsp,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *p;
struct inode *inode = lsp->ls_state->inode;
p = kzalloc(sizeof(*p), GFP_NOFS);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
p->server = NFS_SERVER(inode);
return p;
}
static void nfs4_locku_release_calldata(void *data)
{
struct nfs4_unlockdata *calldata = data;
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_lock_state(calldata->lsp);
put_nfs_open_context(calldata->ctx);
kfree(calldata);
}
static void nfs4_locku_done(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
switch (task->tk_status) {
case 0:
memcpy(calldata->lsp->ls_stateid.data,
calldata->res.stateid.data,
sizeof(calldata->lsp->ls_stateid.data));
renew_lease(calldata->server, calldata->timestamp);
break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
rpc_restart_call_prepare(task);
}
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
task->tk_action = NULL;
return;
}
calldata->timestamp = jiffies;
if (nfs4_setup_sequence(calldata->server,
&calldata->arg.seq_args,
&calldata->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
static const struct rpc_call_ops nfs4_locku_ops = {
.rpc_call_prepare = nfs4_locku_prepare,
.rpc_call_done = nfs4_locku_done,
.rpc_release = nfs4_locku_release_calldata,
};
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
struct nfs_open_context *ctx,
struct nfs4_lock_state *lsp,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
.rpc_cred = ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_locku_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
/* Ensure this is an unlock - when canceling a lock, the
* canceled lock is passed in, and it won't be an unlock.
*/
fl->fl_type = F_UNLCK;
data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
if (data == NULL) {
nfs_free_seqid(seqid);
return ERR_PTR(-ENOMEM);
}
msg.rpc_argp = &data->arg;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
return rpc_run_task(&task_setup_data);
}
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_seqid *seqid;
struct nfs4_lock_state *lsp;
struct rpc_task *task;
int status = 0;
unsigned char fl_flags = request->fl_flags;
status = nfs4_set_lock_state(state, request);
/* Unlock _before_ we do the RPC call */
request->fl_flags |= FL_EXISTS;
down_read(&nfsi->rwsem);
if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
up_read(&nfsi->rwsem);
goto out;
}
up_read(&nfsi->rwsem);
if (status != 0)
goto out;
/* Is this a delegated lock? */
if (test_bit(NFS_DELEGATED_STATE, &state->flags))
goto out;
lsp = request->fl_u.nfs4_fl.owner;
seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
if (seqid == NULL)
goto out;
task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
status = PTR_ERR(task);
if (IS_ERR(task))
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
rpc_put_task(task);
out:
request->fl_flags = fl_flags;
return status;
}
struct nfs4_lockdata {
struct nfs_lock_args arg;
struct nfs_lock_res res;
struct nfs4_lock_state *lsp;
struct nfs_open_context *ctx;
struct file_lock fl;
unsigned long timestamp;
int rpc_status;
int cancelled;
struct nfs_server *server;
};
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
gfp_t gfp_mask)
{
struct nfs4_lockdata *p;
struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
p = kzalloc(sizeof(*p), gfp_mask);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
if (p->arg.open_seqid == NULL)
goto out_free;
p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
if (p->arg.lock_seqid == NULL)
goto out_free_seqid;
p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
p->arg.lock_owner.s_dev = server->s_dev;
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
nfs_free_seqid(p->arg.open_seqid);
out_free:
kfree(p);
return NULL;
}
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
struct nfs4_state *state = data->lsp->ls_state;
dprintk("%s: begin!\n", __func__);
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
return;
/* Do we need to do an open_to_lock_owner? */
if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
return;
data->arg.open_stateid = &state->stateid;
data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid;
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
if (nfs4_setup_sequence(data->server,
&data->arg.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
{
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
nfs4_lock_prepare(task, calldata);
}
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
dprintk("%s: begin!\n", __func__);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
data->rpc_status = task->tk_status;
if (data->arg.new_lock_owner != 0) {
if (data->rpc_status == 0)
nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
else
goto out;
}
if (data->rpc_status == 0) {
memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
sizeof(data->lsp->ls_stateid.data));
data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
}
out:
dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
static void nfs4_lock_release(void *calldata)
{
struct nfs4_lockdata *data = calldata;
dprintk("%s: begin!\n", __func__);
nfs_free_seqid(data->arg.open_seqid);
if (data->cancelled != 0) {
struct rpc_task *task;
task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
data->arg.lock_seqid);
if (!IS_ERR(task))
rpc_put_task_async(task);
dprintk("%s: cancelling lock!\n", __func__);
} else
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
kfree(data);
dprintk("%s: done!\n", __func__);
}
static const struct rpc_call_ops nfs4_lock_ops = {
.rpc_call_prepare = nfs4_lock_prepare,
.rpc_call_done = nfs4_lock_done,
.rpc_release = nfs4_lock_release,
};
static const struct rpc_call_ops nfs4_recover_lock_ops = {
.rpc_call_prepare = nfs4_recover_lock_prepare,
.rpc_call_done = nfs4_lock_done,
.rpc_release = nfs4_lock_release,
};
static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
{
switch (error) {
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
if (new_lock_owner != 0 ||
(lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
nfs4_schedule_stateid_recovery(server, lsp->ls_state);
break;
case -NFS4ERR_STALE_STATEID:
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
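		/* fall through */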
case -NFS4ERR_EXPIRED:
nfs4_schedule_lease_recovery(server->nfs_client);
	}
}
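/*
 * Send a LOCK request. recovery_type selects between a new lock
 * (NFS_LOCK_NEW), a reclaim during the grace period (NFS_LOCK_RECLAIM)
 * and lease-expiry recovery (NFS_LOCK_EXPIRED); recovery requests run
 * at privileged RPC priority and allocate with GFP_NOFS.
 */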
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
struct nfs4_lockdata *data;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
.rpc_cred = state->owner->so_cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_lock_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int ret;
dprintk("%s: begin!\n", __func__);
data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
fl->fl_u.nfs4_fl.owner,
recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
if (data == NULL)
return -ENOMEM;
if (IS_SETLKW(cmd))
data->arg.block = 1;
if (recovery_type > NFS_LOCK_NEW) {
if (recovery_type == NFS_LOCK_RECLAIM)
data->arg.reclaim = NFS_LOCK_RECLAIM;
task_setup_data.callback_ops = &nfs4_recover_lock_ops;
}
msg.rpc_argp = &data->arg;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
if (ret)
nfs4_handle_setlk_error(data->server, data->lsp,
data->arg.new_lock_owner, ret);
} else
data->cancelled = 1;
rpc_put_task(task);
dprintk("%s: done, ret = %d!\n", __func__, ret);
return ret;
}
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
/* Cache the lock if possible... */
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
err = nfs4_set_lock_state(state, request);
if (err != 0)
return err;
do {
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
switch (err) {
default:
goto out;
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
nfs4_handle_exception(server, err, &exception);
err = 0;
}
} while (exception.retry);
out:
return err;
}
#if defined(CONFIG_NFS_V4_1)
static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
int status;
struct nfs_server *server = NFS_SERVER(state->inode);
status = nfs41_test_stateid(server, state);
if (status == NFS_OK)
return 0;
nfs41_free_stateid(server, state);
return nfs4_lock_expired(state, request);
}
#endif
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
int status = -ENOLCK;
if ((fl_flags & FL_POSIX) &&
!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
goto out;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
request->fl_flags |= FL_ACCESS;
status = do_vfs_lock(request->fl_file, request);
if (status < 0)
goto out;
down_read(&nfsi->rwsem);
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
/* Yes: cache locks! */
/* ...but avoid races with delegation recall... */
request->fl_flags = fl_flags & ~FL_SLEEP;
status = do_vfs_lock(request->fl_file, request);
goto out_unlock;
}
status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
if (status != 0)
goto out_unlock;
/* Note: we always want to sleep here! */
request->fl_flags = fl_flags | FL_SLEEP;
if (do_vfs_lock(request->fl_file, request) < 0)
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
out_unlock:
up_read(&nfsi->rwsem);
out:
request->fl_flags = fl_flags;
return status;
}
static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_proc_setlk(state, cmd, request);
if (err == -NFS4ERR_DENIED)
err = -EAGAIN;
err = nfs4_handle_exception(NFS_SERVER(state->inode),
err, &exception);
} while (exception.retry);
return err;
}
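/*
 * Entry point for the NFSv4 lock operations: dispatches GETLK, unlock
 * and SETLK/SETLKW requests, retrying blocking lock requests with
 * exponential backoff while the lock is held by another owner.
 */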
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
struct nfs_open_context *ctx;
struct nfs4_state *state;
unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
int status;
/* verify open state */
ctx = nfs_file_open_context(filp);
state = ctx->state;
if (request->fl_start < 0 || request->fl_end < 0)
return -EINVAL;
if (IS_GETLK(cmd)) {
if (state != NULL)
return nfs4_proc_getlk(state, F_GETLK, request);
return 0;
}
if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
return -EINVAL;
if (request->fl_type == F_UNLCK) {
if (state != NULL)
return nfs4_proc_unlck(state, cmd, request);
return 0;
}
if (state == NULL)
return -ENOLCK;
do {
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
break;
timeout = nfs4_set_lock_task_retry(timeout);
status = -ERESTARTSYS;
if (signalled())
break;
	} while (status < 0);
return status;
}
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
err = nfs4_set_lock_state(state, fl);
if (err != 0)
goto out;
do {
err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
switch (err) {
default:
printk(KERN_ERR "%s: unhandled error %d.\n",
__func__, err);
case 0:
case -ESTALE:
goto out;
case -NFS4ERR_EXPIRED:
nfs4_schedule_stateid_recovery(server, state);
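			/* fall through */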
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
nfs4_schedule_lease_recovery(server->nfs_client);
goto out;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
goto out;
case -ERESTARTSYS:
/*
* The show must go on: exit, but mark the
* stateid as needing recovery.
*/
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OPENMODE:
nfs4_schedule_stateid_recovery(server, state);
err = 0;
goto out;
case -EKEYEXPIRED:
/*
* User RPCSEC_GSS context has expired.
* We cannot recover this stateid now, so
* skip it and allow recovery thread to
* proceed.
*/
err = 0;
goto out;
case -ENOMEM:
case -NFS4ERR_DENIED:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
err = 0;
goto out;
case -NFS4ERR_DELAY:
break;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
static void nfs4_release_lockowner_release(void *calldata)
{
kfree(calldata);
}
const struct rpc_call_ops nfs4_release_lockowner_ops = {
.rpc_release = nfs4_release_lockowner_release,
};
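/*
 * Issue an asynchronous RELEASE_LOCKOWNER call. This operation only
 * exists in NFSv4.0, so it is skipped for later minor versions.
 */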
void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
{
struct nfs_server *server = lsp->ls_state->owner->so_server;
struct nfs_release_lockowner_args *args;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
};
if (server->nfs_client->cl_mvops->minor_version != 0)
return;
args = kmalloc(sizeof(*args), GFP_NOFS);
if (!args)
return;
args->lock_owner.clientid = server->nfs_client->cl_clientid;
args->lock_owner.id = lsp->ls_id.id;
args->lock_owner.s_dev = server->s_dev;
msg.rpc_argp = args;
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
}
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
const void *buf, size_t buflen,
int flags, int type)
{
if (strcmp(key, "") != 0)
return -EINVAL;
return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
}
static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
void *buf, size_t buflen, int type)
{
if (strcmp(key, "") != 0)
return -EINVAL;
return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
}
static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
size_t list_len, const char *name,
size_t name_len, int type)
{
size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
return 0;
if (list && len <= list_len)
memcpy(list, XATTR_NAME_NFSV4_ACL, len);
return len;
}
/*
 * A referral entry carries no real attributes, so synthesize enough of
 * them (type, mode, nlink) for nfs_fhget() to instantiate the inode.
 * nfs_fhget() will use either the mounted_on_fileid or the fileid.
 */
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
{
if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
(fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
(fattr->valid & NFS_ATTR_FATTR_FSID) &&
(fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
return;
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
NFS_ATTR_FATTR_NLINK;
fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
fattr->nlink = 2;
}
int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page)
{
struct nfs_server *server = NFS_SERVER(dir);
u32 bitmask[2] = {
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
};
struct nfs4_fs_locations_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
.page = page,
.bitmask = bitmask,
};
struct nfs4_fs_locations_res res = {
.fs_locations = fs_locations,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("%s: start\n", __func__);
/* Ask for the fileid of the absent filesystem if mounted_on_fileid
* is not supported */
if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
else
bitmask[0] |= FATTR4_WORD0_FILEID;
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
int status;
struct nfs4_secinfo_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
};
struct nfs4_secinfo_res res = {
.flavors = flavors,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
dprintk("NFS call secinfo %s\n", name->name);
status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply secinfo: %d\n", status);
return status;
}
int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_secinfo(dir, name, flavors),
&exception);
} while (exception.retry);
return err;
}
#ifdef CONFIG_NFS_V4_1
/*
 * Check the exchange flags returned by the server for invalid flags: any
 * flag outside the valid mask, both the PNFS and NON_PNFS flags set, or
 * none of the NON_PNFS, PNFS, or DS flags set.
 */
static int nfs4_check_cl_exchange_flags(u32 flags)
{
if (flags & ~EXCHGID4_FLAG_MASK_R)
goto out_inval;
if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
(flags & EXCHGID4_FLAG_USE_NON_PNFS))
goto out_inval;
	if (!(flags & EXCHGID4_FLAG_MASK_PNFS))
goto out_inval;
return NFS_OK;
out_inval:
return -NFS4ERR_INVAL;
}
static bool
nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
{
if (a->server_scope_sz == b->server_scope_sz &&
memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
return true;
return false;
}
/*
* nfs4_proc_exchange_id()
*
* Since the clientid has expired, all compounds using sessions
* associated with the stale clientid will be returning
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
* be in some phase of session reset.
*/
int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
{
nfs4_verifier verifier;
struct nfs41_exchange_id_args args = {
.client = clp,
.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
};
struct nfs41_exchange_id_res res = {
.client = clp,
};
int status;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = cred,
};
__be32 *p;
dprintk("--> %s\n", __func__);
BUG_ON(clp == NULL);
	p = (__be32 *)verifier.data;
*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
*p = htonl((u32)clp->cl_boot_time.tv_nsec);
args.verifier = &verifier;
args.id_len = scnprintf(args.id, sizeof(args.id),
"%s/%s.%s/%u",
clp->cl_ipaddr,
init_utsname()->nodename,
init_utsname()->domainname,
clp->cl_rpcclient->cl_auth->au_flavor);
res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
if (unlikely(!res.server_scope))
return -ENOMEM;
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (!status)
status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
if (!status) {
if (clp->server_scope &&
!nfs41_same_server_scope(clp->server_scope,
res.server_scope)) {
dprintk("%s: server_scope mismatch detected\n",
__func__);
set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
kfree(clp->server_scope);
clp->server_scope = NULL;
}
if (!clp->server_scope)
clp->server_scope = res.server_scope;
else
kfree(res.server_scope);
}
dprintk("<-- %s status= %d\n", __func__, status);
return status;
}
struct nfs4_get_lease_time_data {
struct nfs4_get_lease_time_args *args;
struct nfs4_get_lease_time_res *res;
struct nfs_client *clp;
};
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
void *calldata)
{
int ret;
struct nfs4_get_lease_time_data *data =
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
/* just setup sequence, do not trigger session recovery
since we're invoked within one */
ret = nfs41_setup_sequence(data->clp->cl_session,
&data->args->la_seq_args,
&data->res->lr_seq_res, 0, task);
BUG_ON(ret == -EAGAIN);
rpc_call_start(task);
dprintk("<-- %s\n", __func__);
}
/*
* Called from nfs4_state_manager thread for session setup, so don't recover
* from sequence operation or clientid errors.
*/
static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
{
struct nfs4_get_lease_time_data *data =
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
return;
switch (task->tk_status) {
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
rpc_restart_call_prepare(task);
return;
}
dprintk("<-- %s\n", __func__);
}
static const struct rpc_call_ops nfs4_get_lease_time_ops = {
.rpc_call_prepare = nfs4_get_lease_time_prepare,
.rpc_call_done = nfs4_get_lease_time_done,
};
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
struct rpc_task *task;
struct nfs4_get_lease_time_args args;
struct nfs4_get_lease_time_res res = {
.lr_fsinfo = fsinfo,
};
struct nfs4_get_lease_time_data data = {
.args = &args,
.res = &res,
.clp = clp,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
.rpc_argp = &args,
.rpc_resp = &res,
};
struct rpc_task_setup task_setup = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_get_lease_time_ops,
.callback_data = &data,
.flags = RPC_TASK_TIMEOUT,
};
int status;
dprintk("--> %s\n", __func__);
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
status = PTR_ERR(task);
else {
status = task->tk_status;
rpc_put_task(task);
}
dprintk("<-- %s return %d\n", __func__, status);
return status;
}
/*
* Reset a slot table
*/
static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
int ivalue)
{
struct nfs4_slot *new = NULL;
int i;
int ret = 0;
dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
max_reqs, tbl->max_slots);
/* Does the newly negotiated max_reqs match the existing slot table? */
if (max_reqs != tbl->max_slots) {
ret = -ENOMEM;
new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
GFP_NOFS);
if (!new)
goto out;
ret = 0;
kfree(tbl->slots);
}
spin_lock(&tbl->slot_tbl_lock);
if (new) {
tbl->slots = new;
tbl->max_slots = max_reqs;
}
for (i = 0; i < tbl->max_slots; ++i)
tbl->slots[i].seq_nr = ivalue;
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
tbl, tbl->slots, tbl->max_slots);
out:
dprintk("<-- %s: return %d\n", __func__, ret);
return ret;
}
/* Destroy the slot table */
static void nfs4_destroy_slot_tables(struct nfs4_session *session)
{
if (session->fc_slot_table.slots != NULL) {
kfree(session->fc_slot_table.slots);
session->fc_slot_table.slots = NULL;
}
if (session->bc_slot_table.slots != NULL) {
kfree(session->bc_slot_table.slots);
session->bc_slot_table.slots = NULL;
}
return;
}
/*
* Initialize slot table
*/
static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
int max_slots, int ivalue)
{
struct nfs4_slot *slot;
int ret = -ENOMEM;
BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
if (!slot)
goto out;
ret = 0;
spin_lock(&tbl->slot_tbl_lock);
tbl->max_slots = max_slots;
tbl->slots = slot;
tbl->highest_used_slotid = -1; /* no slot is currently used */
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
tbl, tbl->slots, tbl->max_slots);
out:
dprintk("<-- %s: return %d\n", __func__, ret);
return ret;
}
/*
* Initialize or reset the forechannel and backchannel tables
*/
static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
struct nfs4_slot_table *tbl;
int status;
dprintk("--> %s\n", __func__);
/* Fore channel */
tbl = &ses->fc_slot_table;
if (tbl->slots == NULL) {
status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status) /* -ENOMEM */
return status;
} else {
status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status)
return status;
}
/* Back channel */
tbl = &ses->bc_slot_table;
if (tbl->slots == NULL) {
status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
if (status)
/* Fore and back channel share a connection so get
* both slot tables or neither */
nfs4_destroy_slot_tables(ses);
} else
status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
return status;
}
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
struct nfs4_session *session;
struct nfs4_slot_table *tbl;
session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
if (!session)
return NULL;
tbl = &session->fc_slot_table;
tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
init_completion(&tbl->complete);
tbl = &session->bc_slot_table;
tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
init_completion(&tbl->complete);
session->session_state = 1<<NFS4_SESSION_INITING;
session->clp = clp;
return session;
}
void nfs4_destroy_session(struct nfs4_session *session)
{
nfs4_proc_destroy_session(session);
dprintk("%s Destroy backchannel for xprt %p\n",
__func__, session->clp->cl_rpcclient->cl_xprt);
xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
NFS41_BC_MIN_CALLBACKS);
nfs4_destroy_slot_tables(session);
kfree(session);
}
/*
 * Initialize the values to be used by the client in CREATE_SESSION.
* If nfs4_init_session set the fore channel request and response sizes,
* use them.
*
* Set the back channel max_resp_sz_cached to zero to force the client to
* always set csa_cachethis to FALSE because the current implementation
* of the back channel DRC only supports caching the CB_SEQUENCE operation.
*/
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
struct nfs4_session *session = args->client->cl_session;
unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
mxresp_sz = session->fc_attrs.max_resp_sz;
if (mxrqst_sz == 0)
mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
if (mxresp_sz == 0)
mxresp_sz = NFS_MAX_FILE_IO_SIZE;
/* Fore channel attributes */
args->fc_attrs.max_rqst_sz = mxrqst_sz;
args->fc_attrs.max_resp_sz = mxresp_sz;
args->fc_attrs.max_ops = NFS4_MAX_OPS;
args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
"max_ops=%u max_reqs=%u\n",
__func__,
args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
/* Back channel attributes */
args->bc_attrs.max_rqst_sz = PAGE_SIZE;
args->bc_attrs.max_resp_sz = PAGE_SIZE;
args->bc_attrs.max_resp_sz_cached = 0;
args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
args->bc_attrs.max_reqs = 1;
dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
__func__,
args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
args->bc_attrs.max_reqs);
}
static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
struct nfs4_channel_attrs *sent = &args->fc_attrs;
struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
if (rcvd->max_resp_sz > sent->max_resp_sz)
return -EINVAL;
/*
* Our requested max_ops is the minimum we need; we're not
* prepared to break up compounds into smaller pieces than that.
* So, no point even trying to continue if the server won't
* cooperate:
*/
if (rcvd->max_ops < sent->max_ops)
return -EINVAL;
if (rcvd->max_reqs == 0)
return -EINVAL;
return 0;
}
static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
struct nfs4_channel_attrs *sent = &args->bc_attrs;
struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
if (rcvd->max_rqst_sz > sent->max_rqst_sz)
return -EINVAL;
if (rcvd->max_resp_sz < sent->max_resp_sz)
return -EINVAL;
if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
return -EINVAL;
/* These would render the backchannel useless: */
if (rcvd->max_ops == 0)
return -EINVAL;
if (rcvd->max_reqs == 0)
return -EINVAL;
return 0;
}
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
struct nfs4_session *session)
{
int ret;
ret = nfs4_verify_fore_channel_attrs(args, session);
if (ret)
return ret;
return nfs4_verify_back_channel_attrs(args, session);
}
static int _nfs4_proc_create_session(struct nfs_client *clp)
{
struct nfs4_session *session = clp->cl_session;
struct nfs41_create_session_args args = {
.client = clp,
.cb_program = NFS4_CALLBACK,
};
struct nfs41_create_session_res res = {
.client = clp,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
nfs4_init_channel_attrs(&args);
args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (!status)
/* Verify the session's negotiated channel_attrs values */
status = nfs4_verify_channel_attrs(&args, session);
if (!status) {
/* Increment the clientid slot sequence id */
clp->cl_seqid++;
}
return status;
}
/*
* Issues a CREATE_SESSION operation to the server.
* It is the responsibility of the caller to verify the session is
* expired before calling this routine.
*/
int nfs4_proc_create_session(struct nfs_client *clp)
{
int status;
unsigned *ptr;
struct nfs4_session *session = clp->cl_session;
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
status = _nfs4_proc_create_session(clp);
if (status)
goto out;
/* Init or reset the session slot tables */
status = nfs4_setup_session_slot_tables(session);
dprintk("slot table setup returned %d\n", status);
if (status)
goto out;
ptr = (unsigned *)&session->sess_id.data[0];
dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
out:
dprintk("<-- %s\n", __func__);
return status;
}
/*
* Issue the over-the-wire RPC DESTROY_SESSION.
* The caller must serialize access to this routine.
*/
int nfs4_proc_destroy_session(struct nfs4_session *session)
{
int status = 0;
struct rpc_message msg;
dprintk("--> nfs4_proc_destroy_session\n");
/* session is still being setup */
if (session->clp->cl_cons_state != NFS_CS_READY)
return status;
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
msg.rpc_argp = session;
msg.rpc_resp = NULL;
msg.rpc_cred = NULL;
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status)
printk(KERN_WARNING
"Got error %d from the server on DESTROY_SESSION. "
"Session has been destroyed regardless...\n", status);
dprintk("<-- nfs4_proc_destroy_session\n");
return status;
}
int nfs4_init_session(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_session *session;
unsigned int rsize, wsize;
int ret;
if (!nfs4_has_session(clp))
return 0;
session = clp->cl_session;
if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
return 0;
rsize = server->rsize;
if (rsize == 0)
rsize = NFS_MAX_FILE_IO_SIZE;
wsize = server->wsize;
if (wsize == 0)
wsize = NFS_MAX_FILE_IO_SIZE;
session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
ret = nfs4_recover_expired_lease(server);
if (!ret)
ret = nfs4_check_client_ready(clp);
return ret;
}
int nfs4_init_ds_session(struct nfs_client *clp)
{
struct nfs4_session *session = clp->cl_session;
int ret;
if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
return 0;
	ret = nfs4_client_recover_expired_lease(clp);
	if (!ret) {
		/* Test for the DS role */
		if (!is_ds_client(clp))
			ret = -ENODEV;
	}
if (!ret)
ret = nfs4_check_client_ready(clp);
return ret;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
/*
* Renew the cl_session lease.
*/
struct nfs4_sequence_data {
struct nfs_client *clp;
struct nfs4_sequence_args args;
struct nfs4_sequence_res res;
};
static void nfs41_sequence_release(void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
if (atomic_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
kfree(calldata);
}
static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch (task->tk_status) {
case -NFS4ERR_DELAY:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return -EAGAIN;
default:
nfs4_schedule_lease_recovery(clp);
}
return 0;
}
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
return;
if (task->tk_status < 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (atomic_read(&clp->cl_count) == 1)
goto out;
if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
dprintk("<-- %s\n", __func__);
}
static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
{
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
struct nfs4_sequence_args *args;
struct nfs4_sequence_res *res;
args = task->tk_msg.rpc_argp;
res = task->tk_msg.rpc_resp;
if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task))
return;
rpc_call_start(task);
}
static const struct rpc_call_ops nfs41_sequence_ops = {
.rpc_call_done = nfs41_sequence_call_done,
.rpc_call_prepare = nfs41_sequence_prepare,
.rpc_release = nfs41_sequence_release,
};
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
struct nfs4_sequence_data *calldata;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
.rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs41_sequence_ops,
.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
};
if (!atomic_inc_not_zero(&clp->cl_count))
return ERR_PTR(-EIO);
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
if (calldata == NULL) {
nfs_put_client(clp);
return ERR_PTR(-ENOMEM);
}
msg.rpc_argp = &calldata->args;
msg.rpc_resp = &calldata->res;
calldata->clp = clp;
task_setup_data.callback_data = calldata;
return rpc_run_task(&task_setup_data);
}
static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
struct rpc_task *task;
int ret = 0;
if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
return 0;
task = _nfs41_proc_sequence(clp, cred);
if (IS_ERR(task))
ret = PTR_ERR(task);
else
rpc_put_task_async(task);
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_task *task;
int ret;
task = _nfs41_proc_sequence(clp, cred);
if (IS_ERR(task)) {
ret = PTR_ERR(task);
goto out;
}
ret = rpc_wait_for_completion_task(task);
if (!ret) {
struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
if (task->tk_status == 0)
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
ret = task->tk_status;
}
rpc_put_task(task);
out:
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
struct nfs4_reclaim_complete_data {
struct nfs_client *clp;
struct nfs41_reclaim_complete_args arg;
struct nfs41_reclaim_complete_res res;
};
static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
if (nfs41_setup_sequence(calldata->clp->cl_session,
&calldata->arg.seq_args,
&calldata->res.seq_res, 0, task))
return;
rpc_call_start(task);
}
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch (task->tk_status) {
case 0:
case -NFS4ERR_COMPLETE_ALREADY:
case -NFS4ERR_WRONG_CRED: /* What to do here? */
break;
case -NFS4ERR_DELAY:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
return -EAGAIN;
default:
nfs4_schedule_lease_recovery(clp);
}
return 0;
}
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
struct nfs_client *clp = calldata->clp;
struct nfs4_sequence_res *res = &calldata->res.seq_res;
dprintk("--> %s\n", __func__);
if (!nfs41_sequence_done(task, res))
return;
if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
dprintk("<-- %s\n", __func__);
}
static void nfs4_free_reclaim_complete_data(void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
kfree(calldata);
}
static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
.rpc_call_prepare = nfs4_reclaim_complete_prepare,
.rpc_call_done = nfs4_reclaim_complete_done,
.rpc_release = nfs4_free_reclaim_complete_data,
};
/*
* Issue a global reclaim complete.
*/
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
struct nfs4_reclaim_complete_data *calldata;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_reclaim_complete_call_ops,
.flags = RPC_TASK_ASYNC,
};
int status = -ENOMEM;
dprintk("--> %s\n", __func__);
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
if (calldata == NULL)
goto out;
calldata->clp = clp;
calldata->arg.one_fs = 0;
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task)) {
status = PTR_ERR(task);
goto out;
}
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
rpc_put_task(task);
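	/* Errors are handled by nfs41_reclaim_complete_handle_errors() in
	 * the rpc_call_done path, so they are not propagated here. */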
return 0;
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
dprintk("--> %s\n", __func__);
	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
* right now covering the LAYOUTGET we are about to send.
* However, that is not so catastrophic, and there seems
* to be no way to prevent it completely.
*/
if (nfs4_setup_sequence(server, &lgp->args.seq_args,
&lgp->res.seq_res, 0, task))
return;
if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
NFS_I(lgp->args.inode)->layout,
lgp->args.ctx->state)) {
rpc_exit(task, NFS4_OK);
return;
}
rpc_call_start(task);
}
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &lgp->res.seq_res))
return;
switch (task->tk_status) {
case 0:
break;
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
task->tk_status = -NFS4ERR_DELAY;
/* Fall through */
default:
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
dprintk("<-- %s\n", __func__);
}
static void nfs4_layoutget_release(void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
dprintk("--> %s\n", __func__);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
static const struct rpc_call_ops nfs4_layoutget_call_ops = {
.rpc_call_prepare = nfs4_layoutget_prepare,
.rpc_call_done = nfs4_layoutget_done,
.rpc_release = nfs4_layoutget_release,
};
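/* Send LAYOUTGET and, on success, hand the reply to pnfs_layout_process() */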
int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
{
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
.rpc_argp = &lgp->args,
.rpc_resp = &lgp->res,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_layoutget_call_ops,
.callback_data = lgp,
.flags = RPC_TASK_ASYNC,
};
int status = 0;
dprintk("--> %s\n", __func__);
lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
if (status == 0)
status = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
static void
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
dprintk("--> %s\n", __func__);
if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
&lrp->res.seq_res, 0, task))
return;
rpc_call_start(task);
}
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
struct nfs_server *server;
struct pnfs_layout_hdr *lo = lrp->args.layout;
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &lrp->res.seq_res))
return;
server = NFS_SERVER(lrp->args.inode);
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
spin_lock(&lo->plh_inode->i_lock);
if (task->tk_status == 0) {
if (lrp->res.lrs_present) {
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
} else
BUG_ON(!list_empty(&lo->plh_segs));
}
lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
dprintk("<-- %s\n", __func__);
}
static void nfs4_layoutreturn_release(void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
dprintk("--> %s\n", __func__);
put_layout_hdr(lrp->args.layout);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
.rpc_call_prepare = nfs4_layoutreturn_prepare,
.rpc_call_done = nfs4_layoutreturn_done,
.rpc_release = nfs4_layoutreturn_release,
};
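/*
 * Send LAYOUTRETURN. The task runs synchronously (RPC_TASK_ASYNC is not
 * set), so tk_status is valid as soon as rpc_run_task() returns.
 */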
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
.rpc_argp = &lrp->args,
.rpc_resp = &lrp->res,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = lrp->clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_layoutreturn_call_ops,
.callback_data = lrp,
};
int status;
dprintk("--> %s\n", __func__);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = task->tk_status;
dprintk("<-- %s status=%d\n", __func__, status);
rpc_put_task(task);
return status;
}
/*
* Retrieve the list of Data Server devices from the MDS.
*/
static int _nfs4_getdevicelist(struct nfs_server *server,
const struct nfs_fh *fh,
struct pnfs_devicelist *devlist)
{
struct nfs4_getdevicelist_args args = {
.fh = fh,
.layoutclass = server->pnfs_curr_ld->id,
};
struct nfs4_getdevicelist_res res = {
.devlist = devlist,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
&res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
int nfs4_proc_getdevicelist(struct nfs_server *server,
const struct nfs_fh *fh,
struct pnfs_devicelist *devlist)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_getdevicelist(server, fh, devlist),
&exception);
} while (exception.retry);
dprintk("%s: err=%d, num_devs=%u\n", __func__,
err, devlist->num_devs);
return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
struct nfs4_getdeviceinfo_args args = {
.pdev = pdev,
};
struct nfs4_getdeviceinfo_res res = {
.pdev = pdev,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
.rpc_argp = &args,
.rpc_resp = &res,
};
int status;
dprintk("--> %s\n", __func__);
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_proc_getdeviceinfo(server, pdev),
&exception);
} while (exception.retry);
return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct nfs_server *server = NFS_SERVER(data->args.inode);
if (nfs4_setup_sequence(server, &data->args.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
}
static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct nfs_server *server = NFS_SERVER(data->args.inode);
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
switch (task->tk_status) { /* Just ignore these failures */
case NFS4ERR_DELEG_REVOKED: /* layout was recalled */
case NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
case NFS4ERR_BADLAYOUT: /* no layout */
case NFS4ERR_GRACE: /* loca_reclaim always false */
task->tk_status = 0;
}
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
if (task->tk_status == 0)
nfs_post_op_update_inode_force_wcc(data->args.inode,
data->res.fattr);
}
static void nfs4_layoutcommit_release(void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct pnfs_layout_segment *lseg, *tmp;
unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
pnfs_cleanup_layoutcommit(data);
/* Matched by references in pnfs_set_layoutcommit */
list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
list_del_init(&lseg->pls_lc_list);
if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
&lseg->pls_flags))
put_lseg(lseg);
}
clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
smp_mb__after_clear_bit();
wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
put_rpccred(data->cred);
kfree(data);
}
static const struct rpc_call_ops nfs4_layoutcommit_ops = {
.rpc_call_prepare = nfs4_layoutcommit_prepare,
.rpc_call_done = nfs4_layoutcommit_done,
.rpc_release = nfs4_layoutcommit_release,
};
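/*
 * When sync is false the caller gets 0 back as soon as the task is
 * running and completion is handled entirely by
 * nfs4_layoutcommit_release(); when sync is true we wait for the task
 * and return its final tk_status.
 */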
int
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
.rpc_client = NFS_CLIENT(data->args.inode),
.rpc_message = &msg,
.callback_ops = &nfs4_layoutcommit_ops,
.callback_data = data,
.flags = RPC_TASK_ASYNC,
};
struct rpc_task *task;
int status = 0;
dprintk("NFS: %4d initiating layoutcommit call. sync %d "
"lbw: %llu inode %lu\n",
data->task.tk_pid, sync,
data->args.lastbytewritten,
data->args.inode->i_ino);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
if (sync == false)
goto out;
status = nfs4_wait_for_completion_rpc_task(task);
if (status != 0)
goto out;
status = task->tk_status;
out:
dprintk("%s: status %d\n", __func__, status);
rpc_put_task(task);
return status;
}
static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
struct nfs41_secinfo_no_name_args args = {
.style = SECINFO_STYLE_CURRENT_FH,
};
struct nfs4_secinfo_res res = {
.flavors = flavors,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
.rpc_argp = &args,
.rpc_resp = &res,
};
return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
static int
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
case -NFS4ERR_NOTSUPP:
break;
default:
err = nfs4_handle_exception(server, err, &exception);
}
} while (exception.retry);
return err;
}
static int
nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int err;
struct page *page;
rpc_authflavor_t flavor;
struct nfs4_secinfo_flavors *flavors;
page = alloc_page(GFP_KERNEL);
if (!page) {
err = -ENOMEM;
goto out;
}
flavors = page_address(page);
err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
/*
* Fall back on "guess and check" method if
* the server doesn't support SECINFO_NO_NAME
*/
if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
err = nfs4_find_root_sec(server, fhandle, info);
goto out_freepage;
}
if (err)
goto out_freepage;
flavor = nfs_find_best_sec(flavors);
if (err == 0)
err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
out_freepage:
put_page(page);
if (err == -EACCES)
return -EPERM;
out:
return err;
}
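/*
 * The TEST_STATEID and FREE_STATEID helpers below clear the session
 * pointers and pass 1 as the final nfs4_call_sync_sequence() argument,
 * which appears to mark the compound as privileged so it can be sent
 * while state recovery is in progress.
 */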
static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state)
{
int status;
struct nfs41_test_stateid_args args = {
.stateid = &state->stateid,
};
struct nfs41_test_stateid_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
.rpc_argp = &args,
.rpc_resp = &res,
};
args.seq_args.sa_session = res.seq_res.sr_session = NULL;
status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
return status;
}
static int nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs41_test_stateid(server, state),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_free_stateid(struct nfs_server *server, struct nfs4_state *state)
{
int status;
struct nfs41_free_stateid_args args = {
.stateid = &state->stateid,
};
struct nfs41_free_stateid_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
.rpc_argp = &args,
.rpc_resp = &res,
};
args.seq_args.sa_session = res.seq_res.sr_session = NULL;
status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
return status;
}
static int nfs41_free_stateid(struct nfs_server *server, struct nfs4_state *state)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(server,
_nfs4_free_stateid(server, state),
&exception);
} while (exception.retry);
return err;
}
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
.establish_clid = nfs4_init_clientid,
.get_clid_cred = nfs4_get_setclientid_cred,
};
#if defined(CONFIG_NFS_V4_1)
struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
.establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
.reclaim_complete = nfs41_proc_reclaim_complete,
};
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
.establish_clid = nfs4_init_clientid,
.get_clid_cred = nfs4_get_setclientid_cred,
};
#if defined(CONFIG_NFS_V4_1)
struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs41_open_expired,
.recover_lock = nfs41_lock_expired,
.establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
.sched_state_renewal = nfs4_proc_async_renew,
.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
.renew_lease = nfs4_proc_renew,
};
#if defined(CONFIG_NFS_V4_1)
struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
.sched_state_renewal = nfs41_proc_async_sequence,
.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
.renew_lease = nfs4_proc_sequence,
};
#endif
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.minor_version = 0,
.call_sync = _nfs4_call_sync,
.validate_stateid = nfs4_validate_delegation_stateid,
.find_root_sec = nfs4_find_root_sec,
.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
.state_renewal_ops = &nfs40_state_renewal_ops,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.minor_version = 1,
.call_sync = _nfs4_call_sync_session,
.validate_stateid = nfs41_validate_delegation_stateid,
.find_root_sec = nfs41_find_root_sec,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
.state_renewal_ops = &nfs41_state_renewal_ops,
};
#endif
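/*
 * Per-minor-version dispatch table, indexed by the minorversion
 * selected at mount time.
 */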
const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
[1] = &nfs_v4_1_minor_ops,
#endif
};
static const struct inode_operations nfs4_file_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
.getxattr = generic_getxattr,
.setxattr = generic_setxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
};
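/*
 * Note that .rmdir maps to nfs4_proc_remove: the NFSv4 REMOVE
 * operation deletes a directory entry regardless of its type, so no
 * separate rmdir procedure is needed.
 */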
const struct nfs_rpc_ops nfs_v4_clientops = {
.version = 4, /* protocol version */
.dentry_ops = &nfs4_dentry_operations,
.dir_inode_ops = &nfs4_dir_inode_operations,
.file_inode_ops = &nfs4_file_inode_operations,
.file_ops = &nfs4_file_operations,
.getroot = nfs4_proc_get_root,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookup = nfs4_proc_lookup,
.access = nfs4_proc_access,
.readlink = nfs4_proc_readlink,
.create = nfs4_proc_create,
.remove = nfs4_proc_remove,
.unlink_setup = nfs4_proc_unlink_setup,
.unlink_done = nfs4_proc_unlink_done,
.rename = nfs4_proc_rename,
.rename_setup = nfs4_proc_rename_setup,
.rename_done = nfs4_proc_rename_done,
.link = nfs4_proc_link,
.symlink = nfs4_proc_symlink,
.mkdir = nfs4_proc_mkdir,
.rmdir = nfs4_proc_remove,
.readdir = nfs4_proc_readdir,
.mknod = nfs4_proc_mknod,
.statfs = nfs4_proc_statfs,
.fsinfo = nfs4_proc_fsinfo,
.pathconf = nfs4_proc_pathconf,
.set_capabilities = nfs4_server_capabilities,
.decode_dirent = nfs4_decode_dirent,
.read_setup = nfs4_proc_read_setup,
.read_done = nfs4_read_done,
.write_setup = nfs4_proc_write_setup,
.write_done = nfs4_write_done,
.commit_setup = nfs4_proc_commit_setup,
.commit_done = nfs4_commit_done,
.lock = nfs4_proc_lock,
.clear_acl_cache = nfs4_zap_acl_attr,
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.init_client = nfs4_init_client,
.secinfo = nfs4_proc_secinfo,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
.prefix = XATTR_NAME_NFSV4_ACL,
.list = nfs4_xattr_list_nfs4_acl,
.get = nfs4_xattr_get_nfs4_acl,
.set = nfs4_xattr_set_nfs4_acl,
};
const struct xattr_handler *nfs4_xattr_handlers[] = {
&nfs4_xattr_nfs4_acl_handler,
NULL
};
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3527_0 |
crossvul-cpp_data_bad_2158_0 | /*
* Copyright (c) 2006, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Copyright (C) 2006-2008 Intel Corporation
* Copyright IBM Corporation, 2008
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Author: Allen M. Kay <allen.m.kay@intel.com>
* Author: Weidong Han <weidong.han@intel.com>
* Author: Ben-Ami Yassour <benami@il.ibm.com>
*/
#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
"Enable device assignment on platforms without interrupt remapping support.");
static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t base_gfn, unsigned long npages);
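/*
 * Pin the host pages backing size/PAGE_SIZE frames starting at gfn.
 * Only the first translation is error-checked or kept; the remaining
 * pages are pinned with their results discarded.  The single pfn
 * returned is later used as the base of a physically contiguous run
 * (see kvm_unpin_pages()), which assumes the slot backs this range
 * with contiguous host pages.
 */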
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
unsigned long size)
{
gfn_t end_gfn;
pfn_t pfn;
pfn = gfn_to_pfn_memslot(slot, gfn);
end_gfn = gfn + (size >> PAGE_SHIFT);
gfn += 1;
if (is_error_noslot_pfn(pfn))
return pfn;
while (gfn < end_gfn)
gfn_to_pfn_memslot(slot, gfn++);
return pfn;
}
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
gfn_t gfn, end_gfn;
pfn_t pfn;
int r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int flags;
/* check if iommu exists and in use */
if (!domain)
return 0;
gfn = slot->base_gfn;
end_gfn = gfn + slot->npages;
flags = IOMMU_READ;
if (!(slot->flags & KVM_MEM_READONLY))
flags |= IOMMU_WRITE;
if (!kvm->arch.iommu_noncoherent)
flags |= IOMMU_CACHE;
while (gfn < end_gfn) {
unsigned long page_size;
/* Check if already mapped */
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
gfn += 1;
continue;
}
/* Get the page size we could use to map */
page_size = kvm_host_page_size(kvm, gfn);
/* Make sure the page_size does not exceed the memslot */
while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
page_size >>= 1;
/* Make sure gfn is aligned to the page size we want to map */
while ((gfn << PAGE_SHIFT) & (page_size - 1))
page_size >>= 1;
/* Make sure hva is aligned to the page size we want to map */
while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
page_size >>= 1;
/*
* Pin all pages we are about to map in memory. This is
* important because we unmap and unpin in 4kb steps later.
*/
pfn = kvm_pin_pages(slot, gfn, page_size);
if (is_error_noslot_pfn(pfn)) {
gfn += 1;
continue;
}
/* Map into IO address space */
r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
page_size, flags);
if (r) {
printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%llx\n", pfn);
goto unmap_pages;
}
gfn += page_size >> PAGE_SHIFT;
}
return 0;
unmap_pages:
kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
return r;
}
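/*
 * Note: per the forward declaration above, the third argument of
 * kvm_iommu_put_pages() is a page count, yet the unmap_pages error
 * path passes the absolute gfn reached rather than the number of
 * pages mapped so far (gfn - slot->base_gfn), so rollback can touch
 * far more pages than were mapped.
 */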
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
int idx, r = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
if (kvm->arch.iommu_noncoherent)
kvm_arch_register_noncoherent_dma(kvm);
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
r = kvm_iommu_map_pages(kvm, memslot);
if (r)
break;
}
srcu_read_unlock(&kvm->srcu, idx);
return r;
}
int kvm_assign_device(struct kvm *kvm,
struct kvm_assigned_dev_kernel *assigned_dev)
{
struct pci_dev *pdev = NULL;
struct iommu_domain *domain = kvm->arch.iommu_domain;
int r;
bool noncoherent;
/* check if iommu exists and in use */
if (!domain)
return 0;
pdev = assigned_dev->dev;
if (pdev == NULL)
return -ENODEV;
r = iommu_attach_device(domain, &pdev->dev);
if (r) {
dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
return r;
}
noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain,
IOMMU_CAP_CACHE_COHERENCY);
/* Check if need to update IOMMU page table for guest memory */
if (noncoherent != kvm->arch.iommu_noncoherent) {
kvm_iommu_unmap_memslots(kvm);
kvm->arch.iommu_noncoherent = noncoherent;
r = kvm_iommu_map_memslots(kvm);
if (r)
goto out_unmap;
}
pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
dev_info(&pdev->dev, "kvm assign device\n");
return 0;
out_unmap:
kvm_iommu_unmap_memslots(kvm);
return r;
}
int kvm_deassign_device(struct kvm *kvm,
struct kvm_assigned_dev_kernel *assigned_dev)
{
struct iommu_domain *domain = kvm->arch.iommu_domain;
struct pci_dev *pdev = NULL;
/* check if iommu exists and in use */
if (!domain)
return 0;
pdev = assigned_dev->dev;
if (pdev == NULL)
return -ENODEV;
iommu_detach_device(domain, &pdev->dev);
pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
dev_info(&pdev->dev, "kvm deassign device\n");
return 0;
}
int kvm_iommu_map_guest(struct kvm *kvm)
{
int r;
if (!iommu_present(&pci_bus_type)) {
printk(KERN_ERR "%s: iommu not found\n", __func__);
return -ENODEV;
}
mutex_lock(&kvm->slots_lock);
kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
if (!kvm->arch.iommu_domain) {
r = -ENOMEM;
goto out_unlock;
}
if (!allow_unsafe_assigned_interrupts &&
!iommu_domain_has_cap(kvm->arch.iommu_domain,
IOMMU_CAP_INTR_REMAP)) {
printk(KERN_WARNING "%s: No interrupt remapping support,"
" disallowing device assignment."
" Re-enble with \"allow_unsafe_assigned_interrupts=1\""
" module option.\n", __func__);
iommu_domain_free(kvm->arch.iommu_domain);
kvm->arch.iommu_domain = NULL;
r = -EPERM;
goto out_unlock;
}
r = kvm_iommu_map_memslots(kvm);
if (r)
kvm_iommu_unmap_memslots(kvm);
out_unlock:
mutex_unlock(&kvm->slots_lock);
return r;
}
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
unsigned long i;
for (i = 0; i < npages; ++i)
kvm_release_pfn_clean(pfn + i);
}
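/*
 * Tear down the IOMMU mappings for [base_gfn, base_gfn + npages).
 * Note that unmap_pages below is derived as 1ULL << get_order(size),
 * which rounds the unmapped size up to a power of two; a
 * non-power-of-two unmap size would release more pfns than were
 * actually unmapped.
 */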
static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t base_gfn, unsigned long npages)
{
struct iommu_domain *domain;
gfn_t end_gfn, gfn;
pfn_t pfn;
u64 phys;
domain = kvm->arch.iommu_domain;
end_gfn = base_gfn + npages;
gfn = base_gfn;
/* check if iommu exists and in use */
if (!domain)
return;
while (gfn < end_gfn) {
unsigned long unmap_pages;
size_t size;
/* Get physical address */
phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
if (!phys) {
gfn++;
continue;
}
pfn = phys >> PAGE_SHIFT;
/* Unmap address from IO address space */
size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
unmap_pages = 1ULL << get_order(size);
/* Unpin all pages we just unmapped to not leak any memory */
kvm_unpin_pages(kvm, pfn, unmap_pages);
gfn += unmap_pages;
}
}
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int idx;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots)
kvm_iommu_unmap_pages(kvm, memslot);
srcu_read_unlock(&kvm->srcu, idx);
if (kvm->arch.iommu_noncoherent)
kvm_arch_unregister_noncoherent_dma(kvm);
return 0;
}
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
struct iommu_domain *domain = kvm->arch.iommu_domain;
/* check if iommu exists and in use */
if (!domain)
return 0;
mutex_lock(&kvm->slots_lock);
kvm_iommu_unmap_memslots(kvm);
kvm->arch.iommu_domain = NULL;
kvm->arch.iommu_noncoherent = false;
mutex_unlock(&kvm->slots_lock);
iommu_domain_free(domain);
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_2158_0 |
crossvul-cpp_data_good_5814_0 | /*
* DSP utils
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DSP utils
*/
#include "libavutil/attributes.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "avcodec.h"
#include "copy_block.h"
#include "dct.h"
#include "dsputil.h"
#include "simple_idct.h"
#include "faandct.h"
#include "faanidct.h"
#include "imgconvert.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "config.h"
#include "diracdsp.h"
uint32_t ff_squareTbl[512] = {0, };
#define BIT_DEPTH 16
#include "dsputil_template.c"
#undef BIT_DEPTH
#define BIT_DEPTH 8
#include "dsputil_template.c"
// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL/255 * 0x7f)
#define pb_80 (~0UL/255 * 0x80)
/* Specific zigzag scan for 248 idct. NOTE that unlike the
specification, we interleave the fields */
const uint8_t ff_zigzag248_direct[64] = {
0, 8, 1, 9, 16, 24, 2, 10,
17, 25, 32, 40, 48, 56, 33, 41,
18, 26, 3, 11, 4, 12, 19, 27,
34, 42, 49, 57, 50, 58, 35, 43,
20, 28, 5, 13, 6, 14, 21, 29,
36, 44, 51, 59, 52, 60, 37, 45,
22, 30, 7, 15, 23, 31, 38, 46,
53, 61, 54, 62, 39, 47, 55, 63,
};
/* not permutated inverse zigzag_direct + 1 for MMX quantizer */
DECLARE_ALIGNED(16, uint16_t, ff_inv_zigzag_direct16)[64];
const uint8_t ff_alternate_horizontal_scan[64] = {
0, 1, 2, 3, 8, 9, 16, 17,
10, 11, 4, 5, 6, 7, 15, 14,
13, 12, 19, 18, 24, 25, 32, 33,
26, 27, 20, 21, 22, 23, 28, 29,
30, 31, 34, 35, 40, 41, 48, 49,
42, 43, 36, 37, 38, 39, 44, 45,
46, 47, 50, 51, 56, 57, 58, 59,
52, 53, 54, 55, 60, 61, 62, 63,
};
const uint8_t ff_alternate_vertical_scan[64] = {
0, 8, 16, 24, 1, 9, 2, 10,
17, 25, 32, 40, 48, 56, 57, 49,
41, 33, 26, 18, 3, 11, 4, 12,
19, 27, 34, 42, 50, 58, 35, 43,
51, 59, 20, 28, 5, 13, 6, 14,
21, 29, 36, 44, 52, 60, 37, 45,
53, 61, 22, 30, 7, 15, 23, 31,
38, 46, 54, 62, 39, 47, 55, 63,
};
/* Input permutation for the simple_idct_mmx */
static const uint8_t simple_mmx_permutation[64]={
0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};
static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7};
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
const uint8_t *src_scantable)
{
int i;
int end;
st->scantable= src_scantable;
for(i=0; i<64; i++){
int j;
j = src_scantable[i];
st->permutated[i] = permutation[j];
}
end=-1;
for(i=0; i<64; i++){
int j;
j = st->permutated[i];
if(j>end) end=j;
st->raster_end[i]= end;
}
}
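/*
 * Example: FF_TRANSPOSE_IDCT_PERM maps index i = 8*row + col to
 * 8*col + row; ((i&7)<<3) | (i>>3) is exactly that transposition of
 * the 8x8 block.
 */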
av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
int idct_permutation_type)
{
int i;
switch(idct_permutation_type){
case FF_NO_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= i;
break;
case FF_LIBMPEG2_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
break;
case FF_SIMPLE_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= simple_mmx_permutation[i];
break;
case FF_TRANSPOSE_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= ((i&7)<<3) | (i>>3);
break;
case FF_PARTTRANS_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= (i&0x24) | ((i&3)<<3) | ((i>>3)&3);
break;
case FF_SSE2_IDCT_PERM:
for(i=0; i<64; i++)
idct_permutation[i]= (i&0x38) | idct_sse2_row_perm[i&7];
break;
default:
av_log(NULL, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n");
}
}
static int pix_sum_c(uint8_t * pix, int line_size)
{
int s, i, j;
s = 0;
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j += 8) {
s += pix[0];
s += pix[1];
s += pix[2];
s += pix[3];
s += pix[4];
s += pix[5];
s += pix[6];
s += pix[7];
pix += 8;
}
pix += line_size - 16;
}
return s;
}
static int pix_norm1_c(uint8_t * pix, int line_size)
{
int s, i, j;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j += 8) {
#if 0
s += sq[pix[0]];
s += sq[pix[1]];
s += sq[pix[2]];
s += sq[pix[3]];
s += sq[pix[4]];
s += sq[pix[5]];
s += sq[pix[6]];
s += sq[pix[7]];
#else
#if HAVE_FAST_64BIT
register uint64_t x=*(uint64_t*)pix;
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
s += sq[(x>>32)&0xff];
s += sq[(x>>40)&0xff];
s += sq[(x>>48)&0xff];
s += sq[(x>>56)&0xff];
#else
register uint32_t x=*(uint32_t*)pix;
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
x=*(uint32_t*)(pix+4);
s += sq[x&0xff];
s += sq[(x>>8)&0xff];
s += sq[(x>>16)&0xff];
s += sq[(x>>24)&0xff];
#endif
#endif
pix += 8;
}
pix += line_size - 16;
}
return s;
}
static void bswap_buf(uint32_t *dst, const uint32_t *src, int w){
int i;
for(i=0; i+8<=w; i+=8){
dst[i+0]= av_bswap32(src[i+0]);
dst[i+1]= av_bswap32(src[i+1]);
dst[i+2]= av_bswap32(src[i+2]);
dst[i+3]= av_bswap32(src[i+3]);
dst[i+4]= av_bswap32(src[i+4]);
dst[i+5]= av_bswap32(src[i+5]);
dst[i+6]= av_bswap32(src[i+6]);
dst[i+7]= av_bswap32(src[i+7]);
}
for(;i<w; i++){
dst[i+0]= av_bswap32(src[i+0]);
}
}
static void bswap16_buf(uint16_t *dst, const uint16_t *src, int len)
{
while (len--)
*dst++ = av_bswap16(*src++);
}
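/*
 * Sum-of-squared-differences helpers.  sq points at the middle of the
 * 512-entry ff_squareTbl so that signed differences in [-255,255]
 * index valid entries; the table is expected to be filled with x*x at
 * init time.
 */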
static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
s += sq[pix1[0] - pix2[0]];
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
s += sq[pix1[0] - pix2[0]];
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
s += sq[pix1[4] - pix2[4]];
s += sq[pix1[5] - pix2[5]];
s += sq[pix1[6] - pix2[6]];
s += sq[pix1[7] - pix2[7]];
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
s += sq[pix1[ 0] - pix2[ 0]];
s += sq[pix1[ 1] - pix2[ 1]];
s += sq[pix1[ 2] - pix2[ 2]];
s += sq[pix1[ 3] - pix2[ 3]];
s += sq[pix1[ 4] - pix2[ 4]];
s += sq[pix1[ 5] - pix2[ 5]];
s += sq[pix1[ 6] - pix2[ 6]];
s += sq[pix1[ 7] - pix2[ 7]];
s += sq[pix1[ 8] - pix2[ 8]];
s += sq[pix1[ 9] - pix2[ 9]];
s += sq[pix1[10] - pix2[10]];
s += sq[pix1[11] - pix2[11]];
s += sq[pix1[12] - pix2[12]];
s += sq[pix1[13] - pix2[13]];
s += sq[pix1[14] - pix2[14]];
s += sq[pix1[15] - pix2[15]];
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static void diff_pixels_c(int16_t *av_restrict block, const uint8_t *s1,
const uint8_t *s2, int stride){
int i;
/* read the pixels */
for(i=0;i<8;i++) {
block[0] = s1[0] - s2[0];
block[1] = s1[1] - s2[1];
block[2] = s1[2] - s2[2];
block[3] = s1[3] - s2[3];
block[4] = s1[4] - s2[4];
block[5] = s1[5] - s2[5];
block[6] = s1[6] - s2[6];
block[7] = s1[7] - s2[7];
s1 += stride;
s2 += stride;
block += 8;
}
}
static void put_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<8;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels[2] = av_clip_uint8(block[2]);
pixels[3] = av_clip_uint8(block[3]);
pixels[4] = av_clip_uint8(block[4]);
pixels[5] = av_clip_uint8(block[5]);
pixels[6] = av_clip_uint8(block[6]);
pixels[7] = av_clip_uint8(block[7]);
pixels += line_size;
block += 8;
}
}
static void put_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels[2] = av_clip_uint8(block[2]);
pixels[3] = av_clip_uint8(block[3]);
pixels += line_size;
block += 8;
}
}
static void put_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<2;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels += line_size;
block += 8;
}
}
static void put_signed_pixels_clamped_c(const int16_t *block,
uint8_t *av_restrict pixels,
int line_size)
{
int i, j;
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++) {
if (*block < -128)
*pixels = 0;
else if (*block > 127)
*pixels = 255;
else
*pixels = (uint8_t)(*block + 128);
block++;
pixels++;
}
pixels += (line_size - 8);
}
}
static void add_pixels8_c(uint8_t *av_restrict pixels,
int16_t *block,
int line_size)
{
int i;
for(i=0;i<8;i++) {
pixels[0] += block[0];
pixels[1] += block[1];
pixels[2] += block[2];
pixels[3] += block[3];
pixels[4] += block[4];
pixels[5] += block[5];
pixels[6] += block[6];
pixels[7] += block[7];
pixels += line_size;
block += 8;
}
}
static void add_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<8;i++) {
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
pixels[4] = av_clip_uint8(pixels[4] + block[4]);
pixels[5] = av_clip_uint8(pixels[5] + block[5]);
pixels[6] = av_clip_uint8(pixels[6] + block[6]);
pixels[7] = av_clip_uint8(pixels[7] + block[7]);
pixels += line_size;
block += 8;
}
}
static void add_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
pixels += line_size;
block += 8;
}
}
static void add_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<2;i++) {
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
pixels += line_size;
block += 8;
}
}
static int sum_abs_dctelem_c(int16_t *block)
{
int sum=0, i;
for(i=0; i<64; i++)
sum+= FFABS(block[i]);
return sum;
}
static void fill_block16_c(uint8_t *block, uint8_t value, int line_size, int h)
{
int i;
for (i = 0; i < h; i++) {
memset(block, value, 16);
block += line_size;
}
}
static void fill_block8_c(uint8_t *block, uint8_t value, int line_size, int h)
{
int i;
for (i = 0; i < h; i++) {
memset(block, value, 8);
block += line_size;
}
}
#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
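/*
 * 1/16-pel bilinear interpolation for GMC.  The four weights always
 * sum to (16-x16)*16 + x16*16 = 256, so the >>8 renormalizes exactly,
 * with rounder selecting round-to-nearest vs. truncation.  Each row
 * reads src[0..8] and src[stride..stride+8], so the caller must
 * provide one extra column and row of source.
 */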
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
{
const int A=(16-x16)*(16-y16);
const int B=( x16)*(16-y16);
const int C=(16-x16)*( y16);
const int D=( x16)*( y16);
int i;
for(i=0; i<h; i++)
{
dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
dst+= stride;
src+= stride;
}
}
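/*
 * Affine global motion compensation.  Source coordinates that fall
 * outside the frame are clamped to the nearest edge with av_clip(),
 * and the interpolation degenerates to 1-D (or a plain copy) in the
 * out-of-range dimension(s).
 */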
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
int y, vx, vy;
const int s= 1<<shift;
width--;
height--;
for(y=0; y<h; y++){
int x;
vx= ox;
vy= oy;
for(x=0; x<8; x++){ //XXX FIXME optimize
int src_x, src_y, frac_x, frac_y, index;
src_x= vx>>16;
src_y= vy>>16;
frac_x= src_x&(s-1);
frac_y= src_y&(s-1);
src_x>>=shift;
src_y>>=shift;
if((unsigned)src_x < width){
if((unsigned)src_y < height){
index= src_x + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*(s-frac_y)
+ ( src[index+stride ]*(s-frac_x)
+ src[index+stride+1]* frac_x )* frac_y
+ r)>>(shift*2);
}else{
index= src_x + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ src[index +1]* frac_x )*s
+ r)>>(shift*2);
}
}else{
if((unsigned)src_y < height){
index= av_clip(src_x, 0, width) + src_y*stride;
dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
+ src[index+stride ]* frac_y )*s
+ r)>>(shift*2);
}else{
index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
dst[y*stride + x]= src[index ];
}
}
vx+= dxx;
vy+= dyx;
}
ox += dxy;
oy += dyy;
}
}
static inline void put_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
switch(width){
case 2: put_pixels2_8_c (dst, src, stride, height); break;
case 4: put_pixels4_8_c (dst, src, stride, height); break;
case 8: put_pixels8_8_c (dst, src, stride, height); break;
case 16:put_pixels16_8_c(dst, src, stride, height); break;
}
}
static inline void put_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (683*(2*src[j] + src[j+1] + 1)) >> 11;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (683*(src[j] + 2*src[j+1] + 1)) >> 11;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (683*(2*src[j] + src[j+stride] + 1)) >> 11;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (683*(src[j] + 2*src[j+stride] + 1)) >> 11;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15;
}
src += stride;
dst += stride;
}
}
static inline void put_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc00_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
switch(width){
case 2: avg_pixels2_8_c (dst, src, stride, height); break;
case 4: avg_pixels4_8_c (dst, src, stride, height); break;
case 8: avg_pixels8_8_c (dst, src, stride, height); break;
case 16:avg_pixels16_8_c(dst, src, stride, height); break;
}
}
static inline void avg_tpel_pixels_mc10_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((683*(2*src[j] + src[j+1] + 1)) >> 11) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc20_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+1] + 1)) >> 11) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc01_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((683*(2*src[j] + src[j+stride] + 1)) >> 11) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc11_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((2731*(4*src[j] + 3*src[j+1] + 3*src[j+stride] + 2*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc12_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((2731*(3*src[j] + 2*src[j+1] + 4*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc02_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((683*(src[j] + 2*src[j+stride] + 1)) >> 11) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc21_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((2731*(3*src[j] + 4*src[j+1] + 2*src[j+stride] + 3*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
static inline void avg_tpel_pixels_mc22_c(uint8_t *dst, const uint8_t *src, int stride, int width, int height){
int i,j;
for (i=0; i < height; i++) {
for (j=0; j < width; j++) {
dst[j] = (dst[j] + ((2731*(2*src[j] + 3*src[j+1] + 3*src[j+stride] + 4*src[j+stride+1] + 6)) >> 15) + 1) >> 1;
}
src += stride;
dst += stride;
}
}
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[0]+src[2])*6 + (src[1]+src[3])*3 - (src[2]+src[4]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0]+src[3])*6 + (src[0]+src[4])*3 - (src[1]+src[5]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1]+src[4])*6 + (src[0]+src[5])*3 - (src[0]+src[6]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2]+src[5])*6 + (src[1]+src[6])*3 - (src[0]+src[7]));\
OP(dst[4], (src[4]+src[5])*20 - (src[3]+src[6])*6 + (src[2]+src[7])*3 - (src[1]+src[8]));\
OP(dst[5], (src[5]+src[6])*20 - (src[4]+src[7])*6 + (src[3]+src[8])*3 - (src[2]+src[8]));\
OP(dst[6], (src[6]+src[7])*20 - (src[5]+src[8])*6 + (src[4]+src[8])*3 - (src[3]+src[7]));\
OP(dst[7], (src[7]+src[8])*20 - (src[6]+src[8])*6 + (src[5]+src[7])*3 - (src[4]+src[6]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=8;\
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
const int src0= src[0*srcStride];\
const int src1= src[1*srcStride];\
const int src2= src[2*srcStride];\
const int src3= src[3*srcStride];\
const int src4= src[4*srcStride];\
const int src5= src[5*srcStride];\
const int src6= src[6*srcStride];\
const int src7= src[7*srcStride];\
const int src8= src[8*srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
OP(dst[7*dstStride], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
dst++;\
src++;\
}\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
\
for(i=0; i<h; i++)\
{\
OP(dst[ 0], (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]));\
OP(dst[ 1], (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]));\
OP(dst[ 2], (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]));\
OP(dst[ 3], (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]));\
OP(dst[ 4], (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]));\
OP(dst[ 5], (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]));\
OP(dst[ 6], (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]));\
OP(dst[ 7], (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]));\
OP(dst[ 8], (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]));\
OP(dst[ 9], (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]));\
OP(dst[10], (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]));\
OP(dst[11], (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]));\
OP(dst[12], (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]));\
OP(dst[13], (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]));\
OP(dst[14], (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]));\
OP(dst[15], (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
const int w=16;\
for(i=0; i<w; i++)\
{\
const int src0= src[0*srcStride];\
const int src1= src[1*srcStride];\
const int src2= src[2*srcStride];\
const int src3= src[3*srcStride];\
const int src4= src[4*srcStride];\
const int src5= src[5*srcStride];\
const int src6= src[6*srcStride];\
const int src7= src[7*srcStride];\
const int src8= src[8*srcStride];\
const int src9= src[9*srcStride];\
const int src10= src[10*srcStride];\
const int src11= src[11*srcStride];\
const int src12= src[12*srcStride];\
const int src13= src[13*srcStride];\
const int src14= src[14*srcStride];\
const int src15= src[15*srcStride];\
const int src16= src[16*srcStride];\
OP(dst[ 0*dstStride], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
OP(dst[ 1*dstStride], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
OP(dst[ 2*dstStride], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
OP(dst[ 3*dstStride], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
OP(dst[ 4*dstStride], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
OP(dst[ 5*dstStride], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
OP(dst[ 6*dstStride], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
OP(dst[ 7*dstStride], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
OP(dst[ 8*dstStride], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
OP(dst[ 9*dstStride], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
OP(dst[10*dstStride], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
OP(dst[11*dstStride], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
OP(dst[12*dstStride], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
OP(dst[13*dstStride], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
OP(dst[14*dstStride], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
OP(dst[15*dstStride], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
dst++;\
src++;\
}\
}\
\
static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t half[64];\
put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t half[64];\
put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
OPNAME ## pixels8_l2_8(dst, src+1, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t half[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
copy_block9(full, src, 16, stride, 9);\
OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
}\
\
static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t half[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
OPNAME ## pixels8_l2_8(dst, full+16, half, stride, 16, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l4_8(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full+1, 8, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l4_8(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l4_8(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full+1, 8, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[72];\
uint8_t halfHV[64];\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[72];\
uint8_t halfHV[64];\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9);\
OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
uint8_t halfV[64];\
uint8_t halfHV[64];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[16*9];\
uint8_t halfH[72];\
copy_block9(full, src, 16, stride, 9);\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
put ## RND ## pixels8_l2_8(halfH, halfH, full+1, 8, 8, 16, 9);\
OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[72];\
put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
\
static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t half[256];\
put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t half[256];\
put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
OPNAME ## pixels16_l2_8(dst, src+1, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t half[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
copy_block17(full, src, 24, stride, 17);\
OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
}\
\
static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t half[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
OPNAME ## pixels16_l2_8(dst, full+24, half, stride, 24, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l4_8(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full+1, 16, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l4_8(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l4_8(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full+1, 16, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[272];\
uint8_t halfHV[256];\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[272];\
uint8_t halfHV[256];\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfH+16, halfHV, stride, 16, 16, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17);\
OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
uint8_t halfV[256];\
uint8_t halfHV[256];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t full[24*17];\
uint8_t halfH[272];\
copy_block17(full, src, 24, stride, 17);\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
put ## RND ## pixels16_l2_8(halfH, halfH, full+1, 16, 16, 24, 17);\
OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
uint8_t halfH[272];\
put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
}
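/* Final normalization for the QPEL filters: the filter taps sum to 32, so >>5 rescales; +16 rounds to nearest, +15 gives the "no rounding" variants, and cm[] clamps to 0..255. */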
#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
#define op_put(a, b) a = cm[((b) + 16)>>5]
#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
QPEL_MC(0, put_ , _ , op_put)
QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
QPEL_MC(0, avg_ , _ , op_avg)
//QPEL_MC(1, avg_no_rnd , _ , op_avg)
#undef op_avg
#undef op_avg_no_rnd
#undef op_put
#undef op_put_no_rnd
void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
put_pixels8_8_c(dst, src, stride, 8);
}
void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
avg_pixels8_8_c(dst, src, stride, 8);
}
void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
put_pixels16_8_c(dst, src, stride, 16);
}
void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
avg_pixels16_8_c(dst, src, stride, 16);
}
#define put_qpel8_mc00_c ff_put_pixels8x8_c
#define avg_qpel8_mc00_c ff_avg_pixels8x8_c
#define put_qpel16_mc00_c ff_put_pixels16x16_c
#define avg_qpel16_mc00_c ff_avg_pixels16x16_c
#define put_no_rnd_qpel8_mc00_c ff_put_pixels8x8_c
#define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c
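/* WMV2 half-pel horizontal filter: a 4-tap (-1, 9, 9, -1)/16 lowpass with +8 rounding, clamped through the crop table. */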
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<h; i++){
dst[0]= cm[(9*(src[0] + src[1]) - (src[-1] + src[2]) + 8)>>4];
dst[1]= cm[(9*(src[1] + src[2]) - (src[ 0] + src[3]) + 8)>>4];
dst[2]= cm[(9*(src[2] + src[3]) - (src[ 1] + src[4]) + 8)>>4];
dst[3]= cm[(9*(src[3] + src[4]) - (src[ 2] + src[5]) + 8)>>4];
dst[4]= cm[(9*(src[4] + src[5]) - (src[ 3] + src[6]) + 8)>>4];
dst[5]= cm[(9*(src[5] + src[6]) - (src[ 4] + src[7]) + 8)>>4];
dst[6]= cm[(9*(src[6] + src[7]) - (src[ 5] + src[8]) + 8)>>4];
dst[7]= cm[(9*(src[7] + src[8]) - (src[ 6] + src[9]) + 8)>>4];
dst+=dstStride;
src+=srcStride;
}
}
#if CONFIG_RV40_DECODER
void ff_put_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
put_pixels16_xy2_8_c(dst, src, stride, 16);
}
void ff_avg_rv40_qpel16_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
avg_pixels16_xy2_8_c(dst, src, stride, 16);
}
void ff_put_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
put_pixels8_xy2_8_c(dst, src, stride, 8);
}
void ff_avg_rv40_qpel8_mc33_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
avg_pixels8_xy2_8_c(dst, src, stride, 8);
}
#endif /* CONFIG_RV40_DECODER */
#if CONFIG_DIRAC_DECODER
#define DIRAC_MC(OPNAME)\
void ff_ ## OPNAME ## _dirac_pixels8_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels8_8_c(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_8_c(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_8_c(dst , src[0] , stride, h);\
OPNAME ## _pixels16_8_c(dst+16, src[0]+16, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels8_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels8_l2_8(dst, src[0], src[1], stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_l2_8(dst, src[0], src[1], stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_l2_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_l2_8(dst , src[0] , src[1] , stride, stride, stride, h);\
OPNAME ## _pixels16_l2_8(dst+16, src[0]+16, src[1]+16, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels8_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels8_l4_8(dst, src[0], src[1], src[2], src[3], stride, stride, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels16_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_l4_8(dst, src[0], src[1], src[2], src[3], stride, stride, stride, stride, stride, h);\
}\
void ff_ ## OPNAME ## _dirac_pixels32_l4_c(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
OPNAME ## _pixels16_l4_8(dst , src[0] , src[1] , src[2] , src[3] , stride, stride, stride, stride, stride, h);\
OPNAME ## _pixels16_l4_8(dst+16, src[0]+16, src[1]+16, src[2]+16, src[3]+16, stride, stride, stride, stride, stride, h);\
}
DIRAC_MC(put)
DIRAC_MC(avg)
#endif
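/* Vertical counterpart of the (-1, 9, 9, -1)/16 lowpass above, walking one column per iteration. */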
static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<w; i++){
const int src_1= src[ -srcStride];
const int src0 = src[0 ];
const int src1 = src[ srcStride];
const int src2 = src[2*srcStride];
const int src3 = src[3*srcStride];
const int src4 = src[4*srcStride];
const int src5 = src[5*srcStride];
const int src6 = src[6*srcStride];
const int src7 = src[7*srcStride];
const int src8 = src[8*srcStride];
const int src9 = src[9*srcStride];
dst[0*dstStride]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
dst[1*dstStride]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
dst[2*dstStride]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
dst[3*dstStride]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
dst[4*dstStride]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
dst[5*dstStride]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
dst[6*dstStride]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
dst[7*dstStride]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
src++;
dst++;
}
}
static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t half[64];
wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
put_pixels8_l2_8(dst, src, half, stride, stride, 8, 8);
}
static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
}
static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t half[64];
wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
put_pixels8_l2_8(dst, src+1, half, stride, stride, 8, 8);
}
static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
}
static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t halfH[88];
uint8_t halfV[64];
uint8_t halfHV[64];
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}
static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t halfH[88];
uint8_t halfV[64];
uint8_t halfHV[64];
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
put_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}
static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
uint8_t halfH[88];
wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
}
static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
int x;
const int strength= ff_h263_loop_filter_strength[qscale];
for(x=0; x<8; x++){
int d1, d2, ad1;
int p0= src[x-2*stride];
int p1= src[x-1*stride];
int p2= src[x+0*stride];
int p3= src[x+1*stride];
int d = (p0 - p3 + 4*(p2 - p1)) / 8;
if (d<-2*strength) d1= 0;
else if(d<- strength) d1=-2*strength - d;
else if(d< strength) d1= d;
else if(d< 2*strength) d1= 2*strength - d;
else d1= 0;
p1 += d1;
p2 -= d1;
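/* Cheap clamp to 0..255: p1/p2 stay well inside -256..511 here, so bit 8 flags out-of-range values; ~(x>>31) yields 0 for negatives and 255 otherwise. */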
if(p1&256) p1= ~(p1>>31);
if(p2&256) p2= ~(p2>>31);
src[x-1*stride] = p1;
src[x+0*stride] = p2;
ad1= FFABS(d1)>>1;
d2= av_clip((p0-p3)/4, -ad1, ad1);
src[x-2*stride] = p0 - d2;
src[x+ stride] = p3 + d2;
}
}
}
static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
int y;
const int strength= ff_h263_loop_filter_strength[qscale];
for(y=0; y<8; y++){
int d1, d2, ad1;
int p0= src[y*stride-2];
int p1= src[y*stride-1];
int p2= src[y*stride+0];
int p3= src[y*stride+1];
int d = (p0 - p3 + 4*(p2 - p1)) / 8;
if (d<-2*strength) d1= 0;
else if(d<- strength) d1=-2*strength - d;
else if(d< strength) d1= d;
else if(d< 2*strength) d1= 2*strength - d;
else d1= 0;
p1 += d1;
p2 -= d1;
if(p1&256) p1= ~(p1>>31);
if(p2&256) p2= ~(p2>>31);
src[y*stride-1] = p1;
src[y*stride+0] = p2;
ad1= FFABS(d1)>>1;
d2= av_clip((p0-p3)/4, -ad1, ad1);
src[y*stride-2] = p0 - d2;
src[y*stride+1] = p3 + d2;
}
}
}
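/* Plain SAD over 16-wide rows; the _x2/_y2/_xy2 variants below score against half-pel averaged references. */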
static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - pix2[0]);
s += abs(pix1[1] - pix2[1]);
s += abs(pix1[2] - pix2[2]);
s += abs(pix1[3] - pix2[3]);
s += abs(pix1[4] - pix2[4]);
s += abs(pix1[5] - pix2[5]);
s += abs(pix1[6] - pix2[6]);
s += abs(pix1[7] - pix2[7]);
s += abs(pix1[8] - pix2[8]);
s += abs(pix1[9] - pix2[9]);
s += abs(pix1[10] - pix2[10]);
s += abs(pix1[11] - pix2[11]);
s += abs(pix1[12] - pix2[12]);
s += abs(pix1[13] - pix2[13]);
s += abs(pix1[14] - pix2[14]);
s += abs(pix1[15] - pix2[15]);
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int pix_abs16_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int pix_abs16_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
pix1 += line_size;
pix2 += line_size;
pix3 += line_size;
}
return s;
}
static int pix_abs16_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
pix1 += line_size;
pix2 += line_size;
pix3 += line_size;
}
return s;
}
static inline int pix_abs8_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - pix2[0]);
s += abs(pix1[1] - pix2[1]);
s += abs(pix1[2] - pix2[2]);
s += abs(pix1[3] - pix2[3]);
s += abs(pix1[4] - pix2[4]);
s += abs(pix1[5] - pix2[5]);
s += abs(pix1[6] - pix2[6]);
s += abs(pix1[7] - pix2[7]);
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int pix_abs8_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
pix1 += line_size;
pix2 += line_size;
}
return s;
}
static int pix_abs8_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
pix1 += line_size;
pix2 += line_size;
pix3 += line_size;
}
return s;
}
static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
pix1 += line_size;
pix2 += line_size;
pix3 += line_size;
}
return s;
}
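/* Noise-shaping SSE: score1 is plain SSE, score2 accumulates the difference in local 2x2 detail between the two blocks, so the weighted |score2| term penalizes texture that was added or lost. */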
static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
MpegEncContext *c = v;
int score1=0;
int score2=0;
int x,y;
for(y=0; y<h; y++){
for(x=0; x<16; x++){
score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
}
if(y+1<h){
for(x=0; x<15; x++){
score2+= FFABS( s1[x ] - s1[x +stride]
- s1[x+1] + s1[x+1+stride])
-FFABS( s2[x ] - s2[x +stride]
- s2[x+1] + s2[x+1+stride]);
}
}
s1+= stride;
s2+= stride;
}
if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
else return score1 + FFABS(score2)*8;
}
static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
MpegEncContext *c = v;
int score1=0;
int score2=0;
int x,y;
for(y=0; y<h; y++){
for(x=0; x<8; x++){
score1+= (s1[x ] - s2[x ])*(s1[x ] - s2[x ]);
}
if(y+1<h){
for(x=0; x<7; x++){
score2+= FFABS( s1[x ] - s1[x +stride]
- s1[x+1] + s1[x+1+stride])
-FFABS( s2[x ] - s2[x +stride]
- s2[x+1] + s2[x+1+stride]);
}
}
s1+= stride;
s2+= stride;
}
if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
else return score1 + FFABS(score2)*8;
}
static int try_8x8basis_c(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
int i;
unsigned int sum=0;
for(i=0; i<8*8; i++){
int b= rem[i] + ((basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT));
int w= weight[i];
b>>= RECON_SHIFT;
av_assert2(-512<b && b<512);
sum += (w*b)*(w*b)>>4;
}
return sum>>2;
}
static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale){
int i;
for(i=0; i<8*8; i++){
rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
}
}
static int zero_cmp(void *s, uint8_t *a, uint8_t *b, int stride, int h){
return 0;
}
void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
int i;
memset(cmp, 0, sizeof(void*)*6);
for(i=0; i<6; i++){
switch(type&0xFF){
case FF_CMP_SAD:
cmp[i]= c->sad[i];
break;
case FF_CMP_SATD:
cmp[i]= c->hadamard8_diff[i];
break;
case FF_CMP_SSE:
cmp[i]= c->sse[i];
break;
case FF_CMP_DCT:
cmp[i]= c->dct_sad[i];
break;
case FF_CMP_DCT264:
cmp[i]= c->dct264_sad[i];
break;
case FF_CMP_DCTMAX:
cmp[i]= c->dct_max[i];
break;
case FF_CMP_PSNR:
cmp[i]= c->quant_psnr[i];
break;
case FF_CMP_BIT:
cmp[i]= c->bit[i];
break;
case FF_CMP_RD:
cmp[i]= c->rd[i];
break;
case FF_CMP_VSAD:
cmp[i]= c->vsad[i];
break;
case FF_CMP_VSSE:
cmp[i]= c->vsse[i];
break;
case FF_CMP_ZERO:
cmp[i]= zero_cmp;
break;
case FF_CMP_NSSE:
cmp[i]= c->nsse[i];
break;
#if CONFIG_DWT
case FF_CMP_W53:
cmp[i]= c->w53[i];
break;
case FF_CMP_W97:
cmp[i]= c->w97[i];
break;
#endif
default:
av_log(NULL, AV_LOG_ERROR,"internal error in cmp function selection\n");
}
}
}
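/* SWAR byte addition: add the low 7 bits of every byte lane in one machine word, then patch each lane's top bit back in with XOR so carries never cross lane boundaries. */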
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
long i;
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
long a = *(long*)(src+i);
long b = *(long*)(dst+i);
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
}
for(; i<w; i++)
dst[i+0] += src[i+0];
}
static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w){
long i;
#if !HAVE_FAST_UNALIGNED
if((long)src2 & (sizeof(long)-1)){
for(i=0; i+7<w; i+=8){
dst[i+0] = src1[i+0]-src2[i+0];
dst[i+1] = src1[i+1]-src2[i+1];
dst[i+2] = src1[i+2]-src2[i+2];
dst[i+3] = src1[i+3]-src2[i+3];
dst[i+4] = src1[i+4]-src2[i+4];
dst[i+5] = src1[i+5]-src2[i+5];
dst[i+6] = src1[i+6]-src2[i+6];
dst[i+7] = src1[i+7]-src2[i+7];
}
}else
#endif
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
long a = *(long*)(src1+i);
long b = *(long*)(src2+i);
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
}
for(; i<w; i++)
dst[i+0] = src1[i+0]-src2[i+0];
}
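/* HuffYUV median prediction: each output byte is the stored residual plus mid_pred(left, top, left + top - topleft). */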
static void add_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *diff, int w, int *left, int *left_top){
int i;
uint8_t l, lt;
l= *left;
lt= *left_top;
for(i=0; i<w; i++){
l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
lt= src1[i];
dst[i]= l;
}
*left= l;
*left_top= lt;
}
static void sub_hfyu_median_prediction_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top){
int i;
uint8_t l, lt;
l= *left;
lt= *left_top;
for(i=0; i<w; i++){
const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
lt= src1[i];
l= src2[i];
dst[i]= l - pred;
}
*left= l;
*left_top= lt;
}
static int add_hfyu_left_prediction_c(uint8_t *dst, const uint8_t *src, int w, int acc){
int i;
for(i=0; i<w-1; i++){
acc+= src[i];
dst[i]= acc;
i++;
acc+= src[i];
dst[i]= acc;
}
for(; i<w; i++){
acc+= src[i];
dst[i]= acc;
}
return acc;
}
#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#define A 0
#else
#define B 0
#define G 1
#define R 2
#define A 3
#endif
static void add_hfyu_left_prediction_bgr32_c(uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
int i;
int r,g,b,a;
r= *red;
g= *green;
b= *blue;
a= *alpha;
for(i=0; i<w; i++){
b+= src[4*i+B];
g+= src[4*i+G];
r+= src[4*i+R];
a+= src[4*i+A];
dst[4*i+B]= b;
dst[4*i+G]= g;
dst[4*i+R]= r;
dst[4*i+A]= a;
}
*red= r;
*green= g;
*blue= b;
*alpha= a;
}
#undef B
#undef G
#undef R
#undef A
#define BUTTERFLY2(o1,o2,i1,i2) \
o1= (i1)+(i2);\
o2= (i1)-(i2);
#define BUTTERFLY1(x,y) \
{\
int a,b;\
a= x;\
b= y;\
x= a+b;\
y= a-b;\
}
#define BUTTERFLYA(x,y) (FFABS((x)+(y)) + FFABS((x)-(y)))
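/* SATD: butterfly an 8x8 Hadamard transform over the residual, rows first then columns, and sum the absolute transform coefficients. */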
static int hadamard8_diff8x8_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
int i;
int temp[64];
int sum=0;
av_assert2(h==8);
for(i=0; i<8; i++){
//FIXME try pointer walks
BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0]-dst[stride*i+0],src[stride*i+1]-dst[stride*i+1]);
BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2]-dst[stride*i+2],src[stride*i+3]-dst[stride*i+3]);
BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4]-dst[stride*i+4],src[stride*i+5]-dst[stride*i+5]);
BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6]-dst[stride*i+6],src[stride*i+7]-dst[stride*i+7]);
BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
}
for(i=0; i<8; i++){
BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
sum +=
BUTTERFLYA(temp[8*0+i], temp[8*4+i])
+BUTTERFLYA(temp[8*1+i], temp[8*5+i])
+BUTTERFLYA(temp[8*2+i], temp[8*6+i])
+BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
}
return sum;
}
static int hadamard8_intra8x8_c(/*MpegEncContext*/ void *s, uint8_t *src, uint8_t *dummy, int stride, int h){
int i;
int temp[64];
int sum=0;
av_assert2(h==8);
for(i=0; i<8; i++){
//FIXME try pointer walks
BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0],src[stride*i+1]);
BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2],src[stride*i+3]);
BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4],src[stride*i+5]);
BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6],src[stride*i+7]);
BUTTERFLY1(temp[8*i+0], temp[8*i+2]);
BUTTERFLY1(temp[8*i+1], temp[8*i+3]);
BUTTERFLY1(temp[8*i+4], temp[8*i+6]);
BUTTERFLY1(temp[8*i+5], temp[8*i+7]);
BUTTERFLY1(temp[8*i+0], temp[8*i+4]);
BUTTERFLY1(temp[8*i+1], temp[8*i+5]);
BUTTERFLY1(temp[8*i+2], temp[8*i+6]);
BUTTERFLY1(temp[8*i+3], temp[8*i+7]);
}
for(i=0; i<8; i++){
BUTTERFLY1(temp[8*0+i], temp[8*1+i]);
BUTTERFLY1(temp[8*2+i], temp[8*3+i]);
BUTTERFLY1(temp[8*4+i], temp[8*5+i]);
BUTTERFLY1(temp[8*6+i], temp[8*7+i]);
BUTTERFLY1(temp[8*0+i], temp[8*2+i]);
BUTTERFLY1(temp[8*1+i], temp[8*3+i]);
BUTTERFLY1(temp[8*4+i], temp[8*6+i]);
BUTTERFLY1(temp[8*5+i], temp[8*7+i]);
sum +=
BUTTERFLYA(temp[8*0+i], temp[8*4+i])
+BUTTERFLYA(temp[8*1+i], temp[8*5+i])
+BUTTERFLYA(temp[8*2+i], temp[8*6+i])
+BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
}
sum -= FFABS(temp[8*0] + temp[8*4]); // -mean
return sum;
}
static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
av_assert2(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->dsp.fdct(temp);
return s->dsp.sum_abs_dctelem(temp);
}
#if CONFIG_GPL
#define DCT8_1D {\
const int s07 = SRC(0) + SRC(7);\
const int s16 = SRC(1) + SRC(6);\
const int s25 = SRC(2) + SRC(5);\
const int s34 = SRC(3) + SRC(4);\
const int a0 = s07 + s34;\
const int a1 = s16 + s25;\
const int a2 = s07 - s34;\
const int a3 = s16 - s25;\
const int d07 = SRC(0) - SRC(7);\
const int d16 = SRC(1) - SRC(6);\
const int d25 = SRC(2) - SRC(5);\
const int d34 = SRC(3) - SRC(4);\
const int a4 = d16 + d25 + (d07 + (d07>>1));\
const int a5 = d07 - d34 - (d25 + (d25>>1));\
const int a6 = d07 + d34 - (d16 + (d16>>1));\
const int a7 = d16 - d25 + (d34 + (d34>>1));\
DST(0, a0 + a1 ) ;\
DST(1, a4 + (a7>>2)) ;\
DST(2, a2 + (a3>>1)) ;\
DST(3, a5 + (a6>>2)) ;\
DST(4, a0 - a1 ) ;\
DST(5, a6 - (a5>>2)) ;\
DST(6, (a2>>1) - a3 ) ;\
DST(7, (a4>>2) - a7 ) ;\
}
static int dct264_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
int16_t dct[8][8];
int i;
int sum=0;
s->dsp.diff_pixels(dct[0], src1, src2, stride);
#define SRC(x) dct[i][x]
#define DST(x,v) dct[i][x]= v
for( i = 0; i < 8; i++ )
DCT8_1D
#undef SRC
#undef DST
#define SRC(x) dct[x][i]
#define DST(x,v) sum += FFABS(v)
for( i = 0; i < 8; i++ )
DCT8_1D
#undef SRC
#undef DST
return sum;
}
#endif
static int dct_max8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
int sum=0, i;
av_assert2(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->dsp.fdct(temp);
for(i=0; i<64; i++)
sum= FFMAX(sum, FFABS(temp[i]));
return sum;
}
static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
LOCAL_ALIGNED_16(int16_t, temp, [64*2]);
int16_t * const bak = temp+64;
int sum=0, i;
av_assert2(h==8);
s->mb_intra=0;
s->dsp.diff_pixels(temp, src1, src2, stride);
memcpy(bak, temp, 64*sizeof(int16_t));
s->block_last_index[0/*FIXME*/]= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
s->dct_unquantize_inter(s, temp, 0, s->qscale);
ff_simple_idct_8(temp); //FIXME
for(i=0; i<64; i++)
sum+= (temp[i]-bak[i])*(temp[i]-bak[i]);
return sum;
}
static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
const uint8_t *scantable= s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
int i, last, run, bits, level, distortion, start_i;
const int esc_length= s->ac_esc_length;
uint8_t * length;
uint8_t * last_length;
av_assert2(h==8);
copy_block8(lsrc1, src1, 8, stride, 8);
copy_block8(lsrc2, src2, 8, stride, 8);
s->dsp.diff_pixels(temp, lsrc1, lsrc2, 8);
s->block_last_index[0/*FIXME*/]= last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
bits=0;
if (s->mb_intra) {
start_i = 1;
length = s->intra_ac_vlc_length;
last_length= s->intra_ac_vlc_last_length;
bits+= s->luma_dc_vlc_length[temp[0] + 256]; //FIXME chroma
} else {
start_i = 0;
length = s->inter_ac_vlc_length;
last_length= s->inter_ac_vlc_last_length;
}
if(last>=start_i){
run=0;
for(i=start_i; i<last; i++){
int j= scantable[i];
level= temp[j];
if(level){
level+=64;
if((level&(~127)) == 0){
bits+= length[UNI_AC_ENC_INDEX(run, level)];
}else
bits+= esc_length;
run=0;
}else
run++;
}
i= scantable[last];
level= temp[i] + 64;
av_assert2(level - 64);
if((level&(~127)) == 0){
bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
}else
bits+= esc_length;
}
if(last>=0){
if(s->mb_intra)
s->dct_unquantize_intra(s, temp, 0, s->qscale);
else
s->dct_unquantize_inter(s, temp, 0, s->qscale);
}
s->dsp.idct_add(lsrc2, 8, temp);
distortion= s->dsp.sse[1](NULL, lsrc2, lsrc1, 8, 8);
return distortion + ((bits*s->qscale*s->qscale*109 + 64)>>7);
}
static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
const uint8_t *scantable= s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
int i, last, run, bits, level, start_i;
const int esc_length= s->ac_esc_length;
uint8_t * length;
uint8_t * last_length;
av_assert2(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->block_last_index[0/*FIXME*/]= last= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i);
bits=0;
if (s->mb_intra) {
start_i = 1;
length = s->intra_ac_vlc_length;
last_length= s->intra_ac_vlc_last_length;
bits+= s->luma_dc_vlc_length[temp[0] + 256]; //FIXME chroma
} else {
start_i = 0;
length = s->inter_ac_vlc_length;
last_length= s->inter_ac_vlc_last_length;
}
if(last>=start_i){
run=0;
for(i=start_i; i<last; i++){
int j= scantable[i];
level= temp[j];
if(level){
level+=64;
if((level&(~127)) == 0){
bits+= length[UNI_AC_ENC_INDEX(run, level)];
}else
bits+= esc_length;
run=0;
}else
run++;
}
i= scantable[last];
level= temp[i] + 64;
av_assert2(level - 64);
if((level&(~127)) == 0){
bits+= last_length[UNI_AC_ENC_INDEX(run, level)];
}else
bits+= esc_length;
}
return bits;
}
#define VSAD_INTRA(size) \
static int vsad_intra##size##_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ \
int score=0; \
int x,y; \
\
for(y=1; y<h; y++){ \
for(x=0; x<size; x+=4){ \
score+= FFABS(s[x ] - s[x +stride]) + FFABS(s[x+1] - s[x+1+stride]) \
+FFABS(s[x+2] - s[x+2+stride]) + FFABS(s[x+3] - s[x+3+stride]); \
} \
s+= stride; \
} \
\
return score; \
}
VSAD_INTRA(8)
VSAD_INTRA(16)
static int vsad16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){
int score=0;
int x,y;
for(y=1; y<h; y++){
for(x=0; x<16; x++){
score+= FFABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
}
s1+= stride;
s2+= stride;
}
return score;
}
#define SQ(a) ((a)*(a))
#define VSSE_INTRA(size) \
static int vsse_intra##size##_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ \
int score=0; \
int x,y; \
\
for(y=1; y<h; y++){ \
for(x=0; x<size; x+=4){ \
score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride]) \
+SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]); \
} \
s+= stride; \
} \
\
return score; \
}
VSSE_INTRA(8)
VSSE_INTRA(16)
static int vsse16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){
int score=0;
int x,y;
for(y=1; y<h; y++){
for(x=0; x<16; x++){
score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
}
s1+= stride;
s2+= stride;
}
return score;
}
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
int size){
int score=0;
int i;
for(i=0; i<size; i++)
score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
return score;
}
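/* Assemble a 16-wide comparison from 8x8 calls: two across the top half, plus two more for the bottom half when h == 16. */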
#define WRAPPER8_16_SQ(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
int score=0;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
if(h==16){\
dst += 8*stride;\
src += 8*stride;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
}\
return score;\
}
WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
#if CONFIG_GPL
WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
#endif
WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WRAPPER8_16_SQ(rd8x8_c, rd16_c)
WRAPPER8_16_SQ(bit8x8_c, bit16_c)
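/* Clip floats via their IEEE-754 bit patterns: with min < 0 < max, negative floats order as large unsigned integers, so one unsigned compare catches values below min and a sign-bit flip lets a second compare catch values above max. */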
static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini,
uint32_t maxi, uint32_t maxisign)
{
if(a > mini) return mini;
else if((a^(1U<<31)) > maxisign) return maxi;
else return a;
}
static void vector_clipf_c_opposite_sign(float *dst, const float *src, float *min, float *max, int len){
int i;
uint32_t mini = *(uint32_t*)min;
uint32_t maxi = *(uint32_t*)max;
uint32_t maxisign = maxi ^ (1U<<31);
uint32_t *dsti = (uint32_t*)dst;
const uint32_t *srci = (const uint32_t*)src;
for(i=0; i<len; i+=8) {
dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign);
dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign);
dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign);
dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign);
dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign);
dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign);
dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign);
dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign);
}
}
static void vector_clipf_c(float *dst, const float *src, float min, float max, int len){
int i;
if(min < 0 && max > 0) {
vector_clipf_c_opposite_sign(dst, src, &min, &max, len);
} else {
for(i=0; i < len; i+=8) {
dst[i ] = av_clipf(src[i ], min, max);
dst[i + 1] = av_clipf(src[i + 1], min, max);
dst[i + 2] = av_clipf(src[i + 2], min, max);
dst[i + 3] = av_clipf(src[i + 3], min, max);
dst[i + 4] = av_clipf(src[i + 4], min, max);
dst[i + 5] = av_clipf(src[i + 5], min, max);
dst[i + 6] = av_clipf(src[i + 6], min, max);
dst[i + 7] = av_clipf(src[i + 7], min, max);
}
}
}
static int32_t scalarproduct_int16_c(const int16_t * v1, const int16_t * v2, int order)
{
int res = 0;
while (order--)
res += *v1++ * *v2++;
return res;
}
static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul)
{
int res = 0;
while (order--) {
res += *v1 * *v2++;
*v1++ += mul * *v3++;
}
return res;
}
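/* Apply a symmetric window: window[i] scales both input[i] and its mirror image, with Q15 rounding ((x + 2^14) >> 15). */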
static void apply_window_int16_c(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len)
{
int i;
int len2 = len >> 1;
for (i = 0; i < len2; i++) {
int16_t w = window[i];
output[i] = (MUL16(input[i], w) + (1 << 14)) >> 15;
output[len-i-1] = (MUL16(input[len-i-1], w) + (1 << 14)) >> 15;
}
}
static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min,
int32_t max, unsigned int len)
{
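/* Unrolled by 8 with no remainder handling: len is evidently expected to be a nonzero multiple of 8. */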
do {
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
*dst++ = av_clip(*src++, min, max);
len -= 8;
} while (len > 0);
}
static void jref_idct_put(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct (block);
put_pixels_clamped_c(block, dest, line_size);
}
static void jref_idct_add(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct (block);
add_pixels_clamped_c(block, dest, line_size);
}
static void ff_jref_idct4_put(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct4 (block);
put_pixels_clamped4_c(block, dest, line_size);
}
static void ff_jref_idct4_add(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct4 (block);
add_pixels_clamped4_c(block, dest, line_size);
}
static void ff_jref_idct2_put(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct2 (block);
put_pixels_clamped2_c(block, dest, line_size);
}
static void ff_jref_idct2_add(uint8_t *dest, int line_size, int16_t *block)
{
ff_j_rev_dct2 (block);
add_pixels_clamped2_c(block, dest, line_size);
}
static void ff_jref_idct1_put(uint8_t *dest, int line_size, int16_t *block)
{
dest[0] = av_clip_uint8((block[0] + 4)>>3);
}
static void ff_jref_idct1_add(uint8_t *dest, int line_size, int16_t *block)
{
dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3));
}
/* init static data */
av_cold void ff_dsputil_static_init(void)
{
int i;
for(i=0;i<512;i++) {
ff_squareTbl[i] = (i - 256) * (i - 256);
}
for(i=0; i<64; i++) ff_inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
}
int ff_check_alignment(void){
static int did_fail=0;
LOCAL_ALIGNED_16(int, aligned, [4]);
if((intptr_t)aligned & 15){
if(!did_fail){
#if HAVE_MMX || HAVE_ALTIVEC
av_log(NULL, AV_LOG_ERROR,
"Compiler did not align stack variables. Libavcodec has been miscompiled\n"
"and may be very slow or crash. This is not a bug in libavcodec,\n"
"but in the compiler. You may try recompiling using gcc >= 4.2.\n"
"Do not report crashes to FFmpeg developers.\n");
#endif
did_fail=1;
}
return -1;
}
return 0;
}
av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
ff_check_alignment();
#if CONFIG_ENCODERS
if (avctx->bits_per_raw_sample == 10) {
c->fdct = ff_jpeg_fdct_islow_10;
c->fdct248 = ff_fdct248_islow_10;
} else {
if(avctx->dct_algo==FF_DCT_FASTINT) {
c->fdct = ff_fdct_ifast;
c->fdct248 = ff_fdct_ifast248;
}
else if(avctx->dct_algo==FF_DCT_FAAN) {
c->fdct = ff_faandct;
c->fdct248 = ff_faandct248;
}
else {
c->fdct = ff_jpeg_fdct_islow_8; //slow/accurate/default
c->fdct248 = ff_fdct248_islow_8;
}
}
#endif //CONFIG_ENCODERS
if(avctx->lowres==1){
c->idct_put= ff_jref_idct4_put;
c->idct_add= ff_jref_idct4_add;
c->idct = ff_j_rev_dct4;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else if(avctx->lowres==2){
c->idct_put= ff_jref_idct2_put;
c->idct_add= ff_jref_idct2_add;
c->idct = ff_j_rev_dct2;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else if(avctx->lowres==3){
c->idct_put= ff_jref_idct1_put;
c->idct_add= ff_jref_idct1_add;
c->idct = ff_j_rev_dct1;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else{
if (avctx->bits_per_raw_sample == 10) {
c->idct_put = ff_simple_idct_put_10;
c->idct_add = ff_simple_idct_add_10;
c->idct = ff_simple_idct_10;
c->idct_permutation_type = FF_NO_IDCT_PERM;
} else {
if(avctx->idct_algo==FF_IDCT_INT){
c->idct_put= jref_idct_put;
c->idct_add= jref_idct_add;
c->idct = ff_j_rev_dct;
c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
}else if(avctx->idct_algo==FF_IDCT_FAAN){
c->idct_put= ff_faanidct_put;
c->idct_add= ff_faanidct_add;
c->idct = ff_faanidct;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}else{ //accurate/default
c->idct_put = ff_simple_idct_put_8;
c->idct_add = ff_simple_idct_add_8;
c->idct = ff_simple_idct_8;
c->idct_permutation_type= FF_NO_IDCT_PERM;
}
}
}
c->diff_pixels = diff_pixels_c;
c->put_pixels_clamped = put_pixels_clamped_c;
c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
c->add_pixels_clamped = add_pixels_clamped_c;
c->sum_abs_dctelem = sum_abs_dctelem_c;
c->gmc1 = gmc1_c;
c->gmc = ff_gmc_c;
c->pix_sum = pix_sum_c;
c->pix_norm1 = pix_norm1_c;
c->fill_block_tab[0] = fill_block16_c;
c->fill_block_tab[1] = fill_block8_c;
/* pix_abs[0][*] operate on 16x16 blocks, pix_abs[1][*] on 8x8 */
c->pix_abs[0][0] = pix_abs16_c;
c->pix_abs[0][1] = pix_abs16_x2_c;
c->pix_abs[0][2] = pix_abs16_y2_c;
c->pix_abs[0][3] = pix_abs16_xy2_c;
c->pix_abs[1][0] = pix_abs8_c;
c->pix_abs[1][1] = pix_abs8_x2_c;
c->pix_abs[1][2] = pix_abs8_y2_c;
c->pix_abs[1][3] = pix_abs8_xy2_c;
c->put_tpel_pixels_tab[ 0] = put_tpel_pixels_mc00_c;
c->put_tpel_pixels_tab[ 1] = put_tpel_pixels_mc10_c;
c->put_tpel_pixels_tab[ 2] = put_tpel_pixels_mc20_c;
c->put_tpel_pixels_tab[ 4] = put_tpel_pixels_mc01_c;
c->put_tpel_pixels_tab[ 5] = put_tpel_pixels_mc11_c;
c->put_tpel_pixels_tab[ 6] = put_tpel_pixels_mc21_c;
c->put_tpel_pixels_tab[ 8] = put_tpel_pixels_mc02_c;
c->put_tpel_pixels_tab[ 9] = put_tpel_pixels_mc12_c;
c->put_tpel_pixels_tab[10] = put_tpel_pixels_mc22_c;
c->avg_tpel_pixels_tab[ 0] = avg_tpel_pixels_mc00_c;
c->avg_tpel_pixels_tab[ 1] = avg_tpel_pixels_mc10_c;
c->avg_tpel_pixels_tab[ 2] = avg_tpel_pixels_mc20_c;
c->avg_tpel_pixels_tab[ 4] = avg_tpel_pixels_mc01_c;
c->avg_tpel_pixels_tab[ 5] = avg_tpel_pixels_mc11_c;
c->avg_tpel_pixels_tab[ 6] = avg_tpel_pixels_mc21_c;
c->avg_tpel_pixels_tab[ 8] = avg_tpel_pixels_mc02_c;
c->avg_tpel_pixels_tab[ 9] = avg_tpel_pixels_mc12_c;
c->avg_tpel_pixels_tab[10] = avg_tpel_pixels_mc22_c;
#define dspfunc(PFX, IDX, NUM) \
c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
dspfunc(put_qpel, 0, 16);
dspfunc(put_no_rnd_qpel, 0, 16);
dspfunc(avg_qpel, 0, 16);
/* dspfunc(avg_no_rnd_qpel, 0, 16); */
dspfunc(put_qpel, 1, 8);
dspfunc(put_no_rnd_qpel, 1, 8);
dspfunc(avg_qpel, 1, 8);
/* dspfunc(avg_no_rnd_qpel, 1, 8); */
#undef dspfunc
c->put_mspel_pixels_tab[0]= ff_put_pixels8x8_c;
c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;
#define SET_CMP_FUNC(name) \
c->name[0]= name ## 16_c;\
c->name[1]= name ## 8x8_c;
SET_CMP_FUNC(hadamard8_diff)
c->hadamard8_diff[4]= hadamard8_intra16_c;
c->hadamard8_diff[5]= hadamard8_intra8x8_c;
SET_CMP_FUNC(dct_sad)
SET_CMP_FUNC(dct_max)
#if CONFIG_GPL
SET_CMP_FUNC(dct264_sad)
#endif
c->sad[0]= pix_abs16_c;
c->sad[1]= pix_abs8_c;
c->sse[0]= sse16_c;
c->sse[1]= sse8_c;
c->sse[2]= sse4_c;
SET_CMP_FUNC(quant_psnr)
SET_CMP_FUNC(rd)
SET_CMP_FUNC(bit)
c->vsad[0]= vsad16_c;
c->vsad[4]= vsad_intra16_c;
c->vsad[5]= vsad_intra8_c;
c->vsse[0]= vsse16_c;
c->vsse[4]= vsse_intra16_c;
c->vsse[5]= vsse_intra8_c;
c->nsse[0]= nsse16_c;
c->nsse[1]= nsse8_c;
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
ff_dsputil_init_dwt(c);
#endif
c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c;
c->add_bytes= add_bytes_c;
c->diff_bytes= diff_bytes_c;
c->add_hfyu_median_prediction= add_hfyu_median_prediction_c;
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c;
c->add_hfyu_left_prediction = add_hfyu_left_prediction_c;
c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c;
c->bswap_buf= bswap_buf;
c->bswap16_buf = bswap16_buf;
if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
c->h263_h_loop_filter= h263_h_loop_filter_c;
c->h263_v_loop_filter= h263_v_loop_filter_c;
}
c->try_8x8basis= try_8x8basis_c;
c->add_8x8basis= add_8x8basis_c;
c->vector_clipf = vector_clipf_c;
c->scalarproduct_int16 = scalarproduct_int16_c;
c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
c->apply_window_int16 = apply_window_int16_c;
c->vector_clip_int32 = vector_clip_int32_c;
c->shrink[0]= av_image_copy_plane;
c->shrink[1]= ff_shrink22;
c->shrink[2]= ff_shrink44;
c->shrink[3]= ff_shrink88;
c->add_pixels8 = add_pixels8_c;
#undef FUNC
#undef FUNCC
#define FUNC(f, depth) f ## _ ## depth
#define FUNCC(f, depth) f ## _ ## depth ## _c
c->draw_edges = FUNCC(draw_edges, 8);
c->clear_block = FUNCC(clear_block, 8);
c->clear_blocks = FUNCC(clear_blocks, 8);
#define BIT_DEPTH_FUNCS(depth) \
c->get_pixels = FUNCC(get_pixels, depth);
switch (avctx->bits_per_raw_sample) {
case 9:
case 10:
case 12:
case 14:
BIT_DEPTH_FUNCS(16);
break;
default:
if(avctx->bits_per_raw_sample<=8 || avctx->codec_type != AVMEDIA_TYPE_VIDEO) {
BIT_DEPTH_FUNCS(8);
}
break;
}
if (ARCH_ALPHA)
ff_dsputil_init_alpha(c, avctx);
if (ARCH_ARM)
ff_dsputil_init_arm(c, avctx);
if (ARCH_BFIN)
ff_dsputil_init_bfin(c, avctx);
if (ARCH_PPC)
ff_dsputil_init_ppc(c, avctx);
if (ARCH_SH4)
ff_dsputil_init_sh4(c, avctx);
if (HAVE_VIS)
ff_dsputil_init_vis(c, avctx);
if (ARCH_X86)
ff_dsputil_init_x86(c, avctx);
ff_init_scantable_permutation(c->idct_permutation,
c->idct_permutation_type);
}
av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
ff_dsputil_init(c, avctx);
}
av_cold void avpriv_dsputil_init(DSPContext *c, AVCodecContext *avctx)
{
ff_dsputil_init(c, avctx);
}
| ./CrossVul/dataset_final_sorted/CWE-189/c/good_5814_0 |
crossvul-cpp_data_bad_3666_0 | /*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
#include "private/gc_priv.h"
#include <stdio.h>
#include <string.h>
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
struct hblk ** result = (struct hblk **)
GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
if (result == 0) return(FALSE);
BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
kind -> ok_reclaim_list = result;
return(TRUE);
}
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
GC_bool ignore_off_page,
GC_bool retry); /* from alloc.c */
/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
struct hblk * h;
word n_blocks;
ptr_t result;
GC_bool retry = FALSE;
/* Round up to a multiple of a granule. */
lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
/* Do our share of marking work */
if (GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
h = GC_allochblk(lb, k, flags);
# ifdef USE_MUNMAP
if (0 == h) {
GC_merge_unmapped();
h = GC_allochblk(lb, k, flags);
}
# endif
while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
h = GC_allochblk(lb, k, flags);
retry = TRUE;
}
if (h == 0) {
result = 0;
} else {
size_t total_bytes = n_blocks * HBLKSIZE;
if (n_blocks > 1) {
GC_large_allocd_bytes += total_bytes;
if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
GC_max_large_allocd_bytes = GC_large_allocd_bytes;
}
result = h -> hb_body;
}
return result;
}
/* Allocate a large block of size lb bytes. Clear if appropriate. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
ptr_t result = GC_alloc_large(lb, k, flags);
word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (0 == result) return 0;
if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
/* Clear the whole block, in case of GC_realloc call. */
BZERO(result, n_blocks * HBLKSIZE);
}
return result;
}
/* Allocate lb bytes for an object of kind k. */
/* Should not be used directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold lock: */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
void *op;
if(SMALL_OBJ(lb)) {
struct obj_kind * kind = GC_obj_kinds + k;
size_t lg = GC_size_map[lb];
void ** opp = &(kind -> ok_freelist[lg]);
op = *opp;
if (EXPECT(0 == op, FALSE)) {
if (GC_size_map[lb] == 0) {
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
return(GC_generic_malloc_inner(lb, k));
}
if (kind -> ok_reclaim_list == 0) {
if (!GC_alloc_reclaim_list(kind)) goto out;
}
op = GC_allocobj(lg, k);
if (op == 0) goto out;
}
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
} else {
op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
GC_bytes_allocd += lb;
}
out:
return op;
}
/* Allocate a composite object of size n bytes. The caller guarantees */
/* that pointers past the first page are not relevant. Caller holds */
/* allocation lock. */
GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
word lb_adjusted;
void * op;
if (lb <= HBLKSIZE)
return(GC_generic_malloc_inner(lb, k));
lb_adjusted = ADD_SLOP(lb);
op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
GC_bytes_allocd += lb_adjusted;
return op;
}
GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
void * result;
DCL_LOCK_STATE;
if (EXPECT(GC_have_errors, FALSE))
GC_print_all_errors();
GC_INVOKE_FINALIZERS();
if (SMALL_OBJ(lb)) {
LOCK();
result = GC_generic_malloc_inner((word)lb, k);
UNLOCK();
} else {
size_t lg;
size_t lb_rounded;
word n_blocks;
GC_bool init;
lg = ROUNDED_UP_GRANULES(lb);
lb_rounded = GRANULES_TO_BYTES(lg);
n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
LOCK();
result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
} else {
# ifdef THREADS
/* Clear any memory that might be used for GC descriptors */
/* before we release the lock. */
((word *)result)[0] = 0;
((word *)result)[1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
# endif
}
}
GC_bytes_allocd += lb_rounded;
UNLOCK();
if (init && !GC_debugging_started && 0 != result) {
BZERO(result, n_blocks * HBLKSIZE);
}
}
if (0 == result) {
return((*GC_get_oom_fn())(lb));
} else {
return(result);
}
}
/* Allocate lb bytes of atomic (pointerfree) data */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc_atomic(size_t lb)
#else
GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
#endif
{
void *op;
void ** opp;
size_t lg;
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = &(GC_aobjfreelist[lg]);
LOCK();
if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
*opp = obj_link(op);
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
return((void *) op);
} else {
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
}
/* Allocate lb bytes of composite (pointerful) data */
#ifdef THREAD_LOCAL_ALLOC
GC_INNER void * GC_core_malloc(size_t lb)
#else
GC_API void * GC_CALL GC_malloc(size_t lb)
#endif
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
lg = GC_size_map[lb];
opp = (void **)&(GC_objfreelist[lg]);
LOCK();
if (EXPECT((op = *opp) == 0, FALSE)) {
UNLOCK();
return (GENERAL_MALLOC((word)lb, NORMAL));
}
GC_ASSERT(0 == obj_link(op)
|| ((word)obj_link(op)
<= (word)GC_greatest_plausible_heap_addr
&& (word)obj_link(op)
>= (word)GC_least_plausible_heap_addr));
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
return op;
} else {
return(GENERAL_MALLOC(lb, NORMAL));
}
}
/* Allocate lb bytes of pointerful, traced, but not collectable data */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
void *op;
void **opp;
size_t lg;
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
lg = GC_size_map[lb];
opp = &(GC_uobjfreelist[lg]);
LOCK();
op = *opp;
if (EXPECT(0 != op, TRUE)) {
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
/* Mark bit was already set on free list. It will be */
/* cleared only temporarily during a collection, as a */
/* result of the normal free list mark bit clearing. */
GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
UNLOCK();
} else {
UNLOCK();
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
/* For small objects, the free lists are completely marked. */
}
GC_ASSERT(0 == op || GC_is_marked(op));
return((void *) op);
} else {
hdr * hhdr;
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
if (0 == op) return(0);
GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
hhdr = HDR(op);
/* We don't need the lock here, since we have an undisguised */
/* pointer. We do need to hold the lock while we adjust */
/* mark bits. */
LOCK();
set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
GC_ASSERT(hhdr -> hb_n_marks == 0);
hhdr -> hb_n_marks = 1;
UNLOCK();
return((void *) op);
}
}
#ifdef REDIRECT_MALLOC
# ifndef MSWINCE
# include <errno.h>
# endif
/* Avoid unnecessary nested procedure calls here, by #defining some */
/* malloc replacements. Otherwise we end up saving a */
/* meaningless return address in the object. It also speeds things up, */
/* but it is admittedly quite ugly. */
# define GC_debug_malloc_replacement(lb) \
GC_debug_malloc(lb, GC_DBG_RA "unknown", 0)
void * malloc(size_t lb)
{
/* It might help to manually inline the GC_malloc call here. */
/* But any decent compiler should reduce the extra procedure call */
/* to at most a jump instruction in this case. */
# if defined(I386) && defined(GC_SOLARIS_THREADS)
/*
* Thread initialisation can call malloc before
* we're ready for it.
* It's not clear that this is enough to help matters.
* The thread implementation may well call malloc at other
* inopportune times.
*/
if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
# endif /* I386 && GC_SOLARIS_THREADS */
return((void *)REDIRECT_MALLOC(lb));
}
#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
STATIC ptr_t GC_libpthread_start = 0;
STATIC ptr_t GC_libpthread_end = 0;
STATIC ptr_t GC_libld_start = 0;
STATIC ptr_t GC_libld_end = 0;
STATIC void GC_init_lib_bounds(void)
{
if (GC_libpthread_start != 0) return;
GC_init(); /* if not called yet */
if (!GC_text_mapping("libpthread-",
&GC_libpthread_start, &GC_libpthread_end)) {
WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
/* This might still work with some versions of libpthread, */
/* so we don't abort. Perhaps we should. */
/* Generate message only once: */
GC_libpthread_start = (ptr_t)1;
}
if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
}
}
#endif /* GC_LINUX_THREADS */
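/* Note: the n*lb products below are formed with no overflow check, so huge arguments can wrap and under-allocate; given this sample's CWE-189 label, that unchecked multiplication is presumably the flaw being catalogued. */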
void * calloc(size_t n, size_t lb)
{
# if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
/* libpthread allocated some memory that is only pointed to by */
/* mmapped thread stacks. Make sure it's not collectable. */
{
static GC_bool lib_bounds_set = FALSE;
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
if (!EXPECT(lib_bounds_set, TRUE)) {
GC_init_lib_bounds();
lib_bounds_set = TRUE;
}
if (((word)caller >= (word)GC_libpthread_start
&& (word)caller < (word)GC_libpthread_end)
|| ((word)caller >= (word)GC_libld_start
&& (word)caller < (word)GC_libld_end))
return GC_malloc_uncollectable(n*lb);
/* The two ranges are actually usually adjacent, so there may */
/* be a way to speed this up. */
}
# endif
return((void *)REDIRECT_MALLOC(n*lb));
}
#ifndef strdup
char *strdup(const char *s)
{
size_t lb = strlen(s) + 1;
char *result = (char *)REDIRECT_MALLOC(lb);
if (result == 0) {
errno = ENOMEM;
return 0;
}
BCOPY(s, result, lb);
return result;
}
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it. */
/* This seems to be true on most Linux systems. */
#ifndef strndup
/* This is similar to strdup(). */
char *strndup(const char *str, size_t size)
{
char *copy;
size_t len = strlen(str);
if (len > size)
len = size;
copy = (char *)REDIRECT_MALLOC(len + 1);
if (copy == NULL) {
errno = ENOMEM;
return NULL;
}
BCOPY(str, copy, len);
copy[len] = '\0';
return copy;
}
#endif /* !strndup */
#undef GC_debug_malloc_replacement
#endif /* REDIRECT_MALLOC */
/* Explicitly deallocate an object p. */
GC_API void GC_CALL GC_free(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* In bytes */
size_t ngranules; /* sz in granules */
void **flh;
int knd;
struct obj_kind * ok;
DCL_LOCK_STATE;
if (p == 0) return;
/* Required by ANSI. It's not my fault ... */
# ifdef LOG_ALLOCS
GC_err_printf("GC_free(%p), GC: %lu\n", p, (unsigned long)GC_gc_no);
# endif
h = HBLKPTR(p);
hhdr = HDR(h);
# if defined(REDIRECT_MALLOC) && \
(defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
|| defined(MSWIN32))
/* For Solaris, we have to redirect malloc calls during */
/* initialization. For the others, this seems to happen */
/* implicitly. */
/* Don't try to deallocate that memory. */
if (0 == hhdr) return;
# endif
GC_ASSERT(GC_base(p) == p);
sz = hhdr -> hb_sz;
ngranules = BYTES_TO_GRANULES(sz);
knd = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[knd];
if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
/* It's unnecessary to clear the mark bit. If the */
/* object is reallocated, it doesn't matter. Otherwise */
/* the collector will do it, since it's on a free list. */
if (ok -> ok_init) {
BZERO((word *)p + 1, sz-sizeof(word));
}
flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
UNLOCK();
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
LOCK();
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (nblocks > 1) {
GC_large_allocd_bytes -= nblocks * HBLKSIZE;
}
GC_freehblk(h);
UNLOCK();
}
}
/* Explicitly deallocate an object p when we already hold lock. */
/* Only used for internally allocated objects, so we can take some */
/* shortcuts. */
#ifdef THREADS
GC_INNER void GC_free_inner(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* bytes */
size_t ngranules; /* sz in granules */
void ** flh;
int knd;
struct obj_kind * ok;
h = HBLKPTR(p);
hhdr = HDR(h);
knd = hhdr -> hb_obj_kind;
sz = hhdr -> hb_sz;
ngranules = BYTES_TO_GRANULES(sz);
ok = &GC_obj_kinds[knd];
if (ngranules <= MAXOBJGRANULES) {
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (ok -> ok_init) {
BZERO((word *)p + 1, sz-sizeof(word));
}
flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
GC_bytes_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (nblocks > 1) {
GC_large_allocd_bytes -= nblocks * HBLKSIZE;
}
GC_freehblk(h);
}
}
#endif /* THREADS */
#if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
# define REDIRECT_FREE GC_free
#endif
#ifdef REDIRECT_FREE
void free(void * p)
{
# if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
{
/* Don't bother with initialization checks. If nothing */
/* has been initialized, the check fails, and that's safe, */
/* since we haven't allocated uncollectable objects either. */
ptr_t caller = (ptr_t)__builtin_return_address(0);
/* This test does not need to ensure memory visibility, since */
/* the bounds will be set when/if we create another thread. */
if (((word)caller >= (word)GC_libpthread_start
&& (word)caller < (word)GC_libpthread_end)
|| ((word)caller >= (word)GC_libld_start
&& (word)caller < (word)GC_libld_end)) {
GC_free(p);
return;
}
}
# endif
# ifndef IGNORE_FREE
REDIRECT_FREE(p);
# endif
}
#endif /* REDIRECT_FREE */
| ./CrossVul/dataset_final_sorted/CWE-189/c/bad_3666_0 |