Columns: type (string, 5 classes) and content (string, lengths 9 to 163k characters).
defines
#define FLAGS_CLOCKRT 0x02
defines
#define FLAGS_HAS_TIMEOUT 0x04
structs
struct futex_pi_state { /* * list of 'owned' pi_state instances - these have to be * cleaned up in do_exit() if the task exits prematurely: */ struct list_head list; /* * The PI object: */ struct rt_mutex pi_mutex; struct task_struct *owner; atomic_t refcount; union futex_key key; };
structs
struct futex_q { struct plist_node list; struct task_struct *task; spinlock_t *lock_ptr; union futex_key key; struct futex_pi_state *pi_state; struct rt_mutex_waiter *rt_waiter; union futex_key *requeue_pi_key; u32 bitset; };
structs
struct futex_hash_bucket { spinlock_t lock; struct plist_head chain; };
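The buckets above are the roots of the global futex wait queues (the futex_queues[] array that futex_init() below initializes). As a minimal sketch, assuming the jhash2-based bucket selection used by mainline kernels of this generation, a key maps to a bucket roughly as follows; FUTEX_HASHBITS, the array declaration and the hash_futex() helper are reproduced here from memory and should be read as illustrative, not as this file's exact code:

#include <linux/jhash.h>

/* Illustrative sketch: how a futex_key selects one of the hash buckets.
 * The array declaration stands in for this file's own futex_queues[]. */
#define FUTEX_HASHBITS 8	/* 256 buckets; mainline uses CONFIG_BASE_SMALL ? 4 : 8 */

static struct futex_hash_bucket futex_queues[1 << FUTEX_HASHBITS];

static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32 *)&key->both.word,
			  (sizeof(key->both.word) + sizeof(key->both.ptr)) / 4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS) - 1)];
}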
functions
int match_futex(union futex_key *key1, union futex_key *key2) { return (key1 && key2 && key1->both.word == key2->both.word && key1->both.ptr == key2->both.ptr && key1->both.offset == key2->both.offset); }
functions
void get_futex_key_refs(union futex_key *key) { if (!key->both.ptr) return; switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: ihold(key->shared.inode); break; case FUT_OFF_MMSHARED: atomic_inc(&key->private.mm->mm_count); break; } }
functions
void drop_futex_key_refs(union futex_key *key) { if (!key->both.ptr) { /* If we're here then we tried to put a key we failed to get */ WARN_ON_ONCE(1); return; } switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: iput(key->shared.inode); break; case FUT_OFF_MMSHARED: mmdrop(key->private.mm); break; } }
functions
int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page, *page_head; int err, ro = 0; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; i...
functions
void put_futex_key(union futex_key *key) { drop_futex_key_refs(key); }
functions
int fault_in_user_writeable(u32 __user *uaddr) { struct mm_struct *mm = current->mm; int ret; down_read(&mm->mmap_sem); ret = fixup_user_fault(current, mm, (unsigned long)uaddr, FAULT_FLAG_WRITE); up_read(&mm->mmap_sem); return ret < 0 ? ret : 0; }
functions
int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval) { int ret; pagefault_disable(); ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval); pagefault_enable(); return ret; }
functions
int get_futex_value_locked(u32 *dest, u32 __user *from) { int ret; pagefault_disable(); ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); pagefault_enable(); return ret ? -EFAULT : 0; }
functions
int refill_pi_state_cache(void) { struct futex_pi_state *pi_state; if (likely(current->pi_state_cache)) return 0; pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); if (!pi_state) return -ENOMEM; INIT_LIST_HEAD(&pi_state->list); /* pi_mutex gets initialized later */ pi_state->owner = NULL; atomic_set(&p...
functions
void free_pi_state(struct futex_pi_state *pi_state) { if (!atomic_dec_and_test(&pi_state->refcount)) return; /* * If pi_state->owner is NULL, the owner is most probably dying * and has cleaned up the pi_state already */ if (pi_state->owner) { raw_spin_lock_irq(&pi_state->owner->pi_lock); list_del_init(&...
functions
void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; struct futex_hash_bucket *hb; union futex_key key = FUTEX_KEY_INIT; if (!futex_cmpxchg_enabled) return; /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state...
functions
int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps) { struct futex_pi_state *pi_state = NULL; struct futex_q *this, *next; struct plist_head *head; struct task_struct *p; pid_t pid = uval & FUTEX_TID_MASK; head = &hb->chain; plist_for_each_entry_safe(...
functions
int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, struct task_struct *task, int set_waiters) { int lock_taken, ret, force_take = 0; u32 uval, newval, curval, vpid = task_pid_vnr(task); retry: ret = lock_taken = 0; /* * To avoi...
functions
void __unqueue_futex(struct futex_q *q) { struct futex_hash_bucket *hb; if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) || WARN_ON(plist_node_empty(&q->list))) return; hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); plist_del(&q->list, &hb->chain); }
functions
void wake_futex(struct futex_q *q) { struct task_struct *p = q->task; if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) return; /* * We set q->lock_ptr = NULL _before_ we wake up the task. If * a non-futex wake up happens on another CPU then the task * might exit and p would dereference...
functions
int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) { struct task_struct *new_owner; struct futex_pi_state *pi_state = this->pi_state; u32 uninitialized_var(curval), newval; int ret = 0; if (!pi_state) return -EINVAL; /* * If current does not own the pi_state then the futex is * inconsist...
functions
int unlock_futex_pi(u32 __user *uaddr, u32 uval) { u32 uninitialized_var(oldval); /* * There is no waiter, so we unlock the futex. The owner died * bit has not to be preserved here. We are the owner: */ if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0)) return -EFAULT; if (oldval != uval) return -E...
functions
void double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) { if (hb1 <= hb2) { spin_lock(&hb1->lock); if (hb1 < hb2) spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); } else { spin_lock(&hb2->lock); spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); } }
functions
void double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) { spin_unlock(&hb1->lock); if (hb1 != hb2) spin_unlock(&hb2->lock); }
functions
int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) { struct futex_hash_bucket *hb; struct futex_q *this, *next; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; int ret; if (!bitset) return -EINVAL; ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);...
functions
int futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, int nr_wake, int nr_wake2, int op) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; struct futex_hash_bucket *hb1, *hb2; struct plist_head *head; struct futex_q *this, *next; int ret, op_ret; retry: ret = get_fute...
functions
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key2) { /* * If key1 and key2 hash to the same bucket, no need to * requeue. */ if (likely(&hb1->chain != &hb2->chain)) { plist_del(&q->list, &hb1->chain); plist_add(&q->list, &hb2->cha...
functions
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, struct futex_hash_bucket *hb) { get_futex_key_refs(key); q->key = *key; __unqueue_futex(q); WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; q->lock_ptr = &hb->lock; wake_up_state(q->task, TASK_NORMAL); }
functions
int futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key1, union futex_key *key2, struct futex_pi_state **ps, int set_waiters) { struct futex_q *top_waiter = NULL; u32 curval; int ret, vpid; if (get_futex_value_locked...
functions
int futex_requeue(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; int drop_count = 0, task_count = 0, ret; struct futex_pi_state *pi_state = NULL; struct futex_hash_bucket *h...
functions
else if (ret) { /* -EDEADLK */ this->pi_state = NULL; free_pi_state(pi_state); goto out_unlock; }
functions
int unqueue_me(struct futex_q *q) { spinlock_t *lock_ptr; int ret = 0; /* In the common case we don't take the spinlock, which is nice. */ retry: lock_ptr = q->lock_ptr; barrier(); if (lock_ptr != NULL) { spin_lock(lock_ptr); /* * q->lock_ptr can change between reading it and * spin_lock(), causing us ...
functions
int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, struct task_struct *newowner) { u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; struct futex_pi_state *pi_state = q->pi_state; struct task_struct *oldowner = pi_state->owner; u32 uval, uninitialized_var(curval), newval; int ret; /* Owner die...
functions
int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) { struct task_struct *owner; int ret = 0; if (locked) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: */ if (q->pi_state->owner != current) ret = fixup_pi_state_o...
functions
void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, struct hrtimer_sleeper *timeout) { /* * The task state is guaranteed to be set before another task can * wake it. set_current_state() is implemented using set_mb() and * queue_me() calls spin_unlock() upon completion, both serializing ...
functions
int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, struct futex_q *q, struct futex_hash_bucket **hb) { u32 uval; int ret; /* * Access the page AFTER the hash-bucket is locked. * Order is important: * * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); * Userspa...
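The ordering comment in futex_wait_setup() above spells out the user-space half of the wait/wake protocol. Below is a minimal user-space sketch of that pairing, assuming direct use of the raw futex(2) syscall (glibc exposes no wrapper); the variable names and the futex() helper are illustrative, not part of this file:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static atomic_int var;			/* the futex word shared between threads */

static long futex(atomic_int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Waiter: read the value, then sleep only if it is still unchanged.
 * FUTEX_WAIT returns immediately with EAGAIN if *uaddr != val, which is
 * exactly the race futex_wait_setup() re-checks under the bucket lock. */
static void waiter(void)
{
	int val = atomic_load(&var);
	if (val == 0)				/* cond(val) */
		futex(&var, FUTEX_WAIT, val);
}

/* Waker: change the value first, then wake one waiter. */
static void waker(void)
{
	atomic_store(&var, 1);
	futex(&var, FUTEX_WAKE, 1);
}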
functions
int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset) { struct hrtimer_sleeper timeout, *to = NULL; struct restart_block *restart; struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; int ret; if (!bitset) return -EINVAL; q.bitset = bitset; if (abs_t...
functions
long futex_wait_restart(struct restart_block *restart) { u32 __user *uaddr = restart->futex.uaddr; ktime_t t, *tp = NULL; if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { t.tv64 = restart->futex.time; tp = &t; } restart->fn = do_no_restart_syscall; return (long)futex_wait(uaddr, restart->futex.flags, restart->futex.val, tp, restart->futex.bitset); }
functions
int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to = NULL; struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; int res, ret; if (refill_pi_state_cache()) return -ENOMEM; if (time) { to = &timeout; hrtimer_in...
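futex_lock_pi() is only the contended slow path; per futex(2), user space is expected to take and release the lock with a plain compare-and-swap when there is no contention. A minimal user-space sketch of that protocol, assuming C11 atomics and the raw syscall; the pi_lock word and helper names are illustrative:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t pi_lock;	/* 0 = unlocked, else owner TID plus flag bits */

static pid_t gettid_raw(void) { return (pid_t)syscall(SYS_gettid); }

static void pi_lock_acquire(void)
{
	uint32_t expected = 0;
	uint32_t tid = (uint32_t)gettid_raw();

	/* Fast path: uncontended, cmpxchg 0 -> TID entirely in user space. */
	if (atomic_compare_exchange_strong(&pi_lock, &expected, tid))
		return;
	/* Slow path: the kernel sets FUTEX_WAITERS, builds the pi_state and
	 * rt_mutex, and priority-boosts the owner if needed. */
	syscall(SYS_futex, &pi_lock, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void pi_lock_release(void)
{
	uint32_t expected = (uint32_t)gettid_raw();

	/* Fast path: no waiters recorded, cmpxchg TID -> 0. */
	if (atomic_compare_exchange_strong(&pi_lock, &expected, 0))
		return;
	/* Slow path: FUTEX_WAITERS is set, let the kernel hand off ownership. */
	syscall(SYS_futex, &pi_lock, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}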
functions
int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) { struct futex_hash_bucket *hb; struct futex_q *this, *next; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; u32 uval, vpid = task_pid_vnr(current); int ret; retry: if (get_user(uval, uaddr)) return -EFAULT; /* * We release only a lo...
functions
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, struct futex_q *q, union futex_key *key2, struct hrtimer_sleeper *timeout) { int ret = 0; /* * With the hb lock held, we avoid races while we process the wakeup. * We only need to hold hb (and not hb2) to ensure atomicity as the * w...
functions
int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INI...
functions
else if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. We could restart this syscall, but * it would detect that the user space "val" changed and return * -EWOULDBLOCK. Save the overhead of the restart and return * -EWOULDBLOCK directly. ...
functions
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) { u32 uval, uninitialized_var(nval), mval; retry: if (get_user(uval, uaddr)) return -1; if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { /* * Ok, this dying thread is truly holding a futex * of interest. Set the OWNER_DIED bi...
functions
int fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, unsigned int *pi) { unsigned long uentry; if (get_user(uentry, (unsigned long __user *)head)) return -EFAULT; *entry = (void __user *)(uentry & ~1UL); *pi = uentry & 1; return 0; }
functions
void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned int uninitialized_var(next_pi); unsigned long futex_offset; int rc; if (!futex_cmpxchg_en...
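exit_robust_list() walks a per-thread list that user space registers with set_robust_list(2); handle_futex_death() above then sets FUTEX_OWNER_DIED on any lock word still held at exit. A minimal registration sketch, assuming direct use of the syscall; in a real program glibc already registers its own robust list for robust pthread mutexes, so this is purely illustrative:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>

/* One empty, circular robust list for the calling thread. */
static struct robust_list_head head = {
	.list            = { &head.list },	/* points back to itself: empty list */
	.futex_offset    = 0,			/* offset of the futex word inside each entry */
	.list_op_pending = NULL,
};

static void register_robust_list(void)
{
	/* After this call, a lock left held when the thread dies is found by
	 * exit_robust_list() and marked via handle_futex_death(). */
	syscall(SYS_set_robust_list, &head, sizeof(head));
}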
functions
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { int cmd = op & FUTEX_CMD_MASK; unsigned int flags = 0; if (!(op & FUTEX_PRIVATE_FLAG)) flags |= FLAGS_SHARED; if (op & FUTEX_CLOCK_REALTIME) { flags |= FLAGS_CLOCKRT; if (cmd != FUTEX_WAIT_BITSET ...
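In kernels of this generation, do_futex() accepts FUTEX_CLOCK_REALTIME only for the bitset wait commands, where the timeout is an absolute time. A small user-space sketch of that case, assuming the raw syscall; the wait_until() helper and the word variable are illustrative:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>
#include <stdatomic.h>

static atomic_int word;

/* Absolute, CLOCK_REALTIME timed wait: the one place FLAGS_CLOCKRT is valid. */
static int wait_until(int expected, const struct timespec *abs_deadline)
{
	/* val3 is the wake bitset; FUTEX_BITSET_MATCH_ANY behaves like plain FUTEX_WAIT. */
	return syscall(SYS_futex, &word,
		       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
		       expected, abs_deadline, NULL, FUTEX_BITSET_MATCH_ANY);
}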
functions
void __init futex_detect_cmpxchg(void) { #ifndef CONFIG_HAVE_FUTEX_CMPXCHG u32 curval; /* * This will fail and we want it. Some arch implementations do * runtime detection of the futex_atomic_cmpxchg_inatomic() * functionality. We want to know that before we call in any * of the complex code paths. Also we want t...
functions
int __init futex_init(void) { int i; futex_detect_cmpxchg(); for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { plist_head_init(&futex_queues[i].chain); spin_lock_init(&futex_queues[i].lock); } return 0; }
includes
#include <linux/platform_device.h>
includes
#include <linux/cdev.h>
includes
#include <linux/list.h>
includes
#include <linux/module.h>
includes
#include <linux/fs.h>
includes
#include <linux/interrupt.h>
includes
#include <linux/sched.h>
includes
#include <linux/uaccess.h>
includes
#include <linux/clk.h>
includes
#include <linux/android_pmem.h>
includes
#include <linux/msm_rotator.h>
includes
#include <linux/io.h>
includes
#include <mach/msm_rotator_imem.h>
includes
#include <linux/ktime.h>
includes
#include <linux/workqueue.h>
includes
#include <linux/file.h>
includes
#include <linux/major.h>
includes
#include <linux/regulator/consumer.h>
includes
#include <linux/msm_ion.h>
includes
#include <linux/sync.h>
includes
#include <linux/sw_sync.h>
includes
#include <mach/msm_bus.h>
includes
#include <mach/msm_bus_board.h>
includes
#include <mach/msm_subsystem_map.h>
includes
#include <mach/iommu_domains.h>
defines
#define DRIVER_NAME "msm_rotator"
defines
#define MSM_ROTATOR_BASE (msm_rotator_dev->io_base)
defines
#define MSM_ROTATOR_INTR_ENABLE (MSM_ROTATOR_BASE+0x0020)
defines
#define MSM_ROTATOR_INTR_STATUS (MSM_ROTATOR_BASE+0x0024)
defines
#define MSM_ROTATOR_INTR_CLEAR (MSM_ROTATOR_BASE+0x0028)
defines
#define MSM_ROTATOR_START (MSM_ROTATOR_BASE+0x0030)
defines
#define MSM_ROTATOR_MAX_BURST_SIZE (MSM_ROTATOR_BASE+0x0050)
defines
#define MSM_ROTATOR_HW_VERSION (MSM_ROTATOR_BASE+0x0070)
defines
#define MSM_ROTATOR_SW_RESET (MSM_ROTATOR_BASE+0x0074)
defines
#define MSM_ROTATOR_SRC_SIZE (MSM_ROTATOR_BASE+0x1108)
defines
#define MSM_ROTATOR_SRCP0_ADDR (MSM_ROTATOR_BASE+0x110c)
defines
#define MSM_ROTATOR_SRCP1_ADDR (MSM_ROTATOR_BASE+0x1110)
defines
#define MSM_ROTATOR_SRCP2_ADDR (MSM_ROTATOR_BASE+0x1114)
defines
#define MSM_ROTATOR_SRC_YSTRIDE1 (MSM_ROTATOR_BASE+0x111c)
defines
#define MSM_ROTATOR_SRC_YSTRIDE2 (MSM_ROTATOR_BASE+0x1120)
defines
#define MSM_ROTATOR_SRC_FORMAT (MSM_ROTATOR_BASE+0x1124)
defines
#define MSM_ROTATOR_SRC_UNPACK_PATTERN1 (MSM_ROTATOR_BASE+0x1128)
defines
#define MSM_ROTATOR_SUB_BLOCK_CFG (MSM_ROTATOR_BASE+0x1138)
defines
#define MSM_ROTATOR_OUT_PACK_PATTERN1 (MSM_ROTATOR_BASE+0x1154)
defines
#define MSM_ROTATOR_OUTP0_ADDR (MSM_ROTATOR_BASE+0x1168)
defines
#define MSM_ROTATOR_OUTP1_ADDR (MSM_ROTATOR_BASE+0x116c)
defines
#define MSM_ROTATOR_OUTP2_ADDR (MSM_ROTATOR_BASE+0x1170)
defines
#define MSM_ROTATOR_OUT_YSTRIDE1 (MSM_ROTATOR_BASE+0x1178)
defines
#define MSM_ROTATOR_OUT_YSTRIDE2 (MSM_ROTATOR_BASE+0x117c)
defines
#define MSM_ROTATOR_SRC_XY (MSM_ROTATOR_BASE+0x1200)
defines
#define MSM_ROTATOR_SRC_IMAGE_SIZE (MSM_ROTATOR_BASE+0x1208)
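All of the register macros above resolve to MMIO addresses relative to msm_rotator_dev->io_base. A minimal sketch of how such a block is typically mapped and read with ioremap()/readl(); the physical base, size and the local io_base stand-in are hypothetical, since msm_rotator_dev itself is not part of this excerpt:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical sketch: map the rotator register block and read the
 * hardware version register defined above (offset 0x0070). */
static int rotator_peek_version(unsigned long phys_base, unsigned long size)
{
	void __iomem *io_base;	/* stands in for msm_rotator_dev->io_base */
	u32 hw_version;

	io_base = ioremap(phys_base, size);
	if (!io_base)
		return -ENOMEM;

	hw_version = readl(io_base + 0x0070);	/* MSM_ROTATOR_HW_VERSION */
	pr_info("msm_rotator: hw version 0x%08x\n", hw_version);

	iounmap(io_base);
	return 0;
}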