repo_name
string
path
string
copies
string
size
string
content
string
license
string
gdetal/kernel_omap
fs/proc/generic.c
2338
19994
/* * proc/fs/generic.c --- generic routines for the proc-fs * * This file contains generic proc-fs routines for handling * directories and files. * * Copyright (C) 1991, 1992 Linus Torvalds. * Copyright (C) 1997 Theodore Ts'o */ #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/init.h> #include <linux/idr.h> #include <linux/namei.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <asm/uaccess.h> #include "internal.h" DEFINE_SPINLOCK(proc_subdir_lock); static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de) { if (de->namelen != len) return 0; return !memcmp(name, de->name, len); } /* buffer size is one page but our output routines use some slack for overruns */ #define PROC_BLOCK_SIZE (PAGE_SIZE - 1024) static ssize_t __proc_file_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct inode * inode = file->f_path.dentry->d_inode; char *page; ssize_t retval=0; int eof=0; ssize_t n, count; char *start; struct proc_dir_entry * dp; unsigned long long pos; /* * Gaah, please just use "seq_file" instead. The legacy /proc * interfaces cut loff_t down to off_t for reads, and ignore * the offset entirely for writes.. */ pos = *ppos; if (pos > MAX_NON_LFS) return 0; if (nbytes > MAX_NON_LFS - pos) nbytes = MAX_NON_LFS - pos; dp = PDE(inode); if (!(page = (char*) __get_free_page(GFP_TEMPORARY))) return -ENOMEM; while ((nbytes > 0) && !eof) { count = min_t(size_t, PROC_BLOCK_SIZE, nbytes); start = NULL; if (dp->read_proc) { /* * How to be a proc read function * ------------------------------ * Prototype: * int f(char *buffer, char **start, off_t offset, * int count, int *peof, void *dat) * * Assume that the buffer is "count" bytes in size. * * If you know you have supplied all the data you * have, set *peof. 
* * You have three ways to return data: * 0) Leave *start = NULL. (This is the default.) * Put the data of the requested offset at that * offset within the buffer. Return the number (n) * of bytes there are from the beginning of the * buffer up to the last byte of data. If the * number of supplied bytes (= n - offset) is * greater than zero and you didn't signal eof * and the reader is prepared to take more data * you will be called again with the requested * offset advanced by the number of bytes * absorbed. This interface is useful for files * no larger than the buffer. * 1) Set *start = an unsigned long value less than * the buffer address but greater than zero. * Put the data of the requested offset at the * beginning of the buffer. Return the number of * bytes of data placed there. If this number is * greater than zero and you didn't signal eof * and the reader is prepared to take more data * you will be called again with the requested * offset advanced by *start. This interface is * useful when you have a large file consisting * of a series of blocks which you want to count * and return as wholes. * (Hack by Paul.Russell@rustcorp.com.au) * 2) Set *start = an address within the buffer. * Put the data of the requested offset at *start. * Return the number of bytes of data placed there. * If this number is greater than zero and you * didn't signal eof and the reader is prepared to * take more data you will be called again with the * requested offset advanced by the number of bytes * absorbed. 
*/ n = dp->read_proc(page, &start, *ppos, count, &eof, dp->data); } else break; if (n == 0) /* end of file */ break; if (n < 0) { /* error */ if (retval == 0) retval = n; break; } if (start == NULL) { if (n > PAGE_SIZE) { printk(KERN_ERR "proc_file_read: Apparent buffer overflow!\n"); n = PAGE_SIZE; } n -= *ppos; if (n <= 0) break; if (n > count) n = count; start = page + *ppos; } else if (start < page) { if (n > PAGE_SIZE) { printk(KERN_ERR "proc_file_read: Apparent buffer overflow!\n"); n = PAGE_SIZE; } if (n > count) { /* * Don't reduce n because doing so might * cut off part of a data block. */ printk(KERN_WARNING "proc_file_read: Read count exceeded\n"); } } else /* start >= page */ { unsigned long startoff = (unsigned long)(start - page); if (n > (PAGE_SIZE - startoff)) { printk(KERN_ERR "proc_file_read: Apparent buffer overflow!\n"); n = PAGE_SIZE - startoff; } if (n > count) n = count; } n -= copy_to_user(buf, start < page ? page : start, n); if (n == 0) { if (retval == 0) retval = -EFAULT; break; } *ppos += start < page ? (unsigned long)start : n; nbytes -= n; buf += n; retval += n; } free_page((unsigned long) page); return retval; } static ssize_t proc_file_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); ssize_t rv = -EIO; spin_lock(&pde->pde_unload_lock); if (!pde->proc_fops) { spin_unlock(&pde->pde_unload_lock); return rv; } pde->pde_users++; spin_unlock(&pde->pde_unload_lock); rv = __proc_file_read(file, buf, nbytes, ppos); pde_users_dec(pde); return rv; } static ssize_t proc_file_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); ssize_t rv = -EIO; if (pde->write_proc) { spin_lock(&pde->pde_unload_lock); if (!pde->proc_fops) { spin_unlock(&pde->pde_unload_lock); return rv; } pde->pde_users++; spin_unlock(&pde->pde_unload_lock); /* FIXME: does this routine need ppos? 
probably... */ rv = pde->write_proc(file, buffer, count, pde->data); pde_users_dec(pde); } return rv; } static loff_t proc_file_lseek(struct file *file, loff_t offset, int orig) { loff_t retval = -EINVAL; switch (orig) { case 1: offset += file->f_pos; /* fallthrough */ case 0: if (offset < 0 || offset > MAX_NON_LFS) break; file->f_pos = retval = offset; } return retval; } static const struct file_operations proc_file_operations = { .llseek = proc_file_lseek, .read = proc_file_read, .write = proc_file_write, }; static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) { struct inode *inode = dentry->d_inode; struct proc_dir_entry *de = PDE(inode); int error; error = inode_change_ok(inode, iattr); if (error) return error; if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size != i_size_read(inode)) { error = vmtruncate(inode, iattr->ia_size); if (error) return error; } setattr_copy(inode, iattr); mark_inode_dirty(inode); de->uid = inode->i_uid; de->gid = inode->i_gid; de->mode = inode->i_mode; return 0; } static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; struct proc_dir_entry *de = PROC_I(inode)->pde; if (de && de->nlink) inode->i_nlink = de->nlink; generic_fillattr(inode, stat); return 0; } static const struct inode_operations proc_file_inode_operations = { .setattr = proc_notify_change, }; /* * This function parses a name such as "tty/driver/serial", and * returns the struct proc_dir_entry for "/proc/tty/driver", and * returns "serial" in residual. 
*/ static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret, const char **residual) { const char *cp = name, *next; struct proc_dir_entry *de; unsigned int len; de = *ret; if (!de) de = &proc_root; while (1) { next = strchr(cp, '/'); if (!next) break; len = next - cp; for (de = de->subdir; de ; de = de->next) { if (proc_match(len, cp, de)) break; } if (!de) { WARN(1, "name '%s'\n", name); return -ENOENT; } cp += len + 1; } *residual = cp; *ret = de; return 0; } static int xlate_proc_name(const char *name, struct proc_dir_entry **ret, const char **residual) { int rv; spin_lock(&proc_subdir_lock); rv = __xlate_proc_name(name, ret, residual); spin_unlock(&proc_subdir_lock); return rv; } static DEFINE_IDA(proc_inum_ida); static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */ #define PROC_DYNAMIC_FIRST 0xF0000000U /* * Return an inode number between PROC_DYNAMIC_FIRST and * 0xffffffff, or zero on failure. */ static unsigned int get_inode_number(void) { unsigned int i; int error; retry: if (ida_pre_get(&proc_inum_ida, GFP_KERNEL) == 0) return 0; spin_lock(&proc_inum_lock); error = ida_get_new(&proc_inum_ida, &i); spin_unlock(&proc_inum_lock); if (error == -EAGAIN) goto retry; else if (error) return 0; if (i > UINT_MAX - PROC_DYNAMIC_FIRST) { spin_lock(&proc_inum_lock); ida_remove(&proc_inum_ida, i); spin_unlock(&proc_inum_lock); return 0; } return PROC_DYNAMIC_FIRST + i; } static void release_inode_number(unsigned int inum) { spin_lock(&proc_inum_lock); ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST); spin_unlock(&proc_inum_lock); } static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) { nd_set_link(nd, PDE(dentry->d_inode)->data); return NULL; } static const struct inode_operations proc_link_inode_operations = { .readlink = generic_readlink, .follow_link = proc_follow_link, }; /* * As some entries in /proc are volatile, we want to * get rid of unused dentries. 
This could be made * smarter: we could keep a "volatile" flag in the * inode to indicate which ones to keep. */ static int proc_delete_dentry(const struct dentry * dentry) { return 1; } static const struct dentry_operations proc_dentry_operations = { .d_delete = proc_delete_dentry, }; /* * Don't create negative dentries here, return -ENOENT by hand * instead. */ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, struct dentry *dentry) { struct inode *inode = NULL; int error = -ENOENT; spin_lock(&proc_subdir_lock); for (de = de->subdir; de ; de = de->next) { if (de->namelen != dentry->d_name.len) continue; if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { pde_get(de); spin_unlock(&proc_subdir_lock); error = -EINVAL; inode = proc_get_inode(dir->i_sb, de); goto out_unlock; } } spin_unlock(&proc_subdir_lock); out_unlock: if (inode) { d_set_d_op(dentry, &proc_dentry_operations); d_add(dentry, inode); return NULL; } if (de) pde_put(de); return ERR_PTR(error); } struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { return proc_lookup_de(PDE(dir), dir, dentry); } /* * This returns non-zero if at EOF, so that the /proc * root directory can use this and check if it should * continue with the <pid> entries.. * * Note that the VFS-layer doesn't care about the return * value of the readdir() call, as long as it's non-negative * for success.. 
*/ int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent, filldir_t filldir) { unsigned int ino; int i; struct inode *inode = filp->f_path.dentry->d_inode; int ret = 0; ino = inode->i_ino; i = filp->f_pos; switch (i) { case 0: if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) goto out; i++; filp->f_pos++; /* fall through */ case 1: if (filldir(dirent, "..", 2, i, parent_ino(filp->f_path.dentry), DT_DIR) < 0) goto out; i++; filp->f_pos++; /* fall through */ default: spin_lock(&proc_subdir_lock); de = de->subdir; i -= 2; for (;;) { if (!de) { ret = 1; spin_unlock(&proc_subdir_lock); goto out; } if (!i) break; de = de->next; i--; } do { struct proc_dir_entry *next; /* filldir passes info to user space */ pde_get(de); spin_unlock(&proc_subdir_lock); if (filldir(dirent, de->name, de->namelen, filp->f_pos, de->low_ino, de->mode >> 12) < 0) { pde_put(de); goto out; } spin_lock(&proc_subdir_lock); filp->f_pos++; next = de->next; pde_put(de); de = next; } while (de); spin_unlock(&proc_subdir_lock); } ret = 1; out: return ret; } int proc_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; return proc_readdir_de(PDE(inode), filp, dirent, filldir); } /* * These are the generic /proc directory operations. They * use the in-memory "struct proc_dir_entry" tree to parse * the /proc directory. */ static const struct file_operations proc_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = proc_readdir, }; /* * proc directories can do almost nothing.. 
*/ static const struct inode_operations proc_dir_inode_operations = { .lookup = proc_lookup, .getattr = proc_getattr, .setattr = proc_notify_change, }; static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp) { unsigned int i; struct proc_dir_entry *tmp; i = get_inode_number(); if (i == 0) return -EAGAIN; dp->low_ino = i; if (S_ISDIR(dp->mode)) { if (dp->proc_iops == NULL) { dp->proc_fops = &proc_dir_operations; dp->proc_iops = &proc_dir_inode_operations; } dir->nlink++; } else if (S_ISLNK(dp->mode)) { if (dp->proc_iops == NULL) dp->proc_iops = &proc_link_inode_operations; } else if (S_ISREG(dp->mode)) { if (dp->proc_fops == NULL) dp->proc_fops = &proc_file_operations; if (dp->proc_iops == NULL) dp->proc_iops = &proc_file_inode_operations; } spin_lock(&proc_subdir_lock); for (tmp = dir->subdir; tmp; tmp = tmp->next) if (strcmp(tmp->name, dp->name) == 0) { WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n", dir->name, dp->name); break; } dp->next = dir->subdir; dp->parent = dir; dir->subdir = dp; spin_unlock(&proc_subdir_lock); return 0; } static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, const char *name, mode_t mode, nlink_t nlink) { struct proc_dir_entry *ent = NULL; const char *fn = name; unsigned int len; /* make sure name is valid */ if (!name || !strlen(name)) goto out; if (xlate_proc_name(name, parent, &fn) != 0) goto out; /* At this point there must not be any '/' characters beyond *fn */ if (strchr(fn, '/')) goto out; len = strlen(fn); ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL); if (!ent) goto out; memset(ent, 0, sizeof(struct proc_dir_entry)); memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1); ent->name = ((char *) ent) + sizeof(*ent); ent->namelen = len; ent->mode = mode; ent->nlink = nlink; atomic_set(&ent->count, 1); ent->pde_users = 0; spin_lock_init(&ent->pde_unload_lock); ent->pde_unload_completion = NULL; INIT_LIST_HEAD(&ent->pde_openers); 
out: return ent; } struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent, const char *dest) { struct proc_dir_entry *ent; ent = __proc_create(&parent, name, (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1); if (ent) { ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL); if (ent->data) { strcpy((char*)ent->data,dest); if (proc_register(parent, ent) < 0) { kfree(ent->data); kfree(ent); ent = NULL; } } else { kfree(ent); ent = NULL; } } return ent; } EXPORT_SYMBOL(proc_symlink); struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, struct proc_dir_entry *parent) { struct proc_dir_entry *ent; ent = __proc_create(&parent, name, S_IFDIR | mode, 2); if (ent) { if (proc_register(parent, ent) < 0) { kfree(ent); ent = NULL; } } return ent; } EXPORT_SYMBOL(proc_mkdir_mode); struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, struct proc_dir_entry *parent) { struct proc_dir_entry *ent; ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2); if (ent) { ent->data = net; if (proc_register(parent, ent) < 0) { kfree(ent); ent = NULL; } } return ent; } EXPORT_SYMBOL_GPL(proc_net_mkdir); struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) { return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent); } EXPORT_SYMBOL(proc_mkdir); struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent) { struct proc_dir_entry *ent; nlink_t nlink; if (S_ISDIR(mode)) { if ((mode & S_IALLUGO) == 0) mode |= S_IRUGO | S_IXUGO; nlink = 2; } else { if ((mode & S_IFMT) == 0) mode |= S_IFREG; if ((mode & S_IALLUGO) == 0) mode |= S_IRUGO; nlink = 1; } ent = __proc_create(&parent, name, mode, nlink); if (ent) { if (proc_register(parent, ent) < 0) { kfree(ent); ent = NULL; } } return ent; } EXPORT_SYMBOL(create_proc_entry); struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, struct proc_dir_entry *parent, const struct file_operations 
*proc_fops, void *data) { struct proc_dir_entry *pde; nlink_t nlink; if (S_ISDIR(mode)) { if ((mode & S_IALLUGO) == 0) mode |= S_IRUGO | S_IXUGO; nlink = 2; } else { if ((mode & S_IFMT) == 0) mode |= S_IFREG; if ((mode & S_IALLUGO) == 0) mode |= S_IRUGO; nlink = 1; } pde = __proc_create(&parent, name, mode, nlink); if (!pde) goto out; pde->proc_fops = proc_fops; pde->data = data; if (proc_register(parent, pde) < 0) goto out_free; return pde; out_free: kfree(pde); out: return NULL; } EXPORT_SYMBOL(proc_create_data); static void free_proc_entry(struct proc_dir_entry *de) { release_inode_number(de->low_ino); if (S_ISLNK(de->mode)) kfree(de->data); kfree(de); } void pde_put(struct proc_dir_entry *pde) { if (atomic_dec_and_test(&pde->count)) free_proc_entry(pde); } /* * Remove a /proc entry and free it if it's not currently in use. */ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) { struct proc_dir_entry **p; struct proc_dir_entry *de = NULL; const char *fn = name; unsigned int len; spin_lock(&proc_subdir_lock); if (__xlate_proc_name(name, &parent, &fn) != 0) { spin_unlock(&proc_subdir_lock); return; } len = strlen(fn); for (p = &parent->subdir; *p; p=&(*p)->next ) { if (proc_match(len, fn, *p)) { de = *p; *p = de->next; de->next = NULL; break; } } spin_unlock(&proc_subdir_lock); if (!de) { WARN(1, "name '%s'\n", name); return; } spin_lock(&de->pde_unload_lock); /* * Stop accepting new callers into module. If you're * dynamically allocating ->proc_fops, save a pointer somewhere. */ de->proc_fops = NULL; /* Wait until all existing callers into module are done. 
*/ if (de->pde_users > 0) { DECLARE_COMPLETION_ONSTACK(c); if (!de->pde_unload_completion) de->pde_unload_completion = &c; spin_unlock(&de->pde_unload_lock); wait_for_completion(de->pde_unload_completion); spin_lock(&de->pde_unload_lock); } while (!list_empty(&de->pde_openers)) { struct pde_opener *pdeo; pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh); list_del(&pdeo->lh); spin_unlock(&de->pde_unload_lock); pdeo->release(pdeo->inode, pdeo->file); kfree(pdeo); spin_lock(&de->pde_unload_lock); } spin_unlock(&de->pde_unload_lock); if (S_ISDIR(de->mode)) parent->nlink--; de->nlink = 0; WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory " "'%s/%s', leaking at least '%s'\n", __func__, de->parent->name, de->name, de->subdir->name); pde_put(de); } EXPORT_SYMBOL(remove_proc_entry);
gpl-2.0
onejay09/runnymede-kitkat_3.0.101-wip
tools/perf/builtin-inject.c
3106
5702
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "perf.h"
#include "util/session.h"
#include "util/debug.h"

#include "util/parse-options.h"

static char const		*input_name = "-";
static bool			inject_build_ids;

/*
 * Copy @event verbatim to stdout, looping until the whole record has
 * been written (write() may perform short writes on pipes).
 *
 * Returns 0 on success or a negative errno-style value on failure.
 */
static int perf_event__repipe_synth(union perf_event *event,
				    struct perf_session *session __used)
{
	uint32_t size;
	/* byte pointer: arithmetic on void * is a GNU extension, not C */
	char *buf = (char *)event;

	size = event->header.size;

	while (size) {
		ssize_t ret = write(STDOUT_FILENO, buf, size);
		if (ret < 0)
			return -errno;
		if (ret == 0)
			return -EIO;	/* no progress: don't spin forever */
		size -= ret;
		buf += ret;
	}

	return 0;
}

/* Repipe callback for events that carry a sample pointer we ignore. */
static int perf_event__repipe(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session)
{
	return perf_event__repipe_synth(event, session);
}

/* Repipe callback matching the ->sample handler signature. */
static int perf_event__repipe_sample(union perf_event *event,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct perf_session *session)
{
	return perf_event__repipe_synth(event, session);
}

/* Process the mmap event (so maps are tracked) and then repipe it. */
static int perf_event__repipe_mmap(union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_session *session)
{
	int err;

	err = perf_event__process_mmap(event, sample, session);
	perf_event__repipe(event, sample, session);

	return err;
}

/* Process the task (fork/exit) event and then repipe it. */
static int perf_event__repipe_task(union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_session *session)
{
	int err;

	err = perf_event__process_task(event, sample, session);
	perf_event__repipe(event, sample, session);

	return err;
}

/*
 * Repipe first, then process: tracing data is consumed from the stream
 * by the processing step, so the raw bytes must be forwarded before.
 */
static int perf_event__repipe_tracing_data(union perf_event *event,
					   struct perf_session *session)
{
	int err;

	perf_event__repipe_synth(event, session);
	err = perf_event__process_tracing_data(event, session);

	return err;
}

/*
 * Ensure @self has a build-id, reading it from the DSO file on disk if
 * it was not already cached.  Returns 0 on success, -1 on failure.
 */
static int dso__read_build_id(struct dso *self)
{
	if (self->has_build_id)
		return 0;

	if (filename__read_build_id(self->long_name, self->build_id,
				    sizeof(self->build_id)) > 0) {
		self->has_build_id = true;
		return 0;
	}

	return -1;
}

/*
 * Synthesize a build-id event for @self and inject it into the output
 * stream.  Returns 0 on success, -1 on failure.
 */
static int dso__inject_build_id(struct dso *self, struct perf_session *session)
{
	u16 misc = PERF_RECORD_MISC_USER;
	struct machine *machine;
	int err;

	if (dso__read_build_id(self) < 0) {
		pr_debug("no build_id found for %s\n", self->long_name);
		return -1;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL) {
		pr_err("Can't find machine for session\n");
		return -1;
	}

	if (self->kernel)
		misc = PERF_RECORD_MISC_KERNEL;

	err = perf_event__synthesize_build_id(self, misc, perf_event__repipe,
					      machine, session);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n",
		       self->long_name);
		return -1;
	}

	return 0;
}

/*
 * Sample handler used in build-id mode: resolve the sample's map, inject
 * a build-id event the first time each DSO is hit, then repipe the
 * original sample.  Always returns 0 - failures are best-effort.
 */
static int perf_event__inject_buildid(union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_evsel *evsel __used,
				      struct perf_session *session)
{
	struct addr_location al;
	struct thread *thread;
	u8 cpumode;

	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	thread = perf_session__findnew(session, event->ip.pid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      event->ip.pid, event->ip.ip, &al);

	if (al.map != NULL) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			if (map__load(al.map, NULL) >= 0) {
				dso__inject_build_id(al.map->dso, session);
				/*
				 * If this fails, too bad, let the other side
				 * account this as unresolved.
				 */
			} else
				pr_warning("no symbols found in %s, maybe "
					   "install a debug package?\n",
					   al.map->dso->long_name);
		}
	}

repipe:
	perf_event__repipe(event, sample, session);
	return 0;
}

/* Default handlers: everything is repiped untouched. */
struct perf_event_ops inject_ops = {
	.sample		= perf_event__repipe_sample,
	.mmap		= perf_event__repipe,
	.comm		= perf_event__repipe,
	.fork		= perf_event__repipe,
	.exit		= perf_event__repipe,
	.lost		= perf_event__repipe,
	.read		= perf_event__repipe,
	.throttle	= perf_event__repipe,
	.unthrottle	= perf_event__repipe,
	.attr		= perf_event__repipe_synth,
	.event_type	= perf_event__repipe_synth,
	.tracing_data	= perf_event__repipe_synth,
	.build_id	= perf_event__repipe_synth,
};

extern volatile int session_done;

/* SIGINT handler: ask the session loop to finish gracefully. */
static void sig_handler(int sig __attribute__((__unused__)))
{
	session_done = 1;
}

/*
 * Run the inject pass: open the input session, optionally switch on the
 * build-id injecting handlers, and process all events.
 */
static int __cmd_inject(void)
{
	struct perf_session *session;
	int ret = -EINVAL;

	signal(SIGINT, sig_handler);

	if (inject_build_ids) {
		inject_ops.sample	= perf_event__inject_buildid;
		inject_ops.mmap		= perf_event__repipe_mmap;
		inject_ops.fork		= perf_event__repipe_task;
		inject_ops.tracing_data	= perf_event__repipe_tracing_data;
	}

	session = perf_session__new(input_name, O_RDONLY, false, true,
				    &inject_ops);
	if (session == NULL)
		return -ENOMEM;

	ret = perf_session__process_events(session, &inject_ops);

	perf_session__delete(session);

	return ret;
}

static const char * const report_usage[] = {
	"perf inject [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_BOOLEAN('b', "build-ids", &inject_build_ids,
		    "Inject build-ids into the output stream"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show build ids, etc)"),
	OPT_END()
};

/* Entry point for "perf inject". */
int cmd_inject(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, options, report_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(report_usage, options);

	if (symbol__init() < 0)
		return -1;

	return __cmd_inject();
}
gpl-2.0
namko/UTV210-Kernel-2.6.35.7
sound/pci/emu10k1/voice.c
4642
4616
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Creative Labs, Inc. * Lee Revell <rlrevell@joe-job.com> * Routines for control of EMU10K1 chips - voice manager * * Rewrote voice allocator for multichannel support - rlrevell 12/2004 * * BUGS: * -- * * TODO: * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/emu10k1.h> /* Previously the voice allocator started at 0 every time. The new voice * allocator uses a round robin scheme. The next free voice is tracked in * the card record and each allocation begins where the last left off. The * hardware requires stereo interleaved voices be aligned to an even/odd * boundary. For multichannel voice allocation we ensure than the block of * voices does not cross the 32 voice boundary. This simplifies the * multichannel support and ensures we can use a single write to the * (set|clear)_loop_stop registers. Otherwise (for example) the voices would * get out of sync when pausing/resuming a stream. 
* --rlrevell */ static int voice_alloc(struct snd_emu10k1 *emu, int type, int number, struct snd_emu10k1_voice **rvoice) { struct snd_emu10k1_voice *voice; int i, j, k, first_voice, last_voice, skip; *rvoice = NULL; first_voice = last_voice = 0; for (i = emu->next_free_voice, j = 0; j < NUM_G ; i += number, j += number) { /* printk(KERN_DEBUG "i %d j %d next free %d!\n", i, j, emu->next_free_voice); */ i %= NUM_G; /* stereo voices must be even/odd */ if ((number == 2) && (i % 2)) { i++; continue; } skip = 0; for (k = 0; k < number; k++) { voice = &emu->voices[(i+k) % NUM_G]; if (voice->use) { skip = 1; break; } } if (!skip) { /* printk(KERN_DEBUG "allocated voice %d\n", i); */ first_voice = i; last_voice = (i + number) % NUM_G; emu->next_free_voice = last_voice; break; } } if (first_voice == last_voice) return -ENOMEM; for (i = 0; i < number; i++) { voice = &emu->voices[(first_voice + i) % NUM_G]; /* printk(kERN_DEBUG "voice alloc - %i, %i of %i\n", voice->number, idx-first_voice+1, number); */ voice->use = 1; switch (type) { case EMU10K1_PCM: voice->pcm = 1; break; case EMU10K1_SYNTH: voice->synth = 1; break; case EMU10K1_MIDI: voice->midi = 1; break; case EMU10K1_EFX: voice->efx = 1; break; } } *rvoice = &emu->voices[first_voice]; return 0; } int snd_emu10k1_voice_alloc(struct snd_emu10k1 *emu, int type, int number, struct snd_emu10k1_voice **rvoice) { unsigned long flags; int result; if (snd_BUG_ON(!rvoice)) return -EINVAL; if (snd_BUG_ON(!number)) return -EINVAL; spin_lock_irqsave(&emu->voice_lock, flags); for (;;) { result = voice_alloc(emu, type, number, rvoice); if (result == 0 || type == EMU10K1_SYNTH || type == EMU10K1_MIDI) break; /* free a voice from synth */ if (emu->get_synth_voice) { result = emu->get_synth_voice(emu); if (result >= 0) { struct snd_emu10k1_voice *pvoice = &emu->voices[result]; pvoice->interrupt = NULL; pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = pvoice->efx = 0; pvoice->epcm = NULL; } } if (result < 0) break; } 
spin_unlock_irqrestore(&emu->voice_lock, flags); return result; } EXPORT_SYMBOL(snd_emu10k1_voice_alloc); int snd_emu10k1_voice_free(struct snd_emu10k1 *emu, struct snd_emu10k1_voice *pvoice) { unsigned long flags; if (snd_BUG_ON(!pvoice)) return -EINVAL; spin_lock_irqsave(&emu->voice_lock, flags); pvoice->interrupt = NULL; pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = pvoice->efx = 0; pvoice->epcm = NULL; snd_emu10k1_voice_init(emu, pvoice->number); spin_unlock_irqrestore(&emu->voice_lock, flags); return 0; } EXPORT_SYMBOL(snd_emu10k1_voice_free);
gpl-2.0
acuster/FirefoxOS-Flatfish-kernel
sound/soc/codecs/wm8523.c
4898
14469
/* * wm8523.c -- WM8523 ALSA SoC Audio driver * * Copyright 2009 Wolfson Microelectronics plc * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/of_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8523.h" #define WM8523_NUM_SUPPLIES 2 static const char *wm8523_supply_names[WM8523_NUM_SUPPLIES] = { "AVDD", "LINEVDD", }; #define WM8523_NUM_RATES 7 /* codec private data */ struct wm8523_priv { enum snd_soc_control_type control_type; struct regulator_bulk_data supplies[WM8523_NUM_SUPPLIES]; unsigned int sysclk; unsigned int rate_constraint_list[WM8523_NUM_RATES]; struct snd_pcm_hw_constraint_list rate_constraint; }; static const u16 wm8523_reg[WM8523_REGISTER_COUNT] = { 0x8523, /* R0 - DEVICE_ID */ 0x0001, /* R1 - REVISION */ 0x0000, /* R2 - PSCTRL1 */ 0x1812, /* R3 - AIF_CTRL1 */ 0x0000, /* R4 - AIF_CTRL2 */ 0x0001, /* R5 - DAC_CTRL3 */ 0x0190, /* R6 - DAC_GAINL */ 0x0190, /* R7 - DAC_GAINR */ 0x0000, /* R8 - ZERO_DETECT */ }; static int wm8523_volatile_register(struct snd_soc_codec *codec, unsigned int reg) { switch (reg) { case WM8523_DEVICE_ID: case WM8523_REVISION: return 1; default: return 0; } } static int wm8523_reset(struct snd_soc_codec *codec) { return snd_soc_write(codec, WM8523_DEVICE_ID, 0); } static const DECLARE_TLV_DB_SCALE(dac_tlv, -10000, 25, 0); static const char *wm8523_zd_count_text[] = { "1024", "2048", }; static const struct soc_enum wm8523_zc_count = SOC_ENUM_SINGLE(WM8523_ZERO_DETECT, 0, 2, 
wm8523_zd_count_text); static const struct snd_kcontrol_new wm8523_controls[] = { SOC_DOUBLE_R_TLV("Playback Volume", WM8523_DAC_GAINL, WM8523_DAC_GAINR, 0, 448, 0, dac_tlv), SOC_SINGLE("ZC Switch", WM8523_DAC_CTRL3, 4, 1, 0), SOC_SINGLE("Playback Deemphasis Switch", WM8523_AIF_CTRL1, 8, 1, 0), SOC_DOUBLE("Playback Switch", WM8523_DAC_CTRL3, 2, 3, 1, 1), SOC_SINGLE("Volume Ramp Up Switch", WM8523_DAC_CTRL3, 1, 1, 0), SOC_SINGLE("Volume Ramp Down Switch", WM8523_DAC_CTRL3, 0, 1, 0), SOC_ENUM("Zero Detect Count", wm8523_zc_count), }; static const struct snd_soc_dapm_widget wm8523_dapm_widgets[] = { SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_OUTPUT("LINEVOUTL"), SND_SOC_DAPM_OUTPUT("LINEVOUTR"), }; static const struct snd_soc_dapm_route wm8523_dapm_routes[] = { { "LINEVOUTL", NULL, "DAC" }, { "LINEVOUTR", NULL, "DAC" }, }; static struct { int value; int ratio; } lrclk_ratios[WM8523_NUM_RATES] = { { 1, 128 }, { 2, 192 }, { 3, 256 }, { 4, 384 }, { 5, 512 }, { 6, 768 }, { 7, 1152 }, }; static int wm8523_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); /* The set of sample rates that can be supported depends on the * MCLK supplied to the CODEC - enforce this. 
*/ if (!wm8523->sysclk) { dev_err(codec->dev, "No MCLK configured, call set_sysclk() on init\n"); return -EINVAL; } snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &wm8523->rate_constraint); return 0; } static int wm8523_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); int i; u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1); u16 aifctrl2 = snd_soc_read(codec, WM8523_AIF_CTRL2); /* Find a supported LRCLK ratio */ for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) { if (wm8523->sysclk / params_rate(params) == lrclk_ratios[i].ratio) break; } /* Should never happen, should be handled by constraints */ if (i == ARRAY_SIZE(lrclk_ratios)) { dev_err(codec->dev, "MCLK/fs ratio %d unsupported\n", wm8523->sysclk / params_rate(params)); return -EINVAL; } aifctrl2 &= ~WM8523_SR_MASK; aifctrl2 |= lrclk_ratios[i].value; aifctrl1 &= ~WM8523_WL_MASK; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: aifctrl1 |= 0x8; break; case SNDRV_PCM_FORMAT_S24_LE: aifctrl1 |= 0x10; break; case SNDRV_PCM_FORMAT_S32_LE: aifctrl1 |= 0x18; break; } snd_soc_write(codec, WM8523_AIF_CTRL1, aifctrl1); snd_soc_write(codec, WM8523_AIF_CTRL2, aifctrl2); return 0; } static int wm8523_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); unsigned int val; int i; wm8523->sysclk = freq; wm8523->rate_constraint.count = 0; for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) { val = freq / lrclk_ratios[i].ratio; /* Check that it's a standard rate since core can't * cope with others and having the odd rates confuses * constraint matching. 
*/ switch (val) { case 8000: case 11025: case 16000: case 22050: case 32000: case 44100: case 48000: case 64000: case 88200: case 96000: case 176400: case 192000: dev_dbg(codec->dev, "Supported sample rate: %dHz\n", val); wm8523->rate_constraint_list[i] = val; wm8523->rate_constraint.count++; break; default: dev_dbg(codec->dev, "Skipping sample rate: %dHz\n", val); } } /* Need at least one supported rate... */ if (wm8523->rate_constraint.count == 0) return -EINVAL; return 0; } static int wm8523_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1); aifctrl1 &= ~(WM8523_BCLK_INV_MASK | WM8523_LRCLK_INV_MASK | WM8523_FMT_MASK | WM8523_AIF_MSTR_MASK); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: aifctrl1 |= WM8523_AIF_MSTR; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: aifctrl1 |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: aifctrl1 |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: aifctrl1 |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: aifctrl1 |= 0x0023; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: aifctrl1 |= WM8523_BCLK_INV | WM8523_LRCLK_INV; break; case SND_SOC_DAIFMT_IB_NF: aifctrl1 |= WM8523_BCLK_INV; break; case SND_SOC_DAIFMT_NB_IF: aifctrl1 |= WM8523_LRCLK_INV; break; default: return -EINVAL; } snd_soc_write(codec, WM8523_AIF_CTRL1, aifctrl1); return 0; } static int wm8523_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); u16 *reg_cache = codec->reg_cache; int ret, i; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* Full power on */ snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 3); 
break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); return ret; } /* Initial power up */ snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 1); /* Sync back default/cached values */ for (i = WM8523_AIF_CTRL1; i < WM8523_MAX_REGISTER; i++) snd_soc_write(codec, i, reg_cache[i]); msleep(100); } /* Power up to mute */ snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 2); break; case SND_SOC_BIAS_OFF: /* The chip runs through the power down sequence for us. */ snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK, 0); msleep(100); regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); break; } codec->dapm.bias_level = level; return 0; } #define WM8523_RATES SNDRV_PCM_RATE_8000_192000 #define WM8523_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops wm8523_dai_ops = { .startup = wm8523_startup, .hw_params = wm8523_hw_params, .set_sysclk = wm8523_set_dai_sysclk, .set_fmt = wm8523_set_dai_fmt, }; static struct snd_soc_dai_driver wm8523_dai = { .name = "wm8523-hifi", .playback = { .stream_name = "Playback", .channels_min = 2, /* Mono modes not yet supported */ .channels_max = 2, .rates = WM8523_RATES, .formats = WM8523_FORMATS, }, .ops = &wm8523_dai_ops, }; #ifdef CONFIG_PM static int wm8523_suspend(struct snd_soc_codec *codec) { wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8523_resume(struct snd_soc_codec *codec) { wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } #else #define wm8523_suspend NULL #define wm8523_resume NULL #endif static int wm8523_probe(struct snd_soc_codec *codec) { struct wm8523_priv *wm8523 = snd_soc_codec_get_drvdata(codec); int ret, i; wm8523->rate_constraint.list = 
&wm8523->rate_constraint_list[0]; wm8523->rate_constraint.count = ARRAY_SIZE(wm8523->rate_constraint_list); ret = snd_soc_codec_set_cache_io(codec, 8, 16, wm8523->control_type); if (ret != 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } for (i = 0; i < ARRAY_SIZE(wm8523->supplies); i++) wm8523->supplies[i].supply = wm8523_supply_names[i]; ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to request supplies: %d\n", ret); return ret; } ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); if (ret != 0) { dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); goto err_get; } ret = snd_soc_read(codec, WM8523_DEVICE_ID); if (ret < 0) { dev_err(codec->dev, "Failed to read ID register\n"); goto err_enable; } if (ret != wm8523_reg[WM8523_DEVICE_ID]) { dev_err(codec->dev, "Device is not a WM8523, ID is %x\n", ret); ret = -EINVAL; goto err_enable; } ret = snd_soc_read(codec, WM8523_REVISION); if (ret < 0) { dev_err(codec->dev, "Failed to read revision register\n"); goto err_enable; } dev_info(codec->dev, "revision %c\n", (ret & WM8523_CHIP_REV_MASK) + 'A'); ret = wm8523_reset(codec); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset\n"); goto err_enable; } /* Change some default settings - latch VU and enable ZC */ snd_soc_update_bits(codec, WM8523_DAC_GAINR, WM8523_DACR_VU, WM8523_DACR_VU); snd_soc_update_bits(codec, WM8523_DAC_CTRL3, WM8523_ZC, WM8523_ZC); wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* Bias level configuration will have done an extra enable */ regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); return 0; err_enable: regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); err_get: regulator_bulk_free(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); return ret; } static int wm8523_remove(struct snd_soc_codec *codec) { struct wm8523_priv *wm8523 = 
snd_soc_codec_get_drvdata(codec); wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF); regulator_bulk_free(ARRAY_SIZE(wm8523->supplies), wm8523->supplies); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8523 = { .probe = wm8523_probe, .remove = wm8523_remove, .suspend = wm8523_suspend, .resume = wm8523_resume, .set_bias_level = wm8523_set_bias_level, .reg_cache_size = WM8523_REGISTER_COUNT, .reg_word_size = sizeof(u16), .reg_cache_default = wm8523_reg, .volatile_register = wm8523_volatile_register, .controls = wm8523_controls, .num_controls = ARRAY_SIZE(wm8523_controls), .dapm_widgets = wm8523_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8523_dapm_widgets), .dapm_routes = wm8523_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8523_dapm_routes), }; static const struct of_device_id wm8523_of_match[] = { { .compatible = "wlf,wm8523" }, { }, }; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static __devinit int wm8523_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8523_priv *wm8523; int ret; wm8523 = kzalloc(sizeof(struct wm8523_priv), GFP_KERNEL); if (wm8523 == NULL) return -ENOMEM; i2c_set_clientdata(i2c, wm8523); wm8523->control_type = SND_SOC_I2C; ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8523, &wm8523_dai, 1); if (ret < 0) kfree(wm8523); return ret; } static __devexit int wm8523_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); kfree(i2c_get_clientdata(client)); return 0; } static const struct i2c_device_id wm8523_i2c_id[] = { { "wm8523", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8523_i2c_id); static struct i2c_driver wm8523_i2c_driver = { .driver = { .name = "wm8523", .owner = THIS_MODULE, .of_match_table = wm8523_of_match, }, .probe = wm8523_i2c_probe, .remove = __devexit_p(wm8523_i2c_remove), .id_table = wm8523_i2c_id, }; #endif static int __init wm8523_modinit(void) { int ret; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&wm8523_i2c_driver); 
if (ret != 0) { printk(KERN_ERR "Failed to register WM8523 I2C driver: %d\n", ret); } #endif return 0; } module_init(wm8523_modinit); static void __exit wm8523_exit(void) { #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&wm8523_i2c_driver); #endif } module_exit(wm8523_exit); MODULE_DESCRIPTION("ASoC WM8523 driver"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL");
gpl-2.0
ISTweak/android_kernel_htc_msm8960
net/x25/x25_dev.c
4898
4512
/* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, randomly fail to work with new * releases, misbehave and/or generally screw up. It might even work. * * This code REQUIRES 2.1.15 or higher * * This module: * This module is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * History * X.25 001 Jonathan Naylor Started coding. * 2000-09-04 Henner Eisen Prevent freeing a dangling skb. */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/if_arp.h> #include <net/x25.h> #include <net/x25device.h> static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) { struct sock *sk; unsigned short frametype; unsigned int lci; if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) return 0; frametype = skb->data[2]; lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); /* * LCI of zero is always for us, and its always a link control * frame. */ if (lci == 0) { x25_link_control(skb, nb, frametype); return 0; } /* * Find an existing socket. */ if ((sk = x25_find_socket(lci, nb)) != NULL) { int queued = 1; skb_reset_transport_header(skb); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { queued = x25_process_rx_frame(sk, skb); } else { queued = !sk_add_backlog(sk, skb); } bh_unlock_sock(sk); sock_put(sk); return queued; } /* * Is is a Call Request ? if so process it. */ if (frametype == X25_CALL_REQUEST) return x25_rx_call_request(skb, nb, lci); /* * Its not a Call Request, nor is it a control frame. * Can we forward it? 
*/ if (x25_forward_data(lci, nb, skb)) { if (frametype == X25_CLEAR_CONFIRMATION) { x25_clear_forward_by_lci(lci); } kfree_skb(skb); return 1; } /* x25_transmit_clear_request(nb, lci, 0x0D); */ if (frametype != X25_CLEAR_CONFIRMATION) printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n",frametype); return 0; } int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { struct sk_buff *nskb; struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) goto drop; nskb = skb_copy(skb, GFP_ATOMIC); if (!nskb) goto drop; kfree_skb(skb); skb = nskb; /* * Packet received from unrecognised device, throw it away. */ nb = x25_get_neigh(dev); if (!nb) { printk(KERN_DEBUG "X.25: unknown neighbour - %s\n", dev->name); goto drop; } if (!pskb_may_pull(skb, 1)) return 0; switch (skb->data[0]) { case X25_IFACE_DATA: skb_pull(skb, 1); if (x25_receive_data(skb, nb)) { x25_neigh_put(nb); goto out; } break; case X25_IFACE_CONNECT: x25_link_established(nb); break; case X25_IFACE_DISCONNECT: x25_link_terminated(nb); break; } x25_neigh_put(nb); drop: kfree_skb(skb); out: return 0; } void x25_establish_link(struct x25_neigh *nb) { struct sk_buff *skb; unsigned char *ptr; switch (nb->dev->type) { case ARPHRD_X25: if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) { printk(KERN_ERR "x25_dev: out of memory\n"); return; } ptr = skb_put(skb, 1); *ptr = X25_IFACE_CONNECT; break; #if IS_ENABLED(CONFIG_LLC) case ARPHRD_ETHER: return; #endif default: return; } skb->protocol = htons(ETH_P_X25); skb->dev = nb->dev; dev_queue_xmit(skb); } void x25_terminate_link(struct x25_neigh *nb) { struct sk_buff *skb; unsigned char *ptr; #if IS_ENABLED(CONFIG_LLC) if (nb->dev->type == ARPHRD_ETHER) return; #endif if (nb->dev->type != ARPHRD_X25) return; skb = alloc_skb(1, GFP_ATOMIC); if (!skb) { printk(KERN_ERR "x25_dev: out of memory\n"); return; } ptr = skb_put(skb, 1); *ptr = X25_IFACE_DISCONNECT; skb->protocol = htons(ETH_P_X25); 
skb->dev = nb->dev; dev_queue_xmit(skb); } void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb) { unsigned char *dptr; skb_reset_network_header(skb); switch (nb->dev->type) { case ARPHRD_X25: dptr = skb_push(skb, 1); *dptr = X25_IFACE_DATA; break; #if IS_ENABLED(CONFIG_LLC) case ARPHRD_ETHER: kfree_skb(skb); return; #endif default: kfree_skb(skb); return; } skb->protocol = htons(ETH_P_X25); skb->dev = nb->dev; dev_queue_xmit(skb); }
gpl-2.0
thanhphat11/android_kernel_pantech_910
drivers/usb/host/ohci-sh.c
4898
3246
/* * OHCI HCD (Host Controller Driver) for USB. * * Copyright (C) 2008 Renesas Solutions Corp. * * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/platform_device.h> static int ohci_sh_start(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); ohci_hcd_init(ohci); ohci_init(ohci); ohci_run(ohci); return 0; } static const struct hc_driver ohci_sh_hc_driver = { .description = hcd_name, .product_desc = "SuperH OHCI", .hcd_priv_size = sizeof(struct ohci_hcd), /* * generic hardware linkage */ .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY, /* * basic lifecycle operations */ .start = ohci_sh_start, .stop = ohci_stop, .shutdown = ohci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, /* * scheduling support */ .get_frame_number = ohci_get_frame, /* * root hub support */ .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif .start_port_reset = ohci_start_port_reset, }; /*-------------------------------------------------------------------------*/ static int ohci_hcd_sh_probe(struct platform_device *pdev) { struct resource *res = NULL; struct usb_hcd 
*hcd = NULL; int irq = -1; int ret; if (usb_disabled()) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { err("platform_get_resource error."); return -ENODEV; } irq = platform_get_irq(pdev, 0); if (irq < 0) { err("platform_get_irq error."); return -ENODEV; } /* initialize hcd */ hcd = usb_create_hcd(&ohci_sh_hc_driver, &pdev->dev, (char *)hcd_name); if (!hcd) { err("Failed to create hcd"); return -ENOMEM; } hcd->regs = (void __iomem *)res->start; hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret != 0) { err("Failed to add hcd"); usb_put_hcd(hcd); return ret; } return ret; } static int ohci_hcd_sh_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); usb_remove_hcd(hcd); usb_put_hcd(hcd); return 0; } static struct platform_driver ohci_hcd_sh_driver = { .probe = ohci_hcd_sh_probe, .remove = ohci_hcd_sh_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "sh_ohci", .owner = THIS_MODULE, }, }; MODULE_ALIAS("platform:sh_ohci");
gpl-2.0
settingout/android_kernel_oneplus_msm8974
sound/soc/codecs/wm8750.c
4898
24917
/* * wm8750.c -- WM8750 ALSA SoC audio driver * * Copyright 2005 Openedhand Ltd. * * Author: Richard Purdie <richard@openedhand.com> * * Based on WM8753.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/of_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include "wm8750.h" /* * wm8750 register cache * We can't read the WM8750 register space when we * are using 2 wire for device control, so we cache them instead. */ static const u16 wm8750_reg[] = { 0x0097, 0x0097, 0x0079, 0x0079, /* 0 */ 0x0000, 0x0008, 0x0000, 0x000a, /* 4 */ 0x0000, 0x0000, 0x00ff, 0x00ff, /* 8 */ 0x000f, 0x000f, 0x0000, 0x0000, /* 12 */ 0x0000, 0x007b, 0x0000, 0x0032, /* 16 */ 0x0000, 0x00c3, 0x00c3, 0x00c0, /* 20 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 24 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 28 */ 0x0000, 0x0000, 0x0050, 0x0050, /* 32 */ 0x0050, 0x0050, 0x0050, 0x0050, /* 36 */ 0x0079, 0x0079, 0x0079, /* 40 */ }; /* codec private data */ struct wm8750_priv { unsigned int sysclk; enum snd_soc_control_type control_type; }; #define wm8750_reset(c) snd_soc_write(c, WM8750_RESET, 0) /* * WM8750 Controls */ static const char *wm8750_bass[] = {"Linear Control", "Adaptive Boost"}; static const char *wm8750_bass_filter[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" }; static const char *wm8750_treble[] = {"8kHz", "4kHz"}; static const char *wm8750_3d_lc[] = {"200Hz", "500Hz"}; static const char *wm8750_3d_uc[] = {"2.2kHz", "1.5kHz"}; static const char *wm8750_3d_func[] = {"Capture", "Playback"}; static const char *wm8750_alc_func[] = {"Off", "Right", "Left", "Stereo"}; 
static const char *wm8750_ng_type[] = {"Constant PGA Gain", "Mute ADC Output"}; static const char *wm8750_line_mux[] = {"Line 1", "Line 2", "Line 3", "PGA", "Differential"}; static const char *wm8750_pga_sel[] = {"Line 1", "Line 2", "Line 3", "Differential"}; static const char *wm8750_out3[] = {"VREF", "ROUT1 + Vol", "MonoOut", "ROUT1"}; static const char *wm8750_diff_sel[] = {"Line 1", "Line 2"}; static const char *wm8750_adcpol[] = {"Normal", "L Invert", "R Invert", "L + R Invert"}; static const char *wm8750_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"}; static const char *wm8750_mono_mux[] = {"Stereo", "Mono (Left)", "Mono (Right)", "Digital Mono"}; static const struct soc_enum wm8750_enum[] = { SOC_ENUM_SINGLE(WM8750_BASS, 7, 2, wm8750_bass), SOC_ENUM_SINGLE(WM8750_BASS, 6, 2, wm8750_bass_filter), SOC_ENUM_SINGLE(WM8750_TREBLE, 6, 2, wm8750_treble), SOC_ENUM_SINGLE(WM8750_3D, 5, 2, wm8750_3d_lc), SOC_ENUM_SINGLE(WM8750_3D, 6, 2, wm8750_3d_uc), SOC_ENUM_SINGLE(WM8750_3D, 7, 2, wm8750_3d_func), SOC_ENUM_SINGLE(WM8750_ALC1, 7, 4, wm8750_alc_func), SOC_ENUM_SINGLE(WM8750_NGATE, 1, 2, wm8750_ng_type), SOC_ENUM_SINGLE(WM8750_LOUTM1, 0, 5, wm8750_line_mux), SOC_ENUM_SINGLE(WM8750_ROUTM1, 0, 5, wm8750_line_mux), SOC_ENUM_SINGLE(WM8750_LADCIN, 6, 4, wm8750_pga_sel), /* 10 */ SOC_ENUM_SINGLE(WM8750_RADCIN, 6, 4, wm8750_pga_sel), SOC_ENUM_SINGLE(WM8750_ADCTL2, 7, 4, wm8750_out3), SOC_ENUM_SINGLE(WM8750_ADCIN, 8, 2, wm8750_diff_sel), SOC_ENUM_SINGLE(WM8750_ADCDAC, 5, 4, wm8750_adcpol), SOC_ENUM_SINGLE(WM8750_ADCDAC, 1, 4, wm8750_deemph), SOC_ENUM_SINGLE(WM8750_ADCIN, 6, 4, wm8750_mono_mux), /* 16 */ }; static const struct snd_kcontrol_new wm8750_snd_controls[] = { SOC_DOUBLE_R("Capture Volume", WM8750_LINVOL, WM8750_RINVOL, 0, 63, 0), SOC_DOUBLE_R("Capture ZC Switch", WM8750_LINVOL, WM8750_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8750_LINVOL, WM8750_RINVOL, 7, 1, 1), SOC_DOUBLE_R("Headphone Playback ZC Switch", WM8750_LOUT1V, WM8750_ROUT1V, 7, 1, 0), 
SOC_DOUBLE_R("Speaker Playback ZC Switch", WM8750_LOUT2V, WM8750_ROUT2V, 7, 1, 0), SOC_ENUM("Playback De-emphasis", wm8750_enum[15]), SOC_ENUM("Capture Polarity", wm8750_enum[14]), SOC_SINGLE("Playback 6dB Attenuate", WM8750_ADCDAC, 7, 1, 0), SOC_SINGLE("Capture 6dB Attenuate", WM8750_ADCDAC, 8, 1, 0), SOC_DOUBLE_R("PCM Volume", WM8750_LDAC, WM8750_RDAC, 0, 255, 0), SOC_ENUM("Bass Boost", wm8750_enum[0]), SOC_ENUM("Bass Filter", wm8750_enum[1]), SOC_SINGLE("Bass Volume", WM8750_BASS, 0, 15, 1), SOC_SINGLE("Treble Volume", WM8750_TREBLE, 0, 15, 1), SOC_ENUM("Treble Cut-off", wm8750_enum[2]), SOC_SINGLE("3D Switch", WM8750_3D, 0, 1, 0), SOC_SINGLE("3D Volume", WM8750_3D, 1, 15, 0), SOC_ENUM("3D Lower Cut-off", wm8750_enum[3]), SOC_ENUM("3D Upper Cut-off", wm8750_enum[4]), SOC_ENUM("3D Mode", wm8750_enum[5]), SOC_SINGLE("ALC Capture Target Volume", WM8750_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Volume", WM8750_ALC1, 4, 7, 0), SOC_ENUM("ALC Capture Function", wm8750_enum[6]), SOC_SINGLE("ALC Capture ZC Switch", WM8750_ALC2, 7, 1, 0), SOC_SINGLE("ALC Capture Hold Time", WM8750_ALC2, 0, 15, 0), SOC_SINGLE("ALC Capture Decay Time", WM8750_ALC3, 4, 15, 0), SOC_SINGLE("ALC Capture Attack Time", WM8750_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture NG Threshold", WM8750_NGATE, 3, 31, 0), SOC_ENUM("ALC Capture NG Type", wm8750_enum[4]), SOC_SINGLE("ALC Capture NG Switch", WM8750_NGATE, 0, 1, 0), SOC_SINGLE("Left ADC Capture Volume", WM8750_LADC, 0, 255, 0), SOC_SINGLE("Right ADC Capture Volume", WM8750_RADC, 0, 255, 0), SOC_SINGLE("ZC Timeout Switch", WM8750_ADCTL1, 0, 1, 0), SOC_SINGLE("Playback Invert Switch", WM8750_ADCTL1, 1, 1, 0), SOC_SINGLE("Right Speaker Playback Invert Switch", WM8750_ADCTL2, 4, 1, 0), /* Unimplemented */ /* ADCDAC Bit 0 - ADCHPD */ /* ADCDAC Bit 4 - HPOR */ /* ADCTL1 Bit 2,3 - DATSEL */ /* ADCTL1 Bit 4,5 - DMONOMIX */ /* ADCTL1 Bit 6,7 - VSEL */ /* ADCTL2 Bit 2 - LRCM */ /* ADCTL2 Bit 3 - TRI */ /* ADCTL3 Bit 5 - HPFLREN */ /* ADCTL3 Bit 6 - VROI */ 
/* ADCTL3 Bit 7,8 - ADCLRM */ /* ADCIN Bit 4 - LDCM */ /* ADCIN Bit 5 - RDCM */ SOC_DOUBLE_R("Mic Boost", WM8750_LADCIN, WM8750_RADCIN, 4, 3, 0), SOC_DOUBLE_R("Bypass Left Playback Volume", WM8750_LOUTM1, WM8750_LOUTM2, 4, 7, 1), SOC_DOUBLE_R("Bypass Right Playback Volume", WM8750_ROUTM1, WM8750_ROUTM2, 4, 7, 1), SOC_DOUBLE_R("Bypass Mono Playback Volume", WM8750_MOUTM1, WM8750_MOUTM2, 4, 7, 1), SOC_SINGLE("Mono Playback ZC Switch", WM8750_MOUTV, 7, 1, 0), SOC_DOUBLE_R("Headphone Playback Volume", WM8750_LOUT1V, WM8750_ROUT1V, 0, 127, 0), SOC_DOUBLE_R("Speaker Playback Volume", WM8750_LOUT2V, WM8750_ROUT2V, 0, 127, 0), SOC_SINGLE("Mono Playback Volume", WM8750_MOUTV, 0, 127, 0), }; /* * DAPM Controls */ /* Left Mixer */ static const struct snd_kcontrol_new wm8750_left_mixer_controls[] = { SOC_DAPM_SINGLE("Playback Switch", WM8750_LOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8750_LOUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8750_LOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8750_LOUTM2, 7, 1, 0), }; /* Right Mixer */ static const struct snd_kcontrol_new wm8750_right_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8750_ROUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8750_ROUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Playback Switch", WM8750_ROUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8750_ROUTM2, 7, 1, 0), }; /* Mono Mixer */ static const struct snd_kcontrol_new wm8750_mono_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8750_MOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8750_MOUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8750_MOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8750_MOUTM2, 7, 1, 0), }; /* Left Line Mux */ static const struct snd_kcontrol_new wm8750_left_line_controls = SOC_DAPM_ENUM("Route", wm8750_enum[8]); /* Right Line Mux */ static const struct snd_kcontrol_new wm8750_right_line_controls = SOC_DAPM_ENUM("Route", wm8750_enum[9]); 
/* Left PGA Mux */ static const struct snd_kcontrol_new wm8750_left_pga_controls = SOC_DAPM_ENUM("Route", wm8750_enum[10]); /* Right PGA Mux */ static const struct snd_kcontrol_new wm8750_right_pga_controls = SOC_DAPM_ENUM("Route", wm8750_enum[11]); /* Out 3 Mux */ static const struct snd_kcontrol_new wm8750_out3_controls = SOC_DAPM_ENUM("Route", wm8750_enum[12]); /* Differential Mux */ static const struct snd_kcontrol_new wm8750_diffmux_controls = SOC_DAPM_ENUM("Route", wm8750_enum[13]); /* Mono ADC Mux */ static const struct snd_kcontrol_new wm8750_monomux_controls = SOC_DAPM_ENUM("Route", wm8750_enum[16]); static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = { SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, &wm8750_left_mixer_controls[0], ARRAY_SIZE(wm8750_left_mixer_controls)), SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, &wm8750_right_mixer_controls[0], ARRAY_SIZE(wm8750_right_mixer_controls)), SND_SOC_DAPM_MIXER("Mono Mixer", WM8750_PWR2, 2, 0, &wm8750_mono_mixer_controls[0], ARRAY_SIZE(wm8750_mono_mixer_controls)), SND_SOC_DAPM_PGA("Right Out 2", WM8750_PWR2, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 2", WM8750_PWR2, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Out 1", WM8750_PWR2, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 1", WM8750_PWR2, 6, 0, NULL, 0), SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8750_PWR2, 7, 0), SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8750_PWR2, 8, 0), SND_SOC_DAPM_MICBIAS("Mic Bias", WM8750_PWR1, 1, 0), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8750_PWR1, 2, 0), SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8750_PWR1, 3, 0), SND_SOC_DAPM_MUX("Left PGA Mux", WM8750_PWR1, 5, 0, &wm8750_left_pga_controls), SND_SOC_DAPM_MUX("Right PGA Mux", WM8750_PWR1, 4, 0, &wm8750_right_pga_controls), SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0, &wm8750_left_line_controls), SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, &wm8750_right_line_controls), SND_SOC_DAPM_MUX("Out3 Mux", SND_SOC_NOPM, 0, 
0, &wm8750_out3_controls), SND_SOC_DAPM_PGA("Out 3", WM8750_PWR2, 1, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono Out 1", WM8750_PWR2, 2, 0, NULL, 0), SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, &wm8750_diffmux_controls), SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, &wm8750_monomux_controls), SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0, &wm8750_monomux_controls), SND_SOC_DAPM_OUTPUT("LOUT1"), SND_SOC_DAPM_OUTPUT("ROUT1"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_OUTPUT("MONO1"), SND_SOC_DAPM_OUTPUT("OUT3"), SND_SOC_DAPM_OUTPUT("VREF"), SND_SOC_DAPM_INPUT("LINPUT1"), SND_SOC_DAPM_INPUT("LINPUT2"), SND_SOC_DAPM_INPUT("LINPUT3"), SND_SOC_DAPM_INPUT("RINPUT1"), SND_SOC_DAPM_INPUT("RINPUT2"), SND_SOC_DAPM_INPUT("RINPUT3"), }; static const struct snd_soc_dapm_route wm8750_dapm_routes[] = { /* left mixer */ {"Left Mixer", "Playback Switch", "Left DAC"}, {"Left Mixer", "Left Bypass Switch", "Left Line Mux"}, {"Left Mixer", "Right Playback Switch", "Right DAC"}, {"Left Mixer", "Right Bypass Switch", "Right Line Mux"}, /* right mixer */ {"Right Mixer", "Left Playback Switch", "Left DAC"}, {"Right Mixer", "Left Bypass Switch", "Left Line Mux"}, {"Right Mixer", "Playback Switch", "Right DAC"}, {"Right Mixer", "Right Bypass Switch", "Right Line Mux"}, /* left out 1 */ {"Left Out 1", NULL, "Left Mixer"}, {"LOUT1", NULL, "Left Out 1"}, /* left out 2 */ {"Left Out 2", NULL, "Left Mixer"}, {"LOUT2", NULL, "Left Out 2"}, /* right out 1 */ {"Right Out 1", NULL, "Right Mixer"}, {"ROUT1", NULL, "Right Out 1"}, /* right out 2 */ {"Right Out 2", NULL, "Right Mixer"}, {"ROUT2", NULL, "Right Out 2"}, /* mono mixer */ {"Mono Mixer", "Left Playback Switch", "Left DAC"}, {"Mono Mixer", "Left Bypass Switch", "Left Line Mux"}, {"Mono Mixer", "Right Playback Switch", "Right DAC"}, {"Mono Mixer", "Right Bypass Switch", "Right Line Mux"}, /* mono out */ {"Mono Out 1", NULL, "Mono Mixer"}, {"MONO1", NULL, "Mono Out 1"}, /* out 3 */ {"Out3 Mux", 
"VREF", "VREF"}, {"Out3 Mux", "ROUT1 + Vol", "ROUT1"}, {"Out3 Mux", "ROUT1", "Right Mixer"}, {"Out3 Mux", "MonoOut", "MONO1"}, {"Out 3", NULL, "Out3 Mux"}, {"OUT3", NULL, "Out 3"}, /* Left Line Mux */ {"Left Line Mux", "Line 1", "LINPUT1"}, {"Left Line Mux", "Line 2", "LINPUT2"}, {"Left Line Mux", "Line 3", "LINPUT3"}, {"Left Line Mux", "PGA", "Left PGA Mux"}, {"Left Line Mux", "Differential", "Differential Mux"}, /* Right Line Mux */ {"Right Line Mux", "Line 1", "RINPUT1"}, {"Right Line Mux", "Line 2", "RINPUT2"}, {"Right Line Mux", "Line 3", "RINPUT3"}, {"Right Line Mux", "PGA", "Right PGA Mux"}, {"Right Line Mux", "Differential", "Differential Mux"}, /* Left PGA Mux */ {"Left PGA Mux", "Line 1", "LINPUT1"}, {"Left PGA Mux", "Line 2", "LINPUT2"}, {"Left PGA Mux", "Line 3", "LINPUT3"}, {"Left PGA Mux", "Differential", "Differential Mux"}, /* Right PGA Mux */ {"Right PGA Mux", "Line 1", "RINPUT1"}, {"Right PGA Mux", "Line 2", "RINPUT2"}, {"Right PGA Mux", "Line 3", "RINPUT3"}, {"Right PGA Mux", "Differential", "Differential Mux"}, /* Differential Mux */ {"Differential Mux", "Line 1", "LINPUT1"}, {"Differential Mux", "Line 1", "RINPUT1"}, {"Differential Mux", "Line 2", "LINPUT2"}, {"Differential Mux", "Line 2", "RINPUT2"}, /* Left ADC Mux */ {"Left ADC Mux", "Stereo", "Left PGA Mux"}, {"Left ADC Mux", "Mono (Left)", "Left PGA Mux"}, {"Left ADC Mux", "Digital Mono", "Left PGA Mux"}, /* Right ADC Mux */ {"Right ADC Mux", "Stereo", "Right PGA Mux"}, {"Right ADC Mux", "Mono (Right)", "Right PGA Mux"}, {"Right ADC Mux", "Digital Mono", "Right PGA Mux"}, /* ADC */ {"Left ADC", NULL, "Left ADC Mux"}, {"Right ADC", NULL, "Right ADC Mux"}, }; struct _coeff_div { u32 mclk; u32 rate; u16 fs; u8 sr:5; u8 usb:1; }; /* codec hifi mclk clock divider coefficients */ static const struct _coeff_div coeff_div[] = { /* 8k */ {12288000, 8000, 1536, 0x6, 0x0}, {11289600, 8000, 1408, 0x16, 0x0}, {18432000, 8000, 2304, 0x7, 0x0}, {16934400, 8000, 2112, 0x17, 0x0}, {12000000, 8000, 1500, 
0x6, 0x1}, /* 11.025k */ {11289600, 11025, 1024, 0x18, 0x0}, {16934400, 11025, 1536, 0x19, 0x0}, {12000000, 11025, 1088, 0x19, 0x1}, /* 16k */ {12288000, 16000, 768, 0xa, 0x0}, {18432000, 16000, 1152, 0xb, 0x0}, {12000000, 16000, 750, 0xa, 0x1}, /* 22.05k */ {11289600, 22050, 512, 0x1a, 0x0}, {16934400, 22050, 768, 0x1b, 0x0}, {12000000, 22050, 544, 0x1b, 0x1}, /* 32k */ {12288000, 32000, 384, 0xc, 0x0}, {18432000, 32000, 576, 0xd, 0x0}, {12000000, 32000, 375, 0xa, 0x1}, /* 44.1k */ {11289600, 44100, 256, 0x10, 0x0}, {16934400, 44100, 384, 0x11, 0x0}, {12000000, 44100, 272, 0x11, 0x1}, /* 48k */ {12288000, 48000, 256, 0x0, 0x0}, {18432000, 48000, 384, 0x1, 0x0}, {12000000, 48000, 250, 0x0, 0x1}, /* 88.2k */ {11289600, 88200, 128, 0x1e, 0x0}, {16934400, 88200, 192, 0x1f, 0x0}, {12000000, 88200, 136, 0x1f, 0x1}, /* 96k */ {12288000, 96000, 128, 0xe, 0x0}, {18432000, 96000, 192, 0xf, 0x0}, {12000000, 96000, 125, 0xe, 0x1}, }; static inline int get_coeff(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; } printk(KERN_ERR "wm8750: could not get coeff for mclk %d @ rate %d\n", mclk, rate); return -EINVAL; } static int wm8750_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8750_priv *wm8750 = snd_soc_codec_get_drvdata(codec); switch (freq) { case 11289600: case 12000000: case 12288000: case 16934400: case 18432000: wm8750->sysclk = freq; return 0; } return -EINVAL; } static int wm8750_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = 0; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: iface = 0x0040; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case 
SND_SOC_DAIFMT_I2S: iface |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: iface |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: iface |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: iface |= 0x0013; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: iface |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: iface |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: iface |= 0x0010; break; default: return -EINVAL; } snd_soc_write(codec, WM8750_IFACE, iface); return 0; } static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct wm8750_priv *wm8750 = snd_soc_codec_get_drvdata(codec); u16 iface = snd_soc_read(codec, WM8750_IFACE) & 0x1f3; u16 srate = snd_soc_read(codec, WM8750_SRATE) & 0x1c0; int coeff = get_coeff(wm8750->sysclk, params_rate(params)); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= 0x0004; break; case SNDRV_PCM_FORMAT_S24_LE: iface |= 0x0008; break; case SNDRV_PCM_FORMAT_S32_LE: iface |= 0x000c; break; } /* set iface & srate */ snd_soc_write(codec, WM8750_IFACE, iface); if (coeff >= 0) snd_soc_write(codec, WM8750_SRATE, srate | (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); return 0; } static int wm8750_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8750_ADCDAC) & 0xfff7; if (mute) snd_soc_write(codec, WM8750_ADCDAC, mute_reg | 0x8); else snd_soc_write(codec, WM8750_ADCDAC, mute_reg); return 0; } static int wm8750_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 pwr_reg = snd_soc_read(codec, WM8750_PWR1) & 0xfe3e; switch (level) { case SND_SOC_BIAS_ON: /* set vmid to 50k and 
unmute dac */ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x00c0); break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { snd_soc_cache_sync(codec); /* Set VMID to 5k */ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1); /* ...and ramp */ msleep(1000); } /* mute dac and set vmid to 500k, enable VREF */ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x0141); break; case SND_SOC_BIAS_OFF: snd_soc_write(codec, WM8750_PWR1, 0x0001); break; } codec->dapm.bias_level = level; return 0; } #define WM8750_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) #define WM8750_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8750_dai_ops = { .hw_params = wm8750_pcm_hw_params, .digital_mute = wm8750_mute, .set_fmt = wm8750_set_dai_fmt, .set_sysclk = wm8750_set_dai_sysclk, }; static struct snd_soc_dai_driver wm8750_dai = { .name = "wm8750-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8750_RATES, .formats = WM8750_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8750_RATES, .formats = WM8750_FORMATS,}, .ops = &wm8750_dai_ops, }; static int wm8750_suspend(struct snd_soc_codec *codec) { wm8750_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8750_resume(struct snd_soc_codec *codec) { wm8750_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8750_probe(struct snd_soc_codec *codec) { struct wm8750_priv *wm8750 = snd_soc_codec_get_drvdata(codec); int ret; ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8750->control_type); if (ret < 0) { printk(KERN_ERR "wm8750: failed to set cache I/O: %d\n", ret); return ret; } ret = wm8750_reset(codec); if (ret < 0) { printk(KERN_ERR "wm8750: 
failed to reset: %d\n", ret); return ret; } /* charge output caps */ wm8750_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* set the update bits */ snd_soc_update_bits(codec, WM8750_LDAC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_RDAC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_LOUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_ROUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_LOUT2V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_ROUT2V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_LINVOL, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8750_RINVOL, 0x0100, 0x0100); return ret; } static int wm8750_remove(struct snd_soc_codec *codec) { wm8750_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8750 = { .probe = wm8750_probe, .remove = wm8750_remove, .suspend = wm8750_suspend, .resume = wm8750_resume, .set_bias_level = wm8750_set_bias_level, .reg_cache_size = ARRAY_SIZE(wm8750_reg), .reg_word_size = sizeof(u16), .reg_cache_default = wm8750_reg, .controls = wm8750_snd_controls, .num_controls = ARRAY_SIZE(wm8750_snd_controls), .dapm_widgets = wm8750_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets), .dapm_routes = wm8750_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8750_dapm_routes), }; static const struct of_device_id wm8750_of_match[] = { { .compatible = "wlf,wm8750", }, { .compatible = "wlf,wm8987", }, { } }; MODULE_DEVICE_TABLE(of, wm8750_of_match); #if defined(CONFIG_SPI_MASTER) static int __devinit wm8750_spi_probe(struct spi_device *spi) { struct wm8750_priv *wm8750; int ret; wm8750 = devm_kzalloc(&spi->dev, sizeof(struct wm8750_priv), GFP_KERNEL); if (wm8750 == NULL) return -ENOMEM; wm8750->control_type = SND_SOC_SPI; spi_set_drvdata(spi, wm8750); ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_wm8750, &wm8750_dai, 1); return ret; } static int __devexit wm8750_spi_remove(struct spi_device *spi) { snd_soc_unregister_codec(&spi->dev); return 0; } 
/* SPI device-id table: the codec answers to both "wm8750" and the
 * register-compatible "wm8987". */
static const struct spi_device_id wm8750_spi_ids[] = {
	{ "wm8750", 0 },
	{ "wm8987", 0 },
	{ },
};
MODULE_DEVICE_TABLE(spi, wm8750_spi_ids);

static struct spi_driver wm8750_spi_driver = {
	.driver = {
		.name	= "wm8750",
		.owner	= THIS_MODULE,
		.of_match_table = wm8750_of_match,
	},
	.id_table	= wm8750_spi_ids,
	.probe		= wm8750_spi_probe,
	.remove		= __devexit_p(wm8750_spi_remove),
};
#endif /* CONFIG_SPI_MASTER */

#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
 * I2C probe: allocate per-codec private data (devm-managed, freed
 * automatically on unbind) and register the codec with the ASoC core.
 * Returns 0 on success or a negative errno from the core.
 */
static __devinit int wm8750_i2c_probe(struct i2c_client *i2c,
				      const struct i2c_device_id *id)
{
	struct wm8750_priv *wm8750;
	int ret;

	wm8750 = devm_kzalloc(&i2c->dev, sizeof(struct wm8750_priv),
			      GFP_KERNEL);
	if (wm8750 == NULL)
		return -ENOMEM;

	i2c_set_clientdata(i2c, wm8750);
	/* Tells the codec probe which control bus to set cache I/O up for. */
	wm8750->control_type = SND_SOC_I2C;

	ret =  snd_soc_register_codec(&i2c->dev,
			&soc_codec_dev_wm8750, &wm8750_dai, 1);
	return ret;
}

static __devexit int wm8750_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}

static const struct i2c_device_id wm8750_i2c_id[] = {
	{ "wm8750", 0 },
	{ "wm8987", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8750_i2c_id);

static struct i2c_driver wm8750_i2c_driver = {
	.driver = {
		.name = "wm8750",
		.owner = THIS_MODULE,
		.of_match_table = wm8750_of_match,
	},
	.probe =    wm8750_i2c_probe,
	.remove =   __devexit_p(wm8750_i2c_remove),
	.id_table = wm8750_i2c_id,
};
#endif

/*
 * Module init: register whichever control-bus drivers are configured.
 *
 * NOTE(review): when both buses are enabled, a failing i2c_add_driver()
 * followed by a succeeding spi_register_driver() overwrites ret, so the
 * I2C failure is only reported via printk, not via the init return code.
 * This mirrors the upstream pattern for dual-bus codecs — confirm before
 * changing.
 */
static int __init wm8750_modinit(void)
{
	int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&wm8750_i2c_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register wm8750 I2C driver: %d\n",
		       ret);
	}
#endif
#if defined(CONFIG_SPI_MASTER)
	ret = spi_register_driver(&wm8750_spi_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register wm8750 SPI driver: %d\n",
		       ret);
	}
#endif
	return ret;
}
module_init(wm8750_modinit);

/* Module exit: unregister both bus drivers (each unregistration is a
 * no-op if the corresponding bus support was compiled out). */
static void __exit wm8750_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&wm8750_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8750_spi_driver);
#endif
}
module_exit(wm8750_exit);

MODULE_DESCRIPTION("ASoC WM8750 driver");
MODULE_AUTHOR("Liam Girdwood");
MODULE_LICENSE("GPL");
gpl-2.0
newkid313/kernel_raybst
drivers/gpu/drm/i915/dvo_ivch.c
5666
10014
/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "dvo.h"

/*
 * register definitions for the i82807aa.
 *
 * Documentation on this chipset can be found in datasheet #29069001 at
 * intel.com.
 */

/*
 * VCH Revision & GMBus Base Addr
 */
#define VR00		0x00
# define VR00_BASE_ADDRESS_MASK		0x007f

/*
 * Functionality Enable
 */
#define VR01		0x01

/*
 * Enable the panel fitter
 */
# define VR01_PANEL_FIT_ENABLE		(1 << 3)
/*
 * Enables the LCD display.
 *
 * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
 */
# define VR01_LCD_ENABLE		(1 << 2)
/** Enables the DVO repeater. */
# define VR01_DVO_BYPASS_ENABLE		(1 << 1)
/** Enables the DVO clock */
# define VR01_DVO_ENABLE		(1 << 0)

/*
 * LCD Interface Format
 */
#define VR10		0x10
/** Enables LVDS output instead of CMOS */
# define VR10_LVDS_ENABLE		(1 << 4)
/** Enables 18-bit LVDS output. */
# define VR10_INTERFACE_1X18		(0 << 2)
/** Enables 24-bit LVDS or CMOS output */
# define VR10_INTERFACE_1X24		(1 << 2)
/** Enables 2x18-bit LVDS or CMOS output. */
# define VR10_INTERFACE_2X18		(2 << 2)
/** Enables 2x24-bit LVDS output */
# define VR10_INTERFACE_2X24		(3 << 2)

/*
 * VR20 LCD Horizontal Display Size
 */
#define VR20	0x20

/*
 * LCD Vertical Display Size
 */
/* NOTE(review): VR21 is defined with the same address as VR20 (0x20).
 * ivch_init() therefore reads priv->width and priv->height from the same
 * register.  This looks like a typo for 0x21 — verify against datasheet
 * #29069001 before changing, since priv->height is otherwise unused here. */
#define VR21	0x20

/*
 * Panel power down status
 */
#define VR30		0x30
/** Read only bit indicating that the panel is not in a safe poweroff state. */
# define VR30_PANEL_ON			(1 << 15)

#define VR40		0x40
# define VR40_STALL_ENABLE		(1 << 13)
# define VR40_VERTICAL_INTERP_ENABLE	(1 << 12)
# define VR40_ENHANCED_PANEL_FITTING	(1 << 11)
# define VR40_HORIZONTAL_INTERP_ENABLE	(1 << 10)
# define VR40_AUTO_RATIO_ENABLE		(1 << 9)
# define VR40_CLOCK_GATING_ENABLE	(1 << 8)

/*
 * Panel Fitting Vertical Ratio
 * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
 */
#define VR41		0x41

/*
 * Panel Fitting Horizontal Ratio
 * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
 */
#define VR42		0x42

/*
 * Horizontal Image Size
 */
#define VR43		0x43

/* VR80 GPIO 0
 */
#define VR80	    0x80
#define VR81	    0x81
#define VR82	    0x82
#define VR83	    0x83
#define VR84	    0x84
#define VR85	    0x85
#define VR86	    0x86
#define VR87	    0x87

/* VR88 GPIO 8
 */
#define VR88	    0x88

/* Graphics BIOS scratch 0
 */
#define VR8E	    0x8E
# define VR8E_PANEL_TYPE_MASK		(0xf << 0)
# define VR8E_PANEL_INTERFACE_CMOS	(0 << 4)
# define VR8E_PANEL_INTERFACE_LVDS	(1 << 4)
# define VR8E_FORCE_DEFAULT_PANEL	(1 << 5)

/* Graphics BIOS scratch 1
 */
#define VR8F	    0x8F
# define VR8F_VCH_PRESENT		(1 << 0)
# define VR8F_DISPLAY_CONN		(1 << 1)
# define VR8F_POWER_MASK		(0x3c)
# define VR8F_POWER_POS			(2)

/* Per-device driver state, hung off intel_dvo_device::dev_priv. */
struct ivch_priv {
	bool quiet;		/* suppress i2c error messages during probe */

	uint16_t width, height;	/* panel size read from VR20/VR21 at init */
};


static void ivch_dump_regs(struct intel_dvo_device *dvo);

/**
 * Reads a register on the ivch.
 *
 * Each of the 256 registers are 16 bits long.
 */
static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
	struct ivch_priv *priv = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
	u8 out_buf[1];
	u8 in_buf[2];

	/* Three-message transaction:
	 *  1. a zero-length read — presumably switches the VCH into read
	 *     mode (TODO confirm against the i82807aa datasheet),
	 *  2. the register address, sent without a repeated START,
	 *  3. the 16-bit register value, read back little-endian.
	 */
	struct i2c_msg msgs[] = {
		{
			.addr = dvo->slave_addr,
			.flags = I2C_M_RD,
			.len = 0,
		},
		{
			.addr = 0,
			.flags = I2C_M_NOSTART,
			.len = 1,
			.buf = out_buf,
		},
		{
			.addr = dvo->slave_addr,
			.flags = I2C_M_RD | I2C_M_NOSTART,
			.len = 2,
			.buf = in_buf,
		}
	};

	out_buf[0] = addr;

	if (i2c_transfer(adapter, msgs, 3) == 3) {
		/* bytes arrive low byte first */
		*data = (in_buf[1] << 8) | in_buf[0];
		return true;
	};

	if (!priv->quiet) {
		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
				"%s:%02x.\n",
			  addr, adapter->name, dvo->slave_addr);
	}
	return false;
}

/** Writes a 16-bit register on the ivch */
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
	struct ivch_priv *priv = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
	u8 out_buf[3];
	struct i2c_msg msg = {
		.addr = dvo->slave_addr,
		.flags = 0,
		.len = 3,
		.buf = out_buf,
	};

	/* payload: register address, then value low byte, then high byte */
	out_buf[0] = addr;
	out_buf[1] = data & 0xff;
	out_buf[2] = data >> 8;

	if (i2c_transfer(adapter, &msg, 1) == 1)
		return true;

	if (!priv->quiet) {
		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
			  addr, adapter->name, dvo->slave_addr);
	}

	return false;
}

/** Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
		      struct i2c_adapter *adapter)
{
	struct ivch_priv *priv;
	uint16_t temp;

	priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
	if (priv == NULL)
		return false;

	dvo->i2c_bus = adapter;
	dvo->dev_priv = priv;
	/* quiet while probing: a missing chip is expected, not an error */
	priv->quiet = true;

	if (!ivch_read(dvo, VR00, &temp))
		goto out;
	priv->quiet = false;

	/* Since the identification bits are probably zeroes, which doesn't seem
	 * very unique, check that the value in the base address field matches
	 * the address it's responding on.
	 */
	if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
		DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
			  "(%d vs %d)\n",
			  (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
		goto out;
	}

	ivch_read(dvo, VR20, &priv->width);
	ivch_read(dvo, VR21, &priv->height);

	return true;

out:
	kfree(priv);
	return false;
}

/* The VCH drives an internal panel, so it is always "connected". */
static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
{
	return connector_status_connected;
}

/* Reject modes whose pixel clock exceeds the chip's 112 MHz limit. */
static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
					    struct drm_display_mode *mode)
{
	if (mode->clock > 112000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
{
	int i;
	uint16_t vr01, vr30, backlight;

	/* Set the new power state of the panel. */
	if (!ivch_read(dvo, VR01, &vr01))
		return;

	/* backlight via GPIO 0 (VR80): 1 = on, 0 = off */
	if (mode == DRM_MODE_DPMS_ON)
		backlight = 1;
	else
		backlight = 0;
	ivch_write(dvo, VR80, backlight);

	if (mode == DRM_MODE_DPMS_ON)
		vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
	else
		vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);

	ivch_write(dvo, VR01, vr01);

	/* Wait for the panel to make its state transition */
	for (i = 0; i < 100; i++) {
		if (!ivch_read(dvo, VR30, &vr30))
			break;

		/* done when the panel-on status bit agrees with the
		 * requested state */
		if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
			break;
		udelay(1000);
	}
	/* wait some more; vch may fail to resync sometimes without this */
	udelay(16 * 1000);
}

/* Program the panel fitter for the requested mode.  When the pipe timings
 * (adjusted_mode) differ from the user mode, enable scaling and write the
 * horizontal/vertical ratios per the VR41/VR42 formulas above. */
static void ivch_mode_set(struct intel_dvo_device *dvo,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	uint16_t vr40 = 0;
	uint16_t vr01;

	vr01 = 0;
	vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
		VR40_HORIZONTAL_INTERP_ENABLE);

	if (mode->hdisplay != adjusted_mode->hdisplay ||
	    mode->vdisplay != adjusted_mode->vdisplay) {
		uint16_t x_ratio, y_ratio;

		vr01 |= VR01_PANEL_FIT_ENABLE;
		vr40 |= VR40_CLOCK_GATING_ENABLE;
		/* fixed-point ratio: ((src - 1) << 16) / (dst - 1), >> 2 */
		x_ratio = (((mode->hdisplay - 1) << 16) /
			   (adjusted_mode->hdisplay - 1)) >> 2;
		y_ratio = (((mode->vdisplay - 1) << 16) /
			   (adjusted_mode->vdisplay - 1)) >> 2;
		ivch_write(dvo, VR42, x_ratio);
		ivch_write(dvo, VR41, y_ratio);
	} else {
		vr01 &= ~VR01_PANEL_FIT_ENABLE;
		vr40 &= ~VR40_CLOCK_GATING_ENABLE;
	}
	vr40 &= ~VR40_AUTO_RATIO_ENABLE;

	ivch_write(dvo, VR01, vr01);
	ivch_write(dvo, VR40, vr40);

	ivch_dump_regs(dvo);
}

/* Dump the interesting VCH registers to the KMS debug log. */
static void ivch_dump_regs(struct intel_dvo_device *dvo)
{
	uint16_t val;

	ivch_read(dvo, VR00, &val);
	DRM_LOG_KMS("VR00: 0x%04x\n", val);
	ivch_read(dvo, VR01, &val);
	DRM_LOG_KMS("VR01: 0x%04x\n", val);
	ivch_read(dvo, VR30, &val);
	DRM_LOG_KMS("VR30: 0x%04x\n", val);
	ivch_read(dvo, VR40, &val);
	DRM_LOG_KMS("VR40: 0x%04x\n", val);

	/* GPIO registers */
	ivch_read(dvo, VR80, &val);
	DRM_LOG_KMS("VR80: 0x%04x\n", val);
	ivch_read(dvo, VR81, &val);
	DRM_LOG_KMS("VR81: 0x%04x\n", val);
	ivch_read(dvo, VR82, &val);
	DRM_LOG_KMS("VR82: 0x%04x\n", val);
	ivch_read(dvo, VR83, &val);
	DRM_LOG_KMS("VR83: 0x%04x\n", val);
	ivch_read(dvo, VR84, &val);
	DRM_LOG_KMS("VR84: 0x%04x\n", val);
	ivch_read(dvo, VR85, &val);
	DRM_LOG_KMS("VR85: 0x%04x\n", val);
	ivch_read(dvo, VR86, &val);
	DRM_LOG_KMS("VR86: 0x%04x\n", val);
	ivch_read(dvo, VR87, &val);
	DRM_LOG_KMS("VR87: 0x%04x\n", val);

	ivch_read(dvo, VR88, &val);
	DRM_LOG_KMS("VR88: 0x%04x\n", val);

	/* Scratch register 0 - AIM Panel type */
	ivch_read(dvo, VR8E, &val);
	DRM_LOG_KMS("VR8E: 0x%04x\n", val);

	/* Scratch register 1 - Status register */
	ivch_read(dvo, VR8F, &val);
	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}

/* Free the per-device state allocated by ivch_init(). */
static void ivch_destroy(struct intel_dvo_device *dvo)
{
	struct ivch_priv *priv = dvo->dev_priv;

	if (priv) {
		kfree(priv);
		dvo->dev_priv = NULL;
	}
}

struct intel_dvo_dev_ops ivch_ops = {
	.init = ivch_init,
	.dpms = ivch_dpms,
	.mode_valid = ivch_mode_valid,
	.mode_set = ivch_mode_set,
	.detect = ivch_detect,
	.dump_regs = ivch_dump_regs,
	.destroy = ivch_destroy,
};
gpl-2.0
Yuskey1989/Nexus_5
net/ipv4/xfrm4_input.c
8226
4269
/*
 * xfrm4_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>
 *		Add Encapsulation support
 *
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip.h>
#include <net/xfrm.h>

/* Extract the IPv4 header fields xfrm cares about into the skb CB. */
int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return xfrm4_extract_header(skb);
}

/* Netfilter okfn: route the decapsulated packet if it has no dst yet,
 * then hand it to the input path.  Frees the skb on routing failure. */
static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
{
	if (skb_dst(skb) == NULL) {
		const struct iphdr *iph = ip_hdr(skb);

		if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
					 iph->tos, skb->dev))
			goto drop;
	}
	return dst_input(skb);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Entry point into the xfrm input machinery for IPv4: record the address
 * family and daddr offset in the SPI CB, then run the transform. */
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
		    int encap_type)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	return xfrm_input(skb, nexthdr, spi, encap_type);
}
EXPORT_SYMBOL(xfrm4_rcv_encap);

/* Finish transport-mode input: restore the inner protocol, rebuild the IP
 * header length/checksum, and re-inject through NF_INET_PRE_ROUTING.
 * Without netfilter the synchronous case short-circuits by returning the
 * negated protocol for direct resubmission. */
int xfrm4_transport_finish(struct sk_buff *skb, int async)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;

#ifndef CONFIG_NETFILTER
	if (!async)
		return -iph->protocol;
#endif

	/* move data back to the network header before fixing up the header */
	__skb_push(skb, skb->data - skb_network_header(skb));
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		xfrm4_rcv_encap_finish);
	return 0;
}

/* If it's a keepalive packet, then just eat it.
 * If it's an encapsulated packet, then pass it to the
 * IPsec xfrm input.
 * Returns 0 if skb passed to xfrm or was dropped.
 * Returns >0 if skb should be passed to UDP.
 * Returns <0 if skb should be resubmitted (-ret is protocol)
 */
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh;
	struct iphdr *iph;
	int iphlen, len;

	__u8 *udpdata;
	__be32 *udpdata32;
	__u16 encap_type = up->encap_type;

	/* if this is not encapsulated socket, then just return now */
	if (!encap_type)
		return 1;

	/* If this is a paged skb, make sure we pull up
	 * whatever data we need to look at. */
	len = skb->len - sizeof(struct udphdr);
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
		return 1;

	/* Now we can get the pointers */
	uh = udp_hdr(skb);
	udpdata = (__u8 *)uh + sizeof(struct udphdr);
	udpdata32 = (__be32 *)udpdata;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			goto drop;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			goto drop;
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {
			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto drop;

	/* Now we can update and verify the packet length... */
	iph = ip_hdr(skb);
	iphlen = iph->ihl << 2;
	/* NOTE(review): tot_len is shrunk before the sanity check below;
	 * on the too-small path the skb is dropped anyway, so the early
	 * mutation is harmless — but keep this ordering in mind when
	 * modifying the drop path. */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	if (skb->len < iphlen + len) {
		/* packet is too small!?! */
		goto drop;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	__skb_pull(skb, len);
	skb_reset_transport_header(skb);

	/* process ESP */
	return xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type);

drop:
	kfree_skb(skb);
	return 0;
}

/* Plain (non-UDP-encapsulated) IPsec input: SPI comes from the packet. */
int xfrm4_rcv(struct sk_buff *skb)
{
	return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0);
}
EXPORT_SYMBOL(xfrm4_rcv);
gpl-2.0
Renzo-Olivares/android_kernel_htc_m7wlv
drivers/media/video/uvc/uvc_isight.c
9762
3903
/*
 * uvc_isight.c --  USB Video Class driver - iSight support
 *
 * Copyright (C) 2006-2007
 *		Ivan N. Zlatev <contact@i-nz.net>
 * Copyright (C) 2008-2009
 *		Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include "uvcvideo.h"

/* Built-in iSight webcams implements most of UVC 1.0 except a
 * different packet format. Instead of sending a header at the
 * beginning of each isochronous transfer payload, the webcam sends a
 * single header per image (on its own in a packet), followed by
 * packets containing data only.
 *
 * Offset	Size (bytes)	Description
 * ------------------------------------------------------------------
 * 0x00		1		Header length
 * 0x01		1		Flags (UVC-compliant)
 * 0x02		4		Always equal to '11223344'
 * 0x06		8		Always equal to 'deadbeefdeadface'
 * 0x0e		16		Unknown
 *
 * The header can be prefixed by an optional, unknown-purpose byte.
 */

/*
 * Decode one isochronous packet into the current buffer.
 *
 * Returns 0 when the packet was consumed (data copied or dropped), or
 * -EAGAIN when a new frame header was found while the buffer already
 * holds data — the caller must complete the buffer and call again so
 * the header is reprocessed against the next buffer.
 *
 * The queue parameter is currently unused.
 */
static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
		const __u8 *data, unsigned int len)
{
	static const __u8 hdr[] = {
		0x11, 0x22, 0x33, 0x44,
		0xde, 0xad, 0xbe, 0xef,
		0xde, 0xad, 0xfa, 0xce
	};

	unsigned int maxlen, nbytes;
	__u8 *mem;
	int is_header = 0;

	if (buf == NULL)
		return 0;

	/* The magic may start at offset 2 or, with the optional prefix
	 * byte, at offset 3. */
	if ((len >= 14 && memcmp(&data[2], hdr, 12) == 0) ||
	    (len >= 15 && memcmp(&data[3], hdr, 12) == 0)) {
		uvc_trace(UVC_TRACE_FRAME, "iSight header found\n");
		is_header = 1;
	}

	/* Synchronize to the input stream by waiting for a header packet. */
	if (buf->state != UVC_BUF_STATE_ACTIVE) {
		if (!is_header) {
			uvc_trace(UVC_TRACE_FRAME, "Dropping packet (out of "
				"sync).\n");
			return 0;
		}

		buf->state = UVC_BUF_STATE_ACTIVE;
	}

	/* Mark the buffer as done if we're at the beginning of a new frame.
	 *
	 * Empty buffers (bytesused == 0) don't trigger end of frame detection
	 * as it doesn't make sense to return an empty buffer.
	 */
	if (is_header && buf->bytesused != 0) {
		buf->state = UVC_BUF_STATE_DONE;
		return -EAGAIN;
	}

	/* Copy the video data to the buffer. Skip header packets, as they
	 * contain no data.
	 */
	if (!is_header) {
		maxlen = buf->length - buf->bytesused;
		mem = buf->mem + buf->bytesused;
		nbytes = min(len, maxlen);
		memcpy(mem, data, nbytes);
		buf->bytesused += nbytes;

		/* Complete the buffer if it is full or the packet would
		 * have overflowed it. */
		if (len > maxlen || buf->bytesused == buf->length) {
			uvc_trace(UVC_TRACE_FRAME, "Frame complete "
				"(overflow).\n");
			buf->state = UVC_BUF_STATE_DONE;
		}
	}

	return 0;
}

/*
 * URB completion handler for iSight streams: walk every isochronous
 * packet in the URB and feed it to isight_decode(), advancing to the
 * next queued buffer whenever a frame completes.
 */
void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
		struct uvc_buffer *buf)
{
	int ret, i;

	for (i = 0; i < urb->number_of_packets; ++i) {
		if (urb->iso_frame_desc[i].status < 0) {
			uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
				"lost (%d).\n",
				urb->iso_frame_desc[i].status);
		}

		/* Decode the payload packet.
		 * uvc_video_decode is entered twice when a frame transition
		 * has been detected because the end of frame can only be
		 * reliably detected when the first packet of the new frame
		 * is processed. The first pass detects the transition and
		 * closes the previous frame's buffer, the second pass
		 * processes the data of the first payload of the new frame.
		 */
		do {
			ret = isight_decode(&stream->queue, buf,
					urb->transfer_buffer +
					urb->iso_frame_desc[i].offset,
					urb->iso_frame_desc[i].actual_length);

			if (buf == NULL)
				break;

			if (buf->state == UVC_BUF_STATE_DONE ||
			    buf->state == UVC_BUF_STATE_ERROR)
				buf = uvc_queue_next_buffer(&stream->queue,
							buf);
		} while (ret == -EAGAIN);
	}
}
gpl-2.0
chillstep1998/AK-OnePone
fs/befs/super.c
13346
3108
/* * super.c * * Copyright (C) 2001-2002 Will Dyson <will_dyson@pobox.com> * * Licensed under the GNU GPL. See the file COPYING for details. * */ #include <linux/fs.h> #include <asm/page.h> /* for PAGE_SIZE */ #include "befs.h" #include "super.h" /** * load_befs_sb -- Read from disk and properly byteswap all the fields * of the befs superblock * * * * */ int befs_load_sb(struct super_block *sb, befs_super_block * disk_sb) { befs_sb_info *befs_sb = BEFS_SB(sb); /* Check the byte order of the filesystem */ if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_LE) befs_sb->byte_order = BEFS_BYTESEX_LE; else if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_BE) befs_sb->byte_order = BEFS_BYTESEX_BE; befs_sb->magic1 = fs32_to_cpu(sb, disk_sb->magic1); befs_sb->magic2 = fs32_to_cpu(sb, disk_sb->magic2); befs_sb->magic3 = fs32_to_cpu(sb, disk_sb->magic3); befs_sb->block_size = fs32_to_cpu(sb, disk_sb->block_size); befs_sb->block_shift = fs32_to_cpu(sb, disk_sb->block_shift); befs_sb->num_blocks = fs64_to_cpu(sb, disk_sb->num_blocks); befs_sb->used_blocks = fs64_to_cpu(sb, disk_sb->used_blocks); befs_sb->inode_size = fs32_to_cpu(sb, disk_sb->inode_size); befs_sb->blocks_per_ag = fs32_to_cpu(sb, disk_sb->blocks_per_ag); befs_sb->ag_shift = fs32_to_cpu(sb, disk_sb->ag_shift); befs_sb->num_ags = fs32_to_cpu(sb, disk_sb->num_ags); befs_sb->log_blocks = fsrun_to_cpu(sb, disk_sb->log_blocks); befs_sb->log_start = fs64_to_cpu(sb, disk_sb->log_start); befs_sb->log_end = fs64_to_cpu(sb, disk_sb->log_end); befs_sb->root_dir = fsrun_to_cpu(sb, disk_sb->root_dir); befs_sb->indices = fsrun_to_cpu(sb, disk_sb->indices); befs_sb->nls = NULL; return BEFS_OK; } int befs_check_sb(struct super_block *sb) { befs_sb_info *befs_sb = BEFS_SB(sb); /* Check magic headers of super block */ if ((befs_sb->magic1 != BEFS_SUPER_MAGIC1) || (befs_sb->magic2 != BEFS_SUPER_MAGIC2) || (befs_sb->magic3 != BEFS_SUPER_MAGIC3)) { befs_error(sb, "invalid magic header"); return BEFS_ERR; } /* * Check blocksize of 
BEFS. * * Blocksize of BEFS is 1024, 2048, 4096 or 8192. */ if ((befs_sb->block_size != 1024) && (befs_sb->block_size != 2048) && (befs_sb->block_size != 4096) && (befs_sb->block_size != 8192)) { befs_error(sb, "invalid blocksize: %u", befs_sb->block_size); return BEFS_ERR; } if (befs_sb->block_size > PAGE_SIZE) { befs_error(sb, "blocksize(%u) cannot be larger" "than system pagesize(%lu)", befs_sb->block_size, PAGE_SIZE); return BEFS_ERR; } /* * block_shift and block_size encode the same information * in different ways as a consistency check. */ if ((1 << befs_sb->block_shift) != befs_sb->block_size) { befs_error(sb, "block_shift disagrees with block_size. " "Corruption likely."); return BEFS_ERR; } if (befs_sb->log_start != befs_sb->log_end) { befs_error(sb, "Filesystem not clean! There are blocks in the " "journal. You must boot into BeOS and mount this volume " "to make it clean."); return BEFS_ERR; } return BEFS_OK; }
gpl-2.0
Grarak/grakernel-n1
drivers/video/tegra/host/host1x/host1x_intr.c
35
6586
/*
 * drivers/video/tegra/host/host1x/host1x_intr.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>

#include "nvhost_intr.h"
#include "dev.h"
#include "host1x_hardware.h"

/*** HW host sync management ***/

/* One-time hardware setup for host sync: disable the busy timeout and
 * maximize the context-switch auto-ack timeout. */
static void t20_intr_init_host_sync(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	/* disable the ip_busy_timeout. this prevents write drops, etc.
	 * there's no real way to recover from a hung client anyway.
	 */
	writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);

	/* increase the auto-ack timout to the maximum value. 2d will hang
	 * otherwise on ap20.
	 */
	writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
}

/* Program the host1x microsecond clock divider (clocks per usec). */
static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr,
					      u32 cpm)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	/* write microsecond clock register */
	writel(cpm, sync_regs + HOST1X_SYNC_USEC_CLK);
}

/* Set the comparison threshold for sync point 'id'; the threshold
 * registers are 16 bits wide, hence the mask. */
static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr,
					  u32 id, u32 thresh)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	thresh &= 0xffff;
	writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
}

/* Enable the CPU0 threshold interrupt for a single sync point. */
static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
}

/* Disable and acknowledge every sync point threshold interrupt, for
 * both CPUs. */
static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;

	/* disable interrupts for both cpu's */
	writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);

	/* clear status for both cpu's */
	writel(0xffffffffu, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
	writel(0xffffffffu, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS);
}

/**
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in interrupt context
 *
 * Masks and acks this sync point's interrupt, then wakes the threaded
 * handler (nvhost_syncpt_thresh_fn) via IRQ_WAKE_THREAD.
 */
irqreturn_t t20_intr_syncpt_thresh_isr(int irq, void *dev_id)
{
	struct nvhost_intr_syncpt *syncpt = dev_id;
	unsigned int id = syncpt->id;
	struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);

	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

	writel(BIT(id),
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
	writel(BIT(id),
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);

	return IRQ_WAKE_THREAD;
}

/**
 * Host general interrupt service function
 * Handles read / write failures
 */
static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id)
{
	struct nvhost_intr *intr = dev_id;
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	u32 stat;
	u32 ext_stat;
	u32 addr;

	stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
	ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);

	if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_READ_INT, ext_stat)) {
		addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
		pr_err("Host read timeout at address %x\n", addr);
	}

	if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_WRITE_INT, ext_stat)) {
		addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
		pr_err("Host write timeout at address %x\n", addr);
	}

	/* ack everything we saw */
	writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);

	return IRQ_HANDLED;
}

/* Request and unmask the general (non-syncpt) host interrupt.
 * Idempotent: returns 0 immediately if already requested.
 * Ordering matters: mask + clear before request_irq, enable after. */
static int t20_intr_request_host_general_irq(struct nvhost_intr *intr)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	int err;

	if (intr->host_general_irq_requested)
		return 0;

	/* master disable for general (not syncpt) host interrupts */
	writel(0, sync_regs + HOST1X_SYNC_INTMASK);

	/* clear status & extstatus */
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS);

	err = request_irq(intr->host_general_irq, t20_intr_host1x_isr, 0,
			"host_status", intr);
	if (err)
		return err;

	/* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
	writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);

	/* enable extra interrupt sources */
	writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);

	/* enable host module interrupt to CPU0 */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);

	/* master enable for general (not syncpt) host interrupts */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);

	intr->host_general_irq_requested = true;

	return err;
}

/* Mask and release the general host interrupt; no-op if not requested. */
static void t20_intr_free_host_general_irq(struct nvhost_intr *intr)
{
	if (intr->host_general_irq_requested) {
		void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

		/* master disable for general (not syncpt) host interrupts */
		writel(0, sync_regs + HOST1X_SYNC_INTMASK);

		free_irq(intr->host_general_irq, intr);
		intr->host_general_irq_requested = false;
	}
}

/* Request the threaded per-syncpt IRQ (hard handler acks, threaded
 * handler does the wakeups).  Idempotent. */
static int t20_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
	int err;
	if (syncpt->irq_requested)
		return 0;

	err = request_threaded_irq(syncpt->irq,
				t20_intr_syncpt_thresh_isr,
				nvhost_syncpt_thresh_fn,
				0, syncpt->thresh_irq_name, syncpt);
	if (err)
		return err;

	syncpt->irq_requested = 1;
	return 0;
}

/* Populate the Tegra20 interrupt op table on the host structure. */
int nvhost_init_t20_intr_support(struct nvhost_master *host)
{
	host->op.intr.init_host_sync = t20_intr_init_host_sync;
	host->op.intr.set_host_clocks_per_usec =
	  t20_intr_set_host_clocks_per_usec;
	host->op.intr.set_syncpt_threshold = t20_intr_set_syncpt_threshold;
	host->op.intr.enable_syncpt_intr = t20_intr_enable_syncpt_intr;
	host->op.intr.disable_all_syncpt_intrs =
	  t20_intr_disable_all_syncpt_intrs;
	host->op.intr.request_host_general_irq =
	  t20_intr_request_host_general_irq;
	host->op.intr.free_host_general_irq =
	  t20_intr_free_host_general_irq;
	host->op.intr.request_syncpt_irq =
	  t20_request_syncpt_irq;

	return 0;
}
gpl-2.0
Fe-Pi/linux
kernel/trace/blktrace.c
35
46371
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk> * */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/blktrace_api.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/debugfs.h> #include <linux/export.h> #include <linux/time.h> #include <linux/uaccess.h> #include <linux/list.h> #include <linux/blk-cgroup.h> #include "../../block/blk.h" #include <trace/events/block.h> #include "trace_output.h" #ifdef CONFIG_BLK_DEV_IO_TRACE static unsigned int blktrace_seq __read_mostly = 1; static struct trace_array *blk_tr; static bool blk_tracer_enabled __read_mostly; static LIST_HEAD(running_trace_list); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock); /* Select an alternative, minimalistic output than the original one */ #define TRACE_BLK_OPT_CLASSIC 0x1 #define TRACE_BLK_OPT_CGROUP 0x2 #define TRACE_BLK_OPT_CGNAME 0x4 static struct tracer_opt blk_tracer_opts[] = { /* Default disable the minimalistic output */ { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) }, #ifdef CONFIG_BLK_CGROUP { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) }, { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) }, #endif { } }; static struct tracer_flags blk_tracer_flags = { .val = 0, .opts = blk_tracer_opts, }; /* Global reference count of probes */ static DEFINE_MUTEX(blk_probe_mutex); static int blk_probes_ref; static void blk_register_tracepoints(void); static void blk_unregister_tracepoints(void); /* * Send out a notify message. */ static void trace_note(struct blk_trace *bt, pid_t pid, int action, const void *data, size_t len, union kernfs_node_id *cgid) { struct blk_io_trace *t; struct ring_buffer_event *event = NULL; struct ring_buffer *buffer = NULL; int pc = 0; int cpu = smp_processor_id(); bool blk_tracer = blk_tracer_enabled; ssize_t cgid_len = cgid ? 
sizeof(*cgid) : 0; if (blk_tracer) { buffer = blk_tr->trace_buffer.buffer; pc = preempt_count(); event = trace_buffer_lock_reserve(buffer, TRACE_BLK, sizeof(*t) + len + cgid_len, 0, pc); if (!event) return; t = ring_buffer_event_data(event); goto record_it; } if (!bt->rchan) return; t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len); if (t) { t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; t->time = ktime_to_ns(ktime_get()); record_it: t->device = bt->dev; t->action = action | (cgid ? __BLK_TN_CGROUP : 0); t->pid = pid; t->cpu = cpu; t->pdu_len = len + cgid_len; if (cgid) memcpy((void *)t + sizeof(*t), cgid, cgid_len); memcpy((void *) t + sizeof(*t) + cgid_len, data, len); if (blk_tracer) trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); } } /* * Send out a notify for this process, if we haven't done so since a trace * started */ static void trace_note_tsk(struct task_struct *tsk) { unsigned long flags; struct blk_trace *bt; tsk->btrace_seq = blktrace_seq; spin_lock_irqsave(&running_trace_lock, flags); list_for_each_entry(bt, &running_trace_list, running_list) { trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm), NULL); } spin_unlock_irqrestore(&running_trace_lock, flags); } static void trace_note_time(struct blk_trace *bt) { struct timespec64 now; unsigned long flags; u32 words[2]; /* need to check user space to see if this breaks in y2038 or y2106 */ ktime_get_real_ts64(&now); words[0] = (u32)now.tv_sec; words[1] = now.tv_nsec; local_irq_save(flags); trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL); local_irq_restore(flags); } void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg, const char *fmt, ...) { int n; va_list args; unsigned long flags; char *buf; if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer_enabled)) return; /* * If the BLK_TC_NOTIFY action mask isn't set, don't send any note * message to the trace. 
*/ if (!(bt->act_mask & BLK_TC_NOTIFY)) return; local_irq_save(flags); buf = this_cpu_ptr(bt->msg_data); va_start(args, fmt); n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); va_end(args); if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) blkcg = NULL; #ifdef CONFIG_BLK_CGROUP trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL); #else trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL); #endif local_irq_restore(flags); } EXPORT_SYMBOL_GPL(__trace_note_message); static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, pid_t pid) { if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0) return 1; if (sector && (sector < bt->start_lba || sector > bt->end_lba)) return 1; if (bt->pid && pid != bt->pid) return 1; return 0; } /* * Data direction bit lookup */ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) }; #define BLK_TC_RAHEAD BLK_TC_AHEAD #define BLK_TC_PREFLUSH BLK_TC_FLUSH /* The ilog2() calls fall out because they're constant */ #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) /* * The worker for the various blk_add_trace*() types. Fills out a * blk_io_trace structure and places it in a per-cpu subbuffer. */ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, int op, int op_flags, u32 what, int error, int pdu_len, void *pdu_data, union kernfs_node_id *cgid) { struct task_struct *tsk = current; struct ring_buffer_event *event = NULL; struct ring_buffer *buffer = NULL; struct blk_io_trace *t; unsigned long flags = 0; unsigned long *sequence; pid_t pid; int cpu, pc = 0; bool blk_tracer = blk_tracer_enabled; ssize_t cgid_len = cgid ? sizeof(*cgid) : 0; if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) return; what |= ddir_act[op_is_write(op) ? 
WRITE : READ]; what |= MASK_TC_BIT(op_flags, SYNC); what |= MASK_TC_BIT(op_flags, RAHEAD); what |= MASK_TC_BIT(op_flags, META); what |= MASK_TC_BIT(op_flags, PREFLUSH); what |= MASK_TC_BIT(op_flags, FUA); if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE) what |= BLK_TC_ACT(BLK_TC_DISCARD); if (op == REQ_OP_FLUSH) what |= BLK_TC_ACT(BLK_TC_FLUSH); if (cgid) what |= __BLK_TA_CGROUP; pid = tsk->pid; if (act_log_check(bt, what, sector, pid)) return; cpu = raw_smp_processor_id(); if (blk_tracer) { tracing_record_cmdline(current); buffer = blk_tr->trace_buffer.buffer; pc = preempt_count(); event = trace_buffer_lock_reserve(buffer, TRACE_BLK, sizeof(*t) + pdu_len + cgid_len, 0, pc); if (!event) return; t = ring_buffer_event_data(event); goto record_it; } if (unlikely(tsk->btrace_seq != blktrace_seq)) trace_note_tsk(tsk); /* * A word about the locking here - we disable interrupts to reserve * some space in the relay per-cpu buffer, to prevent an irq * from coming in and stepping on our toes. */ local_irq_save(flags); t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len); if (t) { sequence = per_cpu_ptr(bt->sequence, cpu); t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; t->sequence = ++(*sequence); t->time = ktime_to_ns(ktime_get()); record_it: /* * These two are not needed in ftrace as they are in the * generic trace_entry, filled by tracing_generic_entry_update, * but for the trace_event->bin() synthesizer benefit we do it * here too. 
*/ t->cpu = cpu; t->pid = pid; t->sector = sector; t->bytes = bytes; t->action = what; t->device = bt->dev; t->error = error; t->pdu_len = pdu_len + cgid_len; if (cgid_len) memcpy((void *)t + sizeof(*t), cgid, cgid_len); if (pdu_len) memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len); if (blk_tracer) { trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); return; } } local_irq_restore(flags); } static void blk_trace_free(struct blk_trace *bt) { debugfs_remove(bt->msg_file); debugfs_remove(bt->dropped_file); relay_close(bt->rchan); debugfs_remove(bt->dir); free_percpu(bt->sequence); free_percpu(bt->msg_data); kfree(bt); } static void get_probe_ref(void) { mutex_lock(&blk_probe_mutex); if (++blk_probes_ref == 1) blk_register_tracepoints(); mutex_unlock(&blk_probe_mutex); } static void put_probe_ref(void) { mutex_lock(&blk_probe_mutex); if (!--blk_probes_ref) blk_unregister_tracepoints(); mutex_unlock(&blk_probe_mutex); } static void blk_trace_cleanup(struct blk_trace *bt) { blk_trace_free(bt); put_probe_ref(); } static int __blk_trace_remove(struct request_queue *q) { struct blk_trace *bt; bt = xchg(&q->blk_trace, NULL); if (!bt) return -EINVAL; if (bt->trace_state != Blktrace_running) blk_trace_cleanup(bt); return 0; } int blk_trace_remove(struct request_queue *q) { int ret; mutex_lock(&q->blk_trace_mutex); ret = __blk_trace_remove(q); mutex_unlock(&q->blk_trace_mutex); return ret; } EXPORT_SYMBOL_GPL(blk_trace_remove); static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct blk_trace *bt = filp->private_data; char buf[16]; snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); } static const struct file_operations blk_dropped_fops = { .owner = THIS_MODULE, .open = simple_open, .read = blk_dropped_read, .llseek = default_llseek, }; static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, size_t count, 
loff_t *ppos) { char *msg; struct blk_trace *bt; if (count >= BLK_TN_MAX_MSG) return -EINVAL; msg = memdup_user_nul(buffer, count); if (IS_ERR(msg)) return PTR_ERR(msg); bt = filp->private_data; __trace_note_message(bt, NULL, "%s", msg); kfree(msg); return count; } static const struct file_operations blk_msg_fops = { .owner = THIS_MODULE, .open = simple_open, .write = blk_msg_write, .llseek = noop_llseek, }; /* * Keep track of how many times we encountered a full subbuffer, to aid * the user space app in telling how many lost events there were. */ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { struct blk_trace *bt; if (!relay_buf_full(buf)) return 1; bt = buf->chan->private_data; atomic_inc(&bt->dropped); return 0; } static int blk_remove_buf_file_callback(struct dentry *dentry) { debugfs_remove(dentry); return 0; } static struct dentry *blk_create_buf_file_callback(const char *filename, struct dentry *parent, umode_t mode, struct rchan_buf *buf, int *is_global) { return debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); } static struct rchan_callbacks blk_relay_callbacks = { .subbuf_start = blk_subbuf_start_callback, .create_buf_file = blk_create_buf_file_callback, .remove_buf_file = blk_remove_buf_file_callback, }; static void blk_trace_setup_lba(struct blk_trace *bt, struct block_device *bdev) { struct hd_struct *part = NULL; if (bdev) part = bdev->bd_part; if (part) { bt->start_lba = part->start_sect; bt->end_lba = part->start_sect + part->nr_sects; } else { bt->start_lba = 0; bt->end_lba = -1ULL; } } /* * Setup everything required to start tracing */ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, struct blk_user_trace_setup *buts) { struct blk_trace *bt = NULL; struct dentry *dir = NULL; int ret; if (!buts->buf_size || !buts->buf_nr) return -EINVAL; if (!blk_debugfs_root) return -ENOENT; strncpy(buts->name, name, 
BLKTRACE_BDEV_SIZE); buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; /* * some device names have larger paths - convert the slashes * to underscores for this to work as expected */ strreplace(buts->name, '/', '_'); bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; ret = -ENOMEM; bt->sequence = alloc_percpu(unsigned long); if (!bt->sequence) goto err; bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); if (!bt->msg_data) goto err; ret = -ENOENT; dir = debugfs_lookup(buts->name, blk_debugfs_root); if (!dir) bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root); if (!dir) goto err; bt->dev = dev; atomic_set(&bt->dropped, 0); INIT_LIST_HEAD(&bt->running_list); ret = -EIO; bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops); if (!bt->dropped_file) goto err; bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops); if (!bt->msg_file) goto err; bt->rchan = relay_open("trace", dir, buts->buf_size, buts->buf_nr, &blk_relay_callbacks, bt); if (!bt->rchan) goto err; bt->act_mask = buts->act_mask; if (!bt->act_mask) bt->act_mask = (u16) -1; blk_trace_setup_lba(bt, bdev); /* overwrite with user settings */ if (buts->start_lba) bt->start_lba = buts->start_lba; if (buts->end_lba) bt->end_lba = buts->end_lba; bt->pid = buts->pid; bt->trace_state = Blktrace_setup; ret = -EBUSY; if (cmpxchg(&q->blk_trace, NULL, bt)) goto err; get_probe_ref(); ret = 0; err: if (dir && !bt->dir) dput(dir); if (ret) blk_trace_free(bt); return ret; } static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, char __user *arg) { struct blk_user_trace_setup buts; int ret; ret = copy_from_user(&buts, arg, sizeof(buts)); if (ret) return -EFAULT; ret = do_blk_trace_setup(q, name, dev, bdev, &buts); if (ret) return ret; if (copy_to_user(arg, &buts, sizeof(buts))) { __blk_trace_remove(q); return -EFAULT; } return 0; } int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 
struct block_device *bdev, char __user *arg) { int ret; mutex_lock(&q->blk_trace_mutex); ret = __blk_trace_setup(q, name, dev, bdev, arg); mutex_unlock(&q->blk_trace_mutex); return ret; } EXPORT_SYMBOL_GPL(blk_trace_setup); #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) static int compat_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, char __user *arg) { struct blk_user_trace_setup buts; struct compat_blk_user_trace_setup cbuts; int ret; if (copy_from_user(&cbuts, arg, sizeof(cbuts))) return -EFAULT; buts = (struct blk_user_trace_setup) { .act_mask = cbuts.act_mask, .buf_size = cbuts.buf_size, .buf_nr = cbuts.buf_nr, .start_lba = cbuts.start_lba, .end_lba = cbuts.end_lba, .pid = cbuts.pid, }; ret = do_blk_trace_setup(q, name, dev, bdev, &buts); if (ret) return ret; if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) { __blk_trace_remove(q); return -EFAULT; } return 0; } #endif static int __blk_trace_startstop(struct request_queue *q, int start) { int ret; struct blk_trace *bt = q->blk_trace; if (bt == NULL) return -EINVAL; /* * For starting a trace, we can transition from a setup or stopped * trace. 
For stopping a trace, the state must be running */ ret = -EINVAL; if (start) { if (bt->trace_state == Blktrace_setup || bt->trace_state == Blktrace_stopped) { blktrace_seq++; smp_mb(); bt->trace_state = Blktrace_running; spin_lock_irq(&running_trace_lock); list_add(&bt->running_list, &running_trace_list); spin_unlock_irq(&running_trace_lock); trace_note_time(bt); ret = 0; } } else { if (bt->trace_state == Blktrace_running) { bt->trace_state = Blktrace_stopped; spin_lock_irq(&running_trace_lock); list_del_init(&bt->running_list); spin_unlock_irq(&running_trace_lock); relay_flush(bt->rchan); ret = 0; } } return ret; } int blk_trace_startstop(struct request_queue *q, int start) { int ret; mutex_lock(&q->blk_trace_mutex); ret = __blk_trace_startstop(q, start); mutex_unlock(&q->blk_trace_mutex); return ret; } EXPORT_SYMBOL_GPL(blk_trace_startstop); /* * When reading or writing the blktrace sysfs files, the references to the * opened sysfs or device files should prevent the underlying block device * from being removed. So no further delete protection is really needed. 
*/ /** * blk_trace_ioctl: - handle the ioctls associated with tracing * @bdev: the block device * @cmd: the ioctl cmd * @arg: the argument data, if any * **/ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) { struct request_queue *q; int ret, start = 0; char b[BDEVNAME_SIZE]; q = bdev_get_queue(bdev); if (!q) return -ENXIO; mutex_lock(&q->blk_trace_mutex); switch (cmd) { case BLKTRACESETUP: bdevname(bdev, b); ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); break; #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) case BLKTRACESETUP32: bdevname(bdev, b); ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); break; #endif case BLKTRACESTART: start = 1; case BLKTRACESTOP: ret = __blk_trace_startstop(q, start); break; case BLKTRACETEARDOWN: ret = __blk_trace_remove(q); break; default: ret = -ENOTTY; break; } mutex_unlock(&q->blk_trace_mutex); return ret; } /** * blk_trace_shutdown: - stop and cleanup trace structures * @q: the request queue associated with the device * **/ void blk_trace_shutdown(struct request_queue *q) { mutex_lock(&q->blk_trace_mutex); if (q->blk_trace) { __blk_trace_startstop(q, 0); __blk_trace_remove(q); } mutex_unlock(&q->blk_trace_mutex); } #ifdef CONFIG_BLK_CGROUP static union kernfs_node_id * blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) { struct blk_trace *bt = q->blk_trace; if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) return NULL; if (!bio->bi_css) return NULL; return cgroup_get_kernfs_id(bio->bi_css->cgroup); } #else static union kernfs_node_id * blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) { return NULL; } #endif static union kernfs_node_id * blk_trace_request_get_cgid(struct request_queue *q, struct request *rq) { if (!rq->bio) return NULL; /* Use the first bio */ return blk_trace_bio_get_cgid(q, rq->bio); } /* * blktrace probes */ /** * blk_add_trace_rq - Add a trace for a request oriented action * @rq: the source request * @error: return 
status to log * @nr_bytes: number of completed bytes * @what: the action * @cgid: the cgroup info * * Description: * Records an action against a request. Will log the bio offset + size. * **/ static void blk_add_trace_rq(struct request *rq, int error, unsigned int nr_bytes, u32 what, union kernfs_node_id *cgid) { struct blk_trace *bt = rq->q->blk_trace; if (likely(!bt)) return; if (blk_rq_is_passthrough(rq)) what |= BLK_TC_ACT(BLK_TC_PC); else what |= BLK_TC_ACT(BLK_TC_FS); __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), rq->cmd_flags, what, error, 0, NULL, cgid); } static void blk_add_trace_rq_insert(void *ignore, struct request_queue *q, struct request *rq) { blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT, blk_trace_request_get_cgid(q, rq)); } static void blk_add_trace_rq_issue(void *ignore, struct request_queue *q, struct request *rq) { blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE, blk_trace_request_get_cgid(q, rq)); } static void blk_add_trace_rq_requeue(void *ignore, struct request_queue *q, struct request *rq) { blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE, blk_trace_request_get_cgid(q, rq)); } static void blk_add_trace_rq_complete(void *ignore, struct request *rq, int error, unsigned int nr_bytes) { blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE, blk_trace_request_get_cgid(rq->q, rq)); } /** * blk_add_trace_bio - Add a trace for a bio oriented action * @q: queue the io is for * @bio: the source bio * @what: the action * @error: error, if any * * Description: * Records an action against a bio. Will log the bio offset + size. 
* **/ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, u32 what, int error) { struct blk_trace *bt = q->blk_trace; if (likely(!bt)) return; __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, what, error, 0, NULL, blk_trace_bio_get_cgid(q, bio)); } static void blk_add_trace_bio_bounce(void *ignore, struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); } static void blk_add_trace_bio_complete(void *ignore, struct request_queue *q, struct bio *bio, int error) { blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); } static void blk_add_trace_bio_backmerge(void *ignore, struct request_queue *q, struct request *rq, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); } static void blk_add_trace_bio_frontmerge(void *ignore, struct request_queue *q, struct request *rq, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0); } static void blk_add_trace_bio_queue(void *ignore, struct request_queue *q, struct bio *bio) { blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0); } static void blk_add_trace_getrq(void *ignore, struct request_queue *q, struct bio *bio, int rw) { if (bio) blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); else { struct blk_trace *bt = q->blk_trace; if (bt) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, NULL, NULL); } } static void blk_add_trace_sleeprq(void *ignore, struct request_queue *q, struct bio *bio, int rw) { if (bio) blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); else { struct blk_trace *bt = q->blk_trace; if (bt) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ, 0, 0, NULL, NULL); } } static void blk_add_trace_plug(void *ignore, struct request_queue *q) { struct blk_trace *bt = q->blk_trace; if (bt) __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL); } static void blk_add_trace_unplug(void *ignore, struct request_queue *q, unsigned int depth, bool explicit) { struct blk_trace *bt = q->blk_trace; if (bt) { __be64 rpdu = 
cpu_to_be64(depth); u32 what; if (explicit) what = BLK_TA_UNPLUG_IO; else what = BLK_TA_UNPLUG_TIMER; __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL); } } static void blk_add_trace_split(void *ignore, struct request_queue *q, struct bio *bio, unsigned int pdu) { struct blk_trace *bt = q->blk_trace; if (bt) { __be64 rpdu = cpu_to_be64(pdu); __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), &rpdu, blk_trace_bio_get_cgid(q, bio)); } } /** * blk_add_trace_bio_remap - Add a trace for a bio-remap operation * @ignore: trace callback data parameter (not used) * @q: queue the io is for * @bio: the source bio * @dev: target device * @from: source sector * * Description: * Device mapper or raid target sometimes need to split a bio because * it spans a stripe (or similar). Add a trace for that action. * **/ static void blk_add_trace_bio_remap(void *ignore, struct request_queue *q, struct bio *bio, dev_t dev, sector_t from) { struct blk_trace *bt = q->blk_trace; struct blk_io_trace_remap r; if (likely(!bt)) return; r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(bio_dev(bio)); r.sector_from = cpu_to_be64(from); __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); } /** * blk_add_trace_rq_remap - Add a trace for a request-remap operation * @ignore: trace callback data parameter (not used) * @q: queue the io is for * @rq: the source request * @dev: target device * @from: source sector * * Description: * Device mapper remaps request to other devices. * Add a trace for that action. 
* **/ static void blk_add_trace_rq_remap(void *ignore, struct request_queue *q, struct request *rq, dev_t dev, sector_t from) { struct blk_trace *bt = q->blk_trace; struct blk_io_trace_remap r; if (likely(!bt)) return; r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); r.sector_from = cpu_to_be64(from); __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rq_data_dir(rq), 0, BLK_TA_REMAP, 0, sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); } /** * blk_add_driver_data - Add binary message with driver-specific data * @q: queue the io is for * @rq: io request * @data: driver-specific data * @len: length of driver-specific data * * Description: * Some drivers might want to write driver-specific data per request. * **/ void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len) { struct blk_trace *bt = q->blk_trace; if (likely(!bt)) return; __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, BLK_TA_DRV_DATA, 0, len, data, blk_trace_request_get_cgid(q, rq)); } EXPORT_SYMBOL_GPL(blk_add_driver_data); static void blk_register_tracepoints(void) { int ret; ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); WARN_ON(ret); ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); WARN_ON(ret); ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); WARN_ON(ret); ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); WARN_ON(ret); ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); WARN_ON(ret); ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); WARN_ON(ret); ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); WARN_ON(ret); ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); WARN_ON(ret); ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); WARN_ON(ret); ret = register_trace_block_getrq(blk_add_trace_getrq, NULL); 
WARN_ON(ret); ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); WARN_ON(ret); ret = register_trace_block_plug(blk_add_trace_plug, NULL); WARN_ON(ret); ret = register_trace_block_unplug(blk_add_trace_unplug, NULL); WARN_ON(ret); ret = register_trace_block_split(blk_add_trace_split, NULL); WARN_ON(ret); ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); WARN_ON(ret); ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); WARN_ON(ret); } static void blk_unregister_tracepoints(void) { unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); unregister_trace_block_split(blk_add_trace_split, NULL); unregister_trace_block_unplug(blk_add_trace_unplug, NULL); unregister_trace_block_plug(blk_add_trace_plug, NULL); unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); unregister_trace_block_getrq(blk_add_trace_getrq, NULL); unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); tracepoint_synchronize_unregister(); } /* * struct blk_io_tracer formatting routines */ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) { int i = 0; int tc = t->action >> BLK_TC_SHIFT; if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { rwbs[i++] = 'N'; goto out; } if (tc & BLK_TC_FLUSH) rwbs[i++] = 'F'; if (tc & BLK_TC_DISCARD) rwbs[i++] = 'D'; else if (tc & BLK_TC_WRITE) rwbs[i++] = 'W'; else if (t->bytes) 
rwbs[i++] = 'R'; else rwbs[i++] = 'N'; if (tc & BLK_TC_FUA) rwbs[i++] = 'F'; if (tc & BLK_TC_AHEAD) rwbs[i++] = 'A'; if (tc & BLK_TC_SYNC) rwbs[i++] = 'S'; if (tc & BLK_TC_META) rwbs[i++] = 'M'; out: rwbs[i] = '\0'; } static inline const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent) { return (const struct blk_io_trace *)ent; } static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg) { return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(union kernfs_node_id) : 0); } static inline const void *cgid_start(const struct trace_entry *ent) { return (void *)(te_blk_io_trace(ent) + 1); } static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg) { return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(union kernfs_node_id) : 0); } static inline u32 t_action(const struct trace_entry *ent) { return te_blk_io_trace(ent)->action; } static inline u32 t_bytes(const struct trace_entry *ent) { return te_blk_io_trace(ent)->bytes; } static inline u32 t_sec(const struct trace_entry *ent) { return te_blk_io_trace(ent)->bytes >> 9; } static inline unsigned long long t_sector(const struct trace_entry *ent) { return te_blk_io_trace(ent)->sector; } static inline __u16 t_error(const struct trace_entry *ent) { return te_blk_io_trace(ent)->error; } static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) { const __u64 *val = pdu_start(ent, has_cg); return be64_to_cpu(*val); } static void get_pdu_remap(const struct trace_entry *ent, struct blk_io_trace_remap *r, bool has_cg) { const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); __u64 sector_from = __r->sector_from; r->device_from = be32_to_cpu(__r->device_from); r->device_to = be32_to_cpu(__r->device_to); r->sector_from = be64_to_cpu(sector_from); } typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, bool has_cg); static void blk_log_action_classic(struct trace_iterator *iter, const char *act, bool has_cg) { char 
rwbs[RWBS_LEN]; unsigned long long ts = iter->ts; unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); unsigned secs = (unsigned long)ts; const struct blk_io_trace *t = te_blk_io_trace(iter->ent); fill_rwbs(rwbs, t); trace_seq_printf(&iter->seq, "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", MAJOR(t->device), MINOR(t->device), iter->cpu, secs, nsec_rem, iter->ent->pid, act, rwbs); } static void blk_log_action(struct trace_iterator *iter, const char *act, bool has_cg) { char rwbs[RWBS_LEN]; const struct blk_io_trace *t = te_blk_io_trace(iter->ent); fill_rwbs(rwbs, t); if (has_cg) { const union kernfs_node_id *id = cgid_start(iter->ent); if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) { char blkcg_name_buf[NAME_MAX + 1] = "<...>"; cgroup_path_from_kernfs_id(id, blkcg_name_buf, sizeof(blkcg_name_buf)); trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ", MAJOR(t->device), MINOR(t->device), blkcg_name_buf, act, rwbs); } else trace_seq_printf(&iter->seq, "%3d,%-3d %x,%-x %2s %3s ", MAJOR(t->device), MINOR(t->device), id->ino, id->generation, act, rwbs); } else trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", MAJOR(t->device), MINOR(t->device), act, rwbs); } static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { const unsigned char *pdu_buf; int pdu_len; int i, end; pdu_buf = pdu_start(ent, has_cg); pdu_len = pdu_real_len(ent, has_cg); if (!pdu_len) return; /* find the last zero that needs to be printed */ for (end = pdu_len - 1; end >= 0; end--) if (pdu_buf[end]) break; end++; trace_seq_putc(s, '('); for (i = 0; i < pdu_len; i++) { trace_seq_printf(s, "%s%02x", i == 0 ? "" : " ", pdu_buf[i]); /* * stop when the rest is just zeroes and indicate so * with a ".." appended */ if (i == end && end != pdu_len - 1) { trace_seq_puts(s, " ..) 
"); return; } } trace_seq_puts(s, ") "); } static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { trace_seq_printf(s, "%u ", t_bytes(ent)); blk_log_dump_pdu(s, ent, has_cg); trace_seq_printf(s, "[%s]\n", cmd); } else { if (t_sec(ent)) trace_seq_printf(s, "%llu + %u [%s]\n", t_sector(ent), t_sec(ent), cmd); else trace_seq_printf(s, "[%s]\n", cmd); } } static void blk_log_with_error(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { blk_log_dump_pdu(s, ent, has_cg); trace_seq_printf(s, "[%d]\n", t_error(ent)); } else { if (t_sec(ent)) trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent), t_sec(ent), t_error(ent)); else trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent)); } } static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { struct blk_io_trace_remap r = { .device_from = 0, }; get_pdu_remap(ent, &r, has_cg); trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", t_sector(ent), t_sec(ent), MAJOR(r.device_from), MINOR(r.device_from), (unsigned long long)r.sector_from); } static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); trace_seq_printf(s, "[%s]\n", cmd); } static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg)); } static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { char cmd[TASK_COMM_LEN]; trace_find_cmdline(ent->pid, cmd); trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), get_pdu_int(ent, has_cg), cmd); } static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { 
trace_seq_putmem(s, pdu_start(ent, has_cg), pdu_real_len(ent, has_cg)); trace_seq_putc(s, '\n'); } /* * struct tracer operations */ static void blk_tracer_print_header(struct seq_file *m) { if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) return; seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n" "# | | | | | |\n"); } static void blk_tracer_start(struct trace_array *tr) { blk_tracer_enabled = true; } static int blk_tracer_init(struct trace_array *tr) { blk_tr = tr; blk_tracer_start(tr); return 0; } static void blk_tracer_stop(struct trace_array *tr) { blk_tracer_enabled = false; } static void blk_tracer_reset(struct trace_array *tr) { blk_tracer_stop(tr); } static const struct { const char *act[2]; void (*print)(struct trace_seq *s, const struct trace_entry *ent, bool has_cg); } what2act[] = { [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic }, [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error }, [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic }, [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug }, [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap }, }; static enum print_line_t print_one_line(struct trace_iterator *iter, bool classic) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; const struct blk_io_trace *t; u16 what; bool long_act; blk_log_action_t *log_action; bool has_cg; t = 
te_blk_io_trace(iter->ent); what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP; long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE); log_action = classic ? &blk_log_action_classic : &blk_log_action; has_cg = t->action & __BLK_TA_CGROUP; if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { log_action(iter, long_act ? "message" : "m", has_cg); blk_log_msg(s, iter->ent, has_cg); return trace_handle_return(s); } if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) trace_seq_printf(s, "Unknown action %x\n", what); else { log_action(iter, what2act[what].act[long_act], has_cg); what2act[what].print(s, iter->ent, has_cg); } return trace_handle_return(s); } static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, int flags, struct trace_event *event) { return print_one_line(iter, false); } static void blk_trace_synthesize_old_trace(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; const int offset = offsetof(struct blk_io_trace, sector); struct blk_io_trace old = { .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION, .time = iter->ts, }; trace_seq_putmem(s, &old, offset); trace_seq_putmem(s, &t->sector, sizeof(old) - offset + t->pdu_len); } static enum print_line_t blk_trace_event_print_binary(struct trace_iterator *iter, int flags, struct trace_event *event) { blk_trace_synthesize_old_trace(iter); return trace_handle_return(&iter->seq); } static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) { if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) return TRACE_TYPE_UNHANDLED; return print_one_line(iter, true); } static int blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { /* don't output context-info for blk_classic output */ if (bit == TRACE_BLK_OPT_CLASSIC) { if (set) tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO; else tr->trace_flags |= TRACE_ITER_CONTEXT_INFO; } return 0; } static struct tracer blk_tracer 
__read_mostly = { .name = "blk", .init = blk_tracer_init, .reset = blk_tracer_reset, .start = blk_tracer_start, .stop = blk_tracer_stop, .print_header = blk_tracer_print_header, .print_line = blk_tracer_print_line, .flags = &blk_tracer_flags, .set_flag = blk_tracer_set_flag, }; static struct trace_event_functions trace_blk_event_funcs = { .trace = blk_trace_event_print, .binary = blk_trace_event_print_binary, }; static struct trace_event trace_blk_event = { .type = TRACE_BLK, .funcs = &trace_blk_event_funcs, }; static int __init init_blk_tracer(void) { if (!register_trace_event(&trace_blk_event)) { pr_warn("Warning: could not register block events\n"); return 1; } if (register_tracer(&blk_tracer) != 0) { pr_warn("Warning: could not register the block tracer\n"); unregister_trace_event(&trace_blk_event); return 1; } return 0; } device_initcall(init_blk_tracer); static int blk_trace_remove_queue(struct request_queue *q) { struct blk_trace *bt; bt = xchg(&q->blk_trace, NULL); if (bt == NULL) return -EINVAL; put_probe_ref(); blk_trace_free(bt); return 0; } /* * Setup everything required to start tracing */ static int blk_trace_setup_queue(struct request_queue *q, struct block_device *bdev) { struct blk_trace *bt = NULL; int ret = -ENOMEM; bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); if (!bt->msg_data) goto free_bt; bt->dev = bdev->bd_dev; bt->act_mask = (u16)-1; blk_trace_setup_lba(bt, bdev); ret = -EBUSY; if (cmpxchg(&q->blk_trace, NULL, bt)) goto free_bt; get_probe_ref(); return 0; free_bt: blk_trace_free(bt); return ret; } /* * sysfs interface to enable and configure tracing */ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t sysfs_blk_trace_attr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); #define BLK_TRACE_DEVICE_ATTR(_name) \ DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \ 
sysfs_blk_trace_attr_show, \ sysfs_blk_trace_attr_store) static BLK_TRACE_DEVICE_ATTR(enable); static BLK_TRACE_DEVICE_ATTR(act_mask); static BLK_TRACE_DEVICE_ATTR(pid); static BLK_TRACE_DEVICE_ATTR(start_lba); static BLK_TRACE_DEVICE_ATTR(end_lba); static struct attribute *blk_trace_attrs[] = { &dev_attr_enable.attr, &dev_attr_act_mask.attr, &dev_attr_pid.attr, &dev_attr_start_lba.attr, &dev_attr_end_lba.attr, NULL }; struct attribute_group blk_trace_attr_group = { .name = "trace", .attrs = blk_trace_attrs, }; static const struct { int mask; const char *str; } mask_maps[] = { { BLK_TC_READ, "read" }, { BLK_TC_WRITE, "write" }, { BLK_TC_FLUSH, "flush" }, { BLK_TC_SYNC, "sync" }, { BLK_TC_QUEUE, "queue" }, { BLK_TC_REQUEUE, "requeue" }, { BLK_TC_ISSUE, "issue" }, { BLK_TC_COMPLETE, "complete" }, { BLK_TC_FS, "fs" }, { BLK_TC_PC, "pc" }, { BLK_TC_NOTIFY, "notify" }, { BLK_TC_AHEAD, "ahead" }, { BLK_TC_META, "meta" }, { BLK_TC_DISCARD, "discard" }, { BLK_TC_DRV_DATA, "drv_data" }, { BLK_TC_FUA, "fua" }, }; static int blk_trace_str2mask(const char *str) { int i; int mask = 0; char *buf, *s, *token; buf = kstrdup(str, GFP_KERNEL); if (buf == NULL) return -ENOMEM; s = strstrip(buf); while (1) { token = strsep(&s, ","); if (token == NULL) break; if (*token == '\0') continue; for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { if (strcasecmp(token, mask_maps[i].str) == 0) { mask |= mask_maps[i].mask; break; } } if (i == ARRAY_SIZE(mask_maps)) { mask = -EINVAL; break; } } kfree(buf); return mask; } static ssize_t blk_trace_mask2str(char *buf, int mask) { int i; char *p = buf; for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { if (mask & mask_maps[i].mask) { p += sprintf(p, "%s%s", (p == buf) ? 
"" : ",", mask_maps[i].str); } } *p++ = '\n'; return p - buf; } static struct request_queue *blk_trace_get_queue(struct block_device *bdev) { if (bdev->bd_disk == NULL) return NULL; return bdev_get_queue(bdev); } static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); struct request_queue *q; struct block_device *bdev; ssize_t ret = -ENXIO; bdev = bdget(part_devt(p)); if (bdev == NULL) goto out; q = blk_trace_get_queue(bdev); if (q == NULL) goto out_bdput; mutex_lock(&q->blk_trace_mutex); if (attr == &dev_attr_enable) { ret = sprintf(buf, "%u\n", !!q->blk_trace); goto out_unlock_bdev; } if (q->blk_trace == NULL) ret = sprintf(buf, "disabled\n"); else if (attr == &dev_attr_act_mask) ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); else if (attr == &dev_attr_pid) ret = sprintf(buf, "%u\n", q->blk_trace->pid); else if (attr == &dev_attr_start_lba) ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); else if (attr == &dev_attr_end_lba) ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); out_unlock_bdev: mutex_unlock(&q->blk_trace_mutex); out_bdput: bdput(bdev); out: return ret; } static ssize_t sysfs_blk_trace_attr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct block_device *bdev; struct request_queue *q; struct hd_struct *p; u64 value; ssize_t ret = -EINVAL; if (count == 0) goto out; if (attr == &dev_attr_act_mask) { if (kstrtoull(buf, 0, &value)) { /* Assume it is a list of trace category names */ ret = blk_trace_str2mask(buf); if (ret < 0) goto out; value = ret; } } else if (kstrtoull(buf, 0, &value)) goto out; ret = -ENXIO; p = dev_to_part(dev); bdev = bdget(part_devt(p)); if (bdev == NULL) goto out; q = blk_trace_get_queue(bdev); if (q == NULL) goto out_bdput; mutex_lock(&q->blk_trace_mutex); if (attr == &dev_attr_enable) { if (!!value == !!q->blk_trace) { ret = 0; goto out_unlock_bdev; } if (value) ret = 
blk_trace_setup_queue(q, bdev); else ret = blk_trace_remove_queue(q); goto out_unlock_bdev; } ret = 0; if (q->blk_trace == NULL) ret = blk_trace_setup_queue(q, bdev); if (ret == 0) { if (attr == &dev_attr_act_mask) q->blk_trace->act_mask = value; else if (attr == &dev_attr_pid) q->blk_trace->pid = value; else if (attr == &dev_attr_start_lba) q->blk_trace->start_lba = value; else if (attr == &dev_attr_end_lba) q->blk_trace->end_lba = value; } out_unlock_bdev: mutex_unlock(&q->blk_trace_mutex); out_bdput: bdput(bdev); out: return ret ? ret : count; } int blk_trace_init_sysfs(struct device *dev) { return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); } void blk_trace_remove_sysfs(struct device *dev) { sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); } #endif /* CONFIG_BLK_DEV_IO_TRACE */ #ifdef CONFIG_EVENT_TRACING void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes) { int i = 0; if (op & REQ_PREFLUSH) rwbs[i++] = 'F'; switch (op & REQ_OP_MASK) { case REQ_OP_WRITE: case REQ_OP_WRITE_SAME: rwbs[i++] = 'W'; break; case REQ_OP_DISCARD: rwbs[i++] = 'D'; break; case REQ_OP_SECURE_ERASE: rwbs[i++] = 'D'; rwbs[i++] = 'E'; break; case REQ_OP_FLUSH: rwbs[i++] = 'F'; break; case REQ_OP_READ: rwbs[i++] = 'R'; break; default: rwbs[i++] = 'N'; } if (op & REQ_FUA) rwbs[i++] = 'F'; if (op & REQ_RAHEAD) rwbs[i++] = 'A'; if (op & REQ_SYNC) rwbs[i++] = 'S'; if (op & REQ_META) rwbs[i++] = 'M'; rwbs[i] = '\0'; } EXPORT_SYMBOL_GPL(blk_fill_rwbs); #endif /* CONFIG_EVENT_TRACING */
gpl-2.0
SaleJumper/android-source-browsing.platform--external--checkpolicy
test/dispol.c
35
13486
/* Authors: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com> * * Copyright (C) 2003 Tresys Technology, LLC * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. */ /* * displaypol.c * * Test program to the contents of a binary policy in text * form. This program currently only displays the * avtab (including conditional avtab) rules. * * displaypol binary_pol_file */ #include <sepol/policydb/policydb.h> #include <sepol/policydb/avtab.h> #include <sepol/policydb/services.h> #include <sepol/policydb/conditional.h> #include <sepol/policydb/expand.h> #include <sepol/policydb/util.h> #include <sepol/policydb/polcaps.h> #include <getopt.h> #include <assert.h> #include <unistd.h> #include <stdlib.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/mman.h> #include <errno.h> #include <stdio.h> #include <fcntl.h> static policydb_t policydb; void usage(char *progname) { printf("usage: %s binary_pol_file\n\n", progname); exit(1); } int render_access_mask(uint32_t mask, avtab_key_t * key, policydb_t * p, FILE * fp) { char *perm; fprintf(fp, "{"); perm = sepol_av_to_string(p, key->target_class, mask); if (perm) fprintf(fp, "%s ", perm); fprintf(fp, "}"); return 0; } int render_type(uint32_t type, policydb_t * p, FILE * fp) { fprintf(fp, "%s", p->p_type_val_to_name[type - 1]); return 0; } int render_key(avtab_key_t * key, policydb_t * p, FILE * fp) { char *stype, *ttype, *tclass; stype = p->p_type_val_to_name[key->source_type - 1]; ttype = p->p_type_val_to_name[key->target_type - 1]; tclass = p->p_class_val_to_name[key->target_class - 1]; if (stype && ttype) fprintf(fp, "%s %s : %s ", stype, ttype, tclass); else if (stype) fprintf(fp, "%s %u : %s ", stype, key->target_type, tclass); else if (ttype) fprintf(fp, "%u %s : %s ", key->source_type, ttype, tclass); else fprintf(fp, "%u %u : %s ", key->source_type, 
key->target_type, tclass); return 0; } /* 'what' values for this function */ #define RENDER_UNCONDITIONAL 0x0001 /* render all regardless of enabled state */ #define RENDER_ENABLED 0x0002 #define RENDER_DISABLED 0x0004 #define RENDER_CONDITIONAL (RENDER_ENABLED|RENDER_DISABLED) int render_av_rule(avtab_key_t * key, avtab_datum_t * datum, uint32_t what, policydb_t * p, FILE * fp) { if (!(what & RENDER_UNCONDITIONAL)) { if (what != RENDER_CONDITIONAL && (((what & RENDER_ENABLED) && !(key-> specified & AVTAB_ENABLED)) || ((what & RENDER_DISABLED) && (key-> specified & AVTAB_ENABLED)))) { return 0; /* doesn't match selection criteria */ } } if (!(what & RENDER_UNCONDITIONAL)) { if (key->specified & AVTAB_ENABLED) fprintf(fp, "[enabled] "); else if (!(key->specified & AVTAB_ENABLED)) fprintf(fp, "[disabled] "); } if (key->specified & AVTAB_AV) { if (key->specified & AVTAB_ALLOWED) { fprintf(fp, "allow "); render_key(key, p, fp); render_access_mask(datum->data, key, p, fp); fprintf(fp, ";\n"); } if (key->specified & AVTAB_AUDITALLOW) { fprintf(fp, "auditallow "); render_key(key, p, fp); render_access_mask(datum->data, key, p, fp); fprintf(fp, ";\n"); } if (key->specified & AVTAB_AUDITDENY) { fprintf(fp, "dontaudit "); render_key(key, p, fp); /* We inverse the mask for dontaudit since the mask is internally stored * as a auditdeny mask */ render_access_mask(~datum->data, key, p, fp); fprintf(fp, ";\n"); } } else if (key->specified & AVTAB_TYPE) { if (key->specified & AVTAB_TRANSITION) { fprintf(fp, "type_transition "); render_key(key, p, fp); render_type(datum->data, p, fp); fprintf(fp, ";\n"); } if (key->specified & AVTAB_MEMBER) { fprintf(fp, "type_member "); render_key(key, p, fp); render_type(datum->data, p, fp); fprintf(fp, ";\n"); } if (key->specified & AVTAB_CHANGE) { fprintf(fp, "type_change "); render_key(key, p, fp); render_type(datum->data, p, fp); fprintf(fp, ";\n"); } } else { fprintf(fp, " ERROR: no valid rule type specified\n"); return -1; } return 0; } int 
display_avtab(avtab_t * a, uint32_t what, policydb_t * p, FILE * fp) { unsigned int i; avtab_ptr_t cur; avtab_t expa; if (avtab_init(&expa)) goto oom; if (expand_avtab(p, a, &expa)) { avtab_destroy(&expa); goto oom; } /* hmm...should have used avtab_map. */ for (i = 0; i < expa.nslot; i++) { for (cur = expa.htable[i]; cur; cur = cur->next) { render_av_rule(&cur->key, &cur->datum, what, p, fp); } } avtab_destroy(&expa); fprintf(fp, "\n"); return 0; oom: fprintf(stderr, "out of memory\n"); return 1; } int display_bools(policydb_t * p, FILE * fp) { unsigned int i; for (i = 0; i < p->p_bools.nprim; i++) { fprintf(fp, "%s : %d\n", p->p_bool_val_to_name[i], p->bool_val_to_struct[i]->state); } return 0; } void display_expr(policydb_t * p, cond_expr_t * exp, FILE * fp) { cond_expr_t *cur; for (cur = exp; cur != NULL; cur = cur->next) { switch (cur->expr_type) { case COND_BOOL: fprintf(fp, "%s ", p->p_bool_val_to_name[cur->bool - 1]); break; case COND_NOT: fprintf(fp, "! "); break; case COND_OR: fprintf(fp, "|| "); break; case COND_AND: fprintf(fp, "&& "); break; case COND_XOR: fprintf(fp, "^ "); break; case COND_EQ: fprintf(fp, "== "); break; case COND_NEQ: fprintf(fp, "!= "); break; default: fprintf(fp, "error!"); break; } } } int display_cond_expressions(policydb_t * p, FILE * fp) { cond_node_t *cur; cond_av_list_t *av_cur, *expl = NULL; avtab_t expa; for (cur = p->cond_list; cur != NULL; cur = cur->next) { fprintf(fp, "expression: "); display_expr(p, cur->expr, fp); fprintf(fp, "current state: %d\n", cur->cur_state); fprintf(fp, "True list:\n"); if (avtab_init(&expa)) goto oom; if (expand_cond_av_list(p, cur->true_list, &expl, &expa)) { avtab_destroy(&expa); goto oom; } for (av_cur = expl; av_cur != NULL; av_cur = av_cur->next) { fprintf(fp, "\t"); render_av_rule(&av_cur->node->key, &av_cur->node->datum, RENDER_CONDITIONAL, p, fp); } cond_av_list_destroy(expl); avtab_destroy(&expa); fprintf(fp, "False list:\n"); if (avtab_init(&expa)) goto oom; if 
(expand_cond_av_list(p, cur->false_list, &expl, &expa)) { avtab_destroy(&expa); goto oom; } for (av_cur = expl; av_cur != NULL; av_cur = av_cur->next) { fprintf(fp, "\t"); render_av_rule(&av_cur->node->key, &av_cur->node->datum, RENDER_CONDITIONAL, p, fp); } cond_av_list_destroy(expl); avtab_destroy(&expa); } return 0; oom: fprintf(stderr, "out of memory\n"); return 1; } int display_handle_unknown(policydb_t * p, FILE * out_fp) { if (p->handle_unknown == ALLOW_UNKNOWN) fprintf(out_fp, "Allow unknown classes and permisions\n"); else if (p->handle_unknown == DENY_UNKNOWN) fprintf(out_fp, "Deny unknown classes and permisions\n"); else if (p->handle_unknown == REJECT_UNKNOWN) fprintf(out_fp, "Reject unknown classes and permisions\n"); return 0; } int change_bool(char *name, int state, policydb_t * p, FILE * fp) { cond_bool_datum_t *bool; bool = hashtab_search(p->p_bools.table, name); if (bool == NULL) { fprintf(fp, "Could not find bool %s\n", name); return -1; } bool->state = state; evaluate_conds(p); return 0; } static void display_policycaps(policydb_t * p, FILE * fp) { ebitmap_node_t *node; const char *capname; char buf[64]; unsigned int i; fprintf(fp, "policy capabilities:\n"); ebitmap_for_each_bit(&p->policycaps, node, i) { if (ebitmap_node_get_bit(node, i)) { capname = sepol_polcap_getname(i); if (capname == NULL) { snprintf(buf, sizeof(buf), "unknown (%d)", i); capname = buf; } fprintf(fp, "\t%s\n", capname); } } } static void display_id(policydb_t *p, FILE *fp, uint32_t symbol_type, uint32_t symbol_value, char *prefix) { char *id = p->sym_val_to_name[symbol_type][symbol_value]; fprintf(fp, " %s%s", prefix, id); } static void display_permissive(policydb_t *p, FILE *fp) { ebitmap_node_t *node; unsigned int i; fprintf(fp, "permissive sids:\n"); ebitmap_for_each_bit(&p->permissive_map, node, i) { if (ebitmap_node_get_bit(node, i)) { fprintf(fp, "\t"); display_id(p, fp, SYM_TYPES, i - 1, ""); fprintf(fp, "\n"); } } } static void display_role_trans(policydb_t *p, 
FILE *fp) { role_trans_t *rt; fprintf(fp, "role_trans rules:\n"); for (rt = p->role_tr; rt; rt = rt->next) { display_id(p, fp, SYM_ROLES, rt->role - 1, ""); display_id(p, fp, SYM_TYPES, rt->type - 1, ""); display_id(p, fp, SYM_CLASSES, rt->tclass - 1, ":"); display_id(p, fp, SYM_ROLES, rt->new_role - 1, ""); fprintf(fp, "\n"); } } static void display_filename_trans(policydb_t *p, FILE *fp) { filename_trans_t *ft; fprintf(fp, "filename_trans rules:\n"); for (ft = p->filename_trans; ft; ft = ft->next) { display_id(p, fp, SYM_TYPES, ft->stype - 1, ""); display_id(p, fp, SYM_TYPES, ft->ttype - 1, ""); display_id(p, fp, SYM_CLASSES, ft->tclass - 1, ":"); display_id(p, fp, SYM_TYPES, ft->otype - 1, ""); fprintf(fp, " %s\n", ft->name); } } int menu() { printf("\nSelect a command:\n"); printf("1) display unconditional AVTAB\n"); printf("2) display conditional AVTAB (entirely)\n"); printf("3) display conditional AVTAG (only ENABLED rules)\n"); printf("4) display conditional AVTAB (only DISABLED rules)\n"); printf("5) display conditional bools\n"); printf("6) display conditional expressions\n"); printf("7) change a boolean value\n"); printf("8) display role transitions\n"); printf("\n"); printf("c) display policy capabilities\n"); printf("p) display the list of permissive types\n"); printf("u) display unknown handling setting\n"); printf("F) display filename_trans rules\n"); printf("\n"); printf("f) set output file\n"); printf("m) display menu\n"); printf("q) quit\n"); return 0; } int main(int argc, char **argv) { FILE *out_fp = stdout; char ans[81], OutfileName[121]; int fd, ret; struct stat sb; void *map; char *name; int state; struct policy_file pf; if (argc != 2) usage(argv[0]); fd = open(argv[1], O_RDONLY); if (fd < 0) { fprintf(stderr, "Can't open '%s': %s\n", argv[1], strerror(errno)); exit(1); } if (fstat(fd, &sb) < 0) { fprintf(stderr, "Can't stat '%s': %s\n", argv[1], strerror(errno)); exit(1); } map = mmap(NULL, sb.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 
0); if (map == MAP_FAILED) { fprintf(stderr, "Can't map '%s': %s\n", argv[1], strerror(errno)); exit(1); } /* read the binary policy */ fprintf(out_fp, "Reading policy...\n"); policy_file_init(&pf); pf.type = PF_USE_MEMORY; pf.data = map; pf.len = sb.st_size; if (policydb_init(&policydb)) { fprintf(stderr, "%s: Out of memory!\n", argv[0]); exit(1); } ret = policydb_read(&policydb, &pf, 1); if (ret) { fprintf(stderr, "%s: error(s) encountered while parsing configuration\n", argv[0]); exit(1); } fprintf(stdout, "binary policy file loaded\n\n"); close(fd); menu(); for (;;) { printf("\nCommand (\'m\' for menu): "); fgets(ans, sizeof(ans), stdin); switch (ans[0]) { case '1': display_avtab(&policydb.te_avtab, RENDER_UNCONDITIONAL, &policydb, out_fp); break; case '2': display_avtab(&policydb.te_cond_avtab, RENDER_CONDITIONAL, &policydb, out_fp); break; case '3': display_avtab(&policydb.te_cond_avtab, RENDER_ENABLED, &policydb, out_fp); break; case '4': display_avtab(&policydb.te_cond_avtab, RENDER_DISABLED, &policydb, out_fp); break; case '5': display_bools(&policydb, out_fp); break; case '6': display_cond_expressions(&policydb, out_fp); break; case '7': printf("name? "); fgets(ans, sizeof(ans), stdin); ans[strlen(ans) - 1] = 0; name = malloc((strlen(ans) + 1) * sizeof(char)); if (name == NULL) { fprintf(stderr, "couldn't malloc string.\n"); break; } strcpy(name, ans); printf("state? 
"); fgets(ans, sizeof(ans), stdin); ans[strlen(ans) - 1] = 0; if (atoi(ans)) state = 1; else state = 0; change_bool(name, state, &policydb, out_fp); free(name); break; case '8': display_role_trans(&policydb, out_fp); break; case 'c': display_policycaps(&policydb, out_fp); break; case 'p': display_permissive(&policydb, out_fp); break; case 'u': case 'U': display_handle_unknown(&policydb, out_fp); break; case 'f': printf ("\nFilename for output (<CR> for screen output): "); fgets(OutfileName, sizeof(OutfileName), stdin); OutfileName[strlen(OutfileName) - 1] = '\0'; /* fix_string (remove LF) */ if (strlen(OutfileName) == 0) out_fp = stdout; else if ((out_fp = fopen(OutfileName, "w")) == NULL) { fprintf(stderr, "Cannot open output file %s\n", OutfileName); out_fp = stdout; } if (out_fp != stdout) printf("\nOutput to file: %s\n", OutfileName); break; case 'F': display_filename_trans(&policydb, out_fp); break; case 'q': policydb_destroy(&policydb); exit(0); break; case 'm': menu(); break; default: printf("\nInvalid choice\n"); menu(); break; } } } /* FLASK */
gpl-2.0
ZeroInfinityXDA/HelixKernel_Nougat
net/batman-adv/gateway_client.c
291
24528
/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: * * Marek Lindner * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "main.h" #include "sysfs.h" #include "gateway_client.h" #include "gateway_common.h" #include "hard-interface.h" #include "originator.h" #include "translation-table.h" #include "routing.h" #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/udp.h> #include <linux/if_vlan.h> /* These are the offsets of the "hw type" and "hw address length" in the dhcp * packet starting at the beginning of the dhcp header */ #define BATADV_DHCP_HTYPE_OFFSET 1 #define BATADV_DHCP_HLEN_OFFSET 2 /* Value of htype representing Ethernet */ #define BATADV_DHCP_HTYPE_ETHERNET 0x01 /* This is the offset of the "chaddr" field in the dhcp packet starting at the * beginning of the dhcp header */ #define BATADV_DHCP_CHADDR_OFFSET 28 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node) { if (atomic_dec_and_test(&gw_node->refcount)) { batadv_orig_node_free_ref(gw_node->orig_node); kfree_rcu(gw_node, rcu); } } static struct batadv_gw_node * batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv) { struct batadv_gw_node *gw_node; rcu_read_lock(); gw_node = rcu_dereference(bat_priv->gw.curr_gw); if (!gw_node) goto out; if (!atomic_inc_not_zero(&gw_node->refcount)) gw_node = NULL; out: rcu_read_unlock(); return gw_node; } struct batadv_orig_node * batadv_gw_get_selected_orig(struct batadv_priv *bat_priv) { struct batadv_gw_node 
*gw_node; struct batadv_orig_node *orig_node = NULL; gw_node = batadv_gw_get_selected_gw_node(bat_priv); if (!gw_node) goto out; rcu_read_lock(); orig_node = gw_node->orig_node; if (!orig_node) goto unlock; if (!atomic_inc_not_zero(&orig_node->refcount)) orig_node = NULL; unlock: rcu_read_unlock(); out: if (gw_node) batadv_gw_node_free_ref(gw_node); return orig_node; } static void batadv_gw_select(struct batadv_priv *bat_priv, struct batadv_gw_node *new_gw_node) { struct batadv_gw_node *curr_gw_node; spin_lock_bh(&bat_priv->gw.list_lock); if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) new_gw_node = NULL; curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1); rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node); if (curr_gw_node) batadv_gw_node_free_ref(curr_gw_node); spin_unlock_bh(&bat_priv->gw.list_lock); } /** * batadv_gw_reselect - force a gateway reselection * @bat_priv: the bat priv with all the soft interface information * * Set a flag to remind the GW component to perform a new gateway reselection. * However this function does not ensure that the current gateway is going to be * deselected. The reselection mechanism may elect the same gateway once again. * * This means that invoking batadv_gw_reselect() does not guarantee a gateway * change and therefore a uevent is not necessarily expected. 
*/ void batadv_gw_reselect(struct batadv_priv *bat_priv) { atomic_set(&bat_priv->gw.reselect, 1); } static struct batadv_gw_node * batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) { struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo; struct batadv_gw_node *gw_node, *curr_gw = NULL; uint32_t max_gw_factor = 0, tmp_gw_factor = 0; uint32_t gw_divisor; uint8_t max_tq = 0; uint8_t tq_avg; struct batadv_orig_node *orig_node; gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE; gw_divisor *= 64; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { if (gw_node->deleted) continue; orig_node = gw_node->orig_node; router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router) continue; router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto next; if (!atomic_inc_not_zero(&gw_node->refcount)) goto next; tq_avg = router_ifinfo->bat_iv.tq_avg; switch (atomic_read(&bat_priv->gw_sel_class)) { case 1: /* fast connection */ tmp_gw_factor = tq_avg * tq_avg; tmp_gw_factor *= gw_node->bandwidth_down; tmp_gw_factor *= 100 * 100; tmp_gw_factor /= gw_divisor; if ((tmp_gw_factor > max_gw_factor) || ((tmp_gw_factor == max_gw_factor) && (tq_avg > max_tq))) { if (curr_gw) batadv_gw_node_free_ref(curr_gw); curr_gw = gw_node; atomic_inc(&curr_gw->refcount); } break; default: /* 2: stable connection (use best statistic) * 3: fast-switch (use best statistic but change as * soon as a better gateway appears) * XX: late-switch (use best statistic but change as * soon as a better gateway appears which has * $routing_class more tq points) */ if (tq_avg > max_tq) { if (curr_gw) batadv_gw_node_free_ref(curr_gw); curr_gw = gw_node; atomic_inc(&curr_gw->refcount); } break; } if (tq_avg > max_tq) max_tq = tq_avg; if (tmp_gw_factor > max_gw_factor) max_gw_factor = tmp_gw_factor; batadv_gw_node_free_ref(gw_node); next: batadv_neigh_node_free_ref(router); if (router_ifinfo) 
batadv_neigh_ifinfo_free_ref(router_ifinfo); } rcu_read_unlock(); return curr_gw; } /** * batadv_gw_check_client_stop - check if client mode has been switched off * @bat_priv: the bat priv with all the soft interface information * * This function assumes the caller has checked that the gw state *is actually * changing*. This function is not supposed to be called when there is no state * change. */ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv) { struct batadv_gw_node *curr_gw; if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) return; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!curr_gw) return; /* deselect the current gateway so that next time that client mode is * enabled a proper GW_ADD event can be sent */ batadv_gw_select(bat_priv, NULL); /* if batman-adv is switching the gw client mode off and a gateway was * already selected, send a DEL uevent */ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL); batadv_gw_node_free_ref(curr_gw); } void batadv_gw_election(struct batadv_priv *bat_priv) { struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL; struct batadv_neigh_node *router = NULL; struct batadv_neigh_ifinfo *router_ifinfo = NULL; char gw_addr[18] = { '\0' }; if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) goto out; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw) goto out; next_gw = batadv_gw_get_best_gw_node(bat_priv); if (curr_gw == next_gw) goto out; if (next_gw) { sprintf(gw_addr, "%pM", next_gw->orig_node->orig); router = batadv_orig_router_get(next_gw->orig_node, BATADV_IF_DEFAULT); if (!router) { batadv_gw_reselect(bat_priv); goto out; } router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) { batadv_gw_reselect(bat_priv); goto out; } } if ((curr_gw) && (!next_gw)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Removing selected gateway - no gateway in range\n"); 
batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL); } else if ((!curr_gw) && (next_gw)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n", next_gw->orig_node->orig, next_gw->bandwidth_down / 10, next_gw->bandwidth_down % 10, next_gw->bandwidth_up / 10, next_gw->bandwidth_up % 10, router_ifinfo->bat_iv.tq_avg); batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD, gw_addr); } else { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n", next_gw->orig_node->orig, next_gw->bandwidth_down / 10, next_gw->bandwidth_down % 10, next_gw->bandwidth_up / 10, next_gw->bandwidth_up % 10, router_ifinfo->bat_iv.tq_avg); batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE, gw_addr); } batadv_gw_select(bat_priv, next_gw); out: if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (next_gw) batadv_gw_node_free_ref(next_gw); if (router) batadv_neigh_node_free_ref(router); if (router_ifinfo) batadv_neigh_ifinfo_free_ref(router_ifinfo); } void batadv_gw_check_election(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_neigh_ifinfo *router_orig_tq = NULL; struct batadv_neigh_ifinfo *router_gw_tq = NULL; struct batadv_orig_node *curr_gw_orig; struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL; uint8_t gw_tq_avg, orig_tq_avg; curr_gw_orig = batadv_gw_get_selected_orig(bat_priv); if (!curr_gw_orig) goto reselect; router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT); if (!router_gw) goto reselect; router_gw_tq = batadv_neigh_ifinfo_get(router_gw, BATADV_IF_DEFAULT); if (!router_gw_tq) goto reselect; /* this node already is the gateway */ if (curr_gw_orig == orig_node) goto out; router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router_orig) goto out; router_orig_tq = batadv_neigh_ifinfo_get(router_orig, BATADV_IF_DEFAULT); if (!router_orig_tq) goto out; gw_tq_avg 
= router_gw_tq->bat_iv.tq_avg; orig_tq_avg = router_orig_tq->bat_iv.tq_avg; /* the TQ value has to be better */ if (orig_tq_avg < gw_tq_avg) goto out; /* if the routing class is greater than 3 the value tells us how much * greater the TQ value of the new gateway must be */ if ((atomic_read(&bat_priv->gw_sel_class) > 3) && (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) goto out; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", gw_tq_avg, orig_tq_avg); reselect: batadv_gw_reselect(bat_priv); out: if (curr_gw_orig) batadv_orig_node_free_ref(curr_gw_orig); if (router_gw) batadv_neigh_node_free_ref(router_gw); if (router_orig) batadv_neigh_node_free_ref(router_orig); if (router_gw_tq) batadv_neigh_ifinfo_free_ref(router_gw_tq); if (router_orig_tq) batadv_neigh_ifinfo_free_ref(router_orig_tq); } /** * batadv_gw_node_add - add gateway node to list of available gateways * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information */ static void batadv_gw_node_add(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_tvlv_gateway_data *gateway) { struct batadv_gw_node *gw_node; if (gateway->bandwidth_down == 0) return; if (!atomic_inc_not_zero(&orig_node->refcount)) return; gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); if (!gw_node) { batadv_orig_node_free_ref(orig_node); return; } INIT_HLIST_NODE(&gw_node->list); gw_node->orig_node = orig_node; atomic_set(&gw_node->refcount, 1); spin_lock_bh(&bat_priv->gw.list_lock); hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list); spin_unlock_bh(&bat_priv->gw.list_lock); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", orig_node->orig, ntohl(gateway->bandwidth_down) / 10, ntohl(gateway->bandwidth_down) % 10, ntohl(gateway->bandwidth_up) / 10, 
ntohl(gateway->bandwidth_up) % 10); } /** * batadv_gw_node_get - retrieve gateway node from list of available gateways * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * * Returns gateway node if found or NULL otherwise. */ static struct batadv_gw_node * batadv_gw_node_get(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_gw_node *gw_node_tmp, *gw_node = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) { if (gw_node_tmp->orig_node != orig_node) continue; if (gw_node_tmp->deleted) continue; if (!atomic_inc_not_zero(&gw_node_tmp->refcount)) continue; gw_node = gw_node_tmp; break; } rcu_read_unlock(); return gw_node; } /** * batadv_gw_node_update - update list of available gateways with changed * bandwidth information * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information */ void batadv_gw_node_update(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_tvlv_gateway_data *gateway) { struct batadv_gw_node *gw_node, *curr_gw = NULL; gw_node = batadv_gw_node_get(bat_priv, orig_node); if (!gw_node) { batadv_gw_node_add(bat_priv, orig_node, gateway); goto out; } if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) && (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))) goto out; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n", orig_node->orig, gw_node->bandwidth_down / 10, gw_node->bandwidth_down % 10, gw_node->bandwidth_up / 10, gw_node->bandwidth_up % 10, ntohl(gateway->bandwidth_down) / 10, ntohl(gateway->bandwidth_down) % 10, ntohl(gateway->bandwidth_up) / 10, ntohl(gateway->bandwidth_up) % 10); gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); gw_node->bandwidth_up = 
ntohl(gateway->bandwidth_up); gw_node->deleted = 0; if (ntohl(gateway->bandwidth_down) == 0) { gw_node->deleted = jiffies; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Gateway %pM removed from gateway list\n", orig_node->orig); /* Note: We don't need a NULL check here, since curr_gw never * gets dereferenced. */ curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (gw_node == curr_gw) batadv_gw_reselect(bat_priv); } out: if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (gw_node) batadv_gw_node_free_ref(gw_node); } void batadv_gw_node_delete(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_tvlv_gateway_data gateway; gateway.bandwidth_down = 0; gateway.bandwidth_up = 0; batadv_gw_node_update(bat_priv, orig_node, &gateway); } void batadv_gw_node_purge(struct batadv_priv *bat_priv) { struct batadv_gw_node *gw_node, *curr_gw; struct hlist_node *node_tmp; unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); int do_reselect = 0; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); spin_lock_bh(&bat_priv->gw.list_lock); hlist_for_each_entry_safe(gw_node, node_tmp, &bat_priv->gw.list, list) { if (((!gw_node->deleted) || (time_before(jiffies, gw_node->deleted + timeout))) && atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) continue; if (curr_gw == gw_node) do_reselect = 1; hlist_del_rcu(&gw_node->list); batadv_gw_node_free_ref(gw_node); } spin_unlock_bh(&bat_priv->gw.list_lock); /* gw_reselect() needs to acquire the gw_list_lock */ if (do_reselect) batadv_gw_reselect(bat_priv); if (curr_gw) batadv_gw_node_free_ref(curr_gw); } /* fails if orig_node has no router */ static int batadv_write_buffer_text(struct batadv_priv *bat_priv, struct seq_file *seq, const struct batadv_gw_node *gw_node) { struct batadv_gw_node *curr_gw; struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo = NULL; int ret = -1; router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); if (!router) goto out; 
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto out; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n", (curr_gw == gw_node ? "=>" : " "), gw_node->orig_node->orig, router_ifinfo->bat_iv.tq_avg, router->addr, router->if_incoming->net_dev->name, gw_node->bandwidth_down / 10, gw_node->bandwidth_down % 10, gw_node->bandwidth_up / 10, gw_node->bandwidth_up % 10); if (curr_gw) batadv_gw_node_free_ref(curr_gw); out: if (router_ifinfo) batadv_neigh_ifinfo_free_ref(router_ifinfo); if (router) batadv_neigh_node_free_ref(router); return ret; } int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); struct batadv_hard_iface *primary_if; struct batadv_gw_node *gw_node; int gw_count = 0; primary_if = batadv_seq_print_text_primary_if_get(seq); if (!primary_if) goto out; seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF", BATADV_SOURCE_VERSION, primary_if->net_dev->name, primary_if->net_dev->dev_addr, net_dev->name); rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { if (gw_node->deleted) continue; /* fails if orig_node has no router */ if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0) continue; gw_count++; } rcu_read_unlock(); if (gw_count == 0) seq_puts(seq, "No gateways in range ...\n"); out: if (primary_if) batadv_hardif_free_ref(primary_if); return 0; } /** * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message * @skb: the packet to check * @header_len: a pointer to the batman-adv header size * @chaddr: buffer where the client address will be stored. 
Valid * only if the function returns BATADV_DHCP_TO_CLIENT * * Returns: * - BATADV_DHCP_NO if the packet is not a dhcp message or if there was an error * while parsing it * - BATADV_DHCP_TO_SERVER if this is a message going to the DHCP server * - BATADV_DHCP_TO_CLIENT if this is a message going to a DHCP client * * This function may re-allocate the data buffer of the skb passed as argument. */ enum batadv_dhcp_recipient batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, uint8_t *chaddr) { enum batadv_dhcp_recipient ret = BATADV_DHCP_NO; struct ethhdr *ethhdr; struct iphdr *iphdr; struct ipv6hdr *ipv6hdr; struct udphdr *udphdr; struct vlan_ethhdr *vhdr; int chaddr_offset; __be16 proto; uint8_t *p; /* check for ethernet header */ if (!pskb_may_pull(skb, *header_len + ETH_HLEN)) return BATADV_DHCP_NO; ethhdr = eth_hdr(skb); proto = ethhdr->h_proto; *header_len += ETH_HLEN; /* check for initial vlan header */ if (proto == htons(ETH_P_8021Q)) { if (!pskb_may_pull(skb, *header_len + VLAN_HLEN)) return BATADV_DHCP_NO; vhdr = vlan_eth_hdr(skb); proto = vhdr->h_vlan_encapsulated_proto; *header_len += VLAN_HLEN; } /* check for ip header */ switch (proto) { case htons(ETH_P_IP): if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr))) return BATADV_DHCP_NO; iphdr = (struct iphdr *)(skb->data + *header_len); *header_len += iphdr->ihl * 4; /* check for udp header */ if (iphdr->protocol != IPPROTO_UDP) return BATADV_DHCP_NO; break; case htons(ETH_P_IPV6): if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr))) return BATADV_DHCP_NO; ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len); *header_len += sizeof(*ipv6hdr); /* check for udp header */ if (ipv6hdr->nexthdr != IPPROTO_UDP) return BATADV_DHCP_NO; break; default: return BATADV_DHCP_NO; } if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) return BATADV_DHCP_NO; /* skb->data might have been reallocated by pskb_may_pull() */ ethhdr = eth_hdr(skb); if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) ethhdr 
= (struct ethhdr *)(skb->data + VLAN_HLEN); udphdr = (struct udphdr *)(skb->data + *header_len); *header_len += sizeof(*udphdr); /* check for bootp port */ switch (proto) { case htons(ETH_P_IP): if (udphdr->dest == htons(67)) ret = BATADV_DHCP_TO_SERVER; else if (udphdr->source == htons(67)) ret = BATADV_DHCP_TO_CLIENT; break; case htons(ETH_P_IPV6): if (udphdr->dest == htons(547)) ret = BATADV_DHCP_TO_SERVER; else if (udphdr->source == htons(547)) ret = BATADV_DHCP_TO_CLIENT; break; } chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ if (ret == BATADV_DHCP_TO_CLIENT && pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { /* check if the DHCP packet carries an Ethernet DHCP */ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) return BATADV_DHCP_NO; /* check if the DHCP packet carries a valid Ethernet address */ p = skb->data + *header_len + BATADV_DHCP_HLEN_OFFSET; if (*p != ETH_ALEN) return BATADV_DHCP_NO; ether_addr_copy(chaddr, skb->data + chaddr_offset); } return ret; } /** * batadv_gw_out_of_range - check if the dhcp request destination is the best gw * @bat_priv: the bat priv with all the soft interface information * @skb: the outgoing packet * * Check if the skb is a DHCP request and if it is sent to the current best GW * server. Due to topology changes it may be the case that the GW server * previously selected is not the best one anymore. * * Returns true if the packet destination is unicast and it is not the best gw, * false otherwise. * * This call might reallocate skb data. * Must be invoked only when the DHCP packet is going TO a DHCP SERVER. 
*/ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb) { struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; struct batadv_orig_node *orig_dst_node = NULL; struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL; struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo; struct ethhdr *ethhdr = (struct ethhdr *)skb->data; bool out_of_range = false; uint8_t curr_tq_avg; unsigned short vid; vid = batadv_get_vid(skb, 0); orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest, vid); if (!orig_dst_node) goto out; gw_node = batadv_gw_node_get(bat_priv, orig_dst_node); if (!gw_node) goto out; switch (atomic_read(&bat_priv->gw_mode)) { case BATADV_GW_MODE_SERVER: /* If we are a GW then we are our best GW. We can artificially * set the tq towards ourself as the maximum value */ curr_tq_avg = BATADV_TQ_MAX_VALUE; break; case BATADV_GW_MODE_CLIENT: curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!curr_gw) goto out; /* packet is going to our gateway */ if (curr_gw->orig_node == orig_dst_node) goto out; /* If the dhcp packet has been sent to a different gw, * we have to evaluate whether the old gw is still * reliable enough */ neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node, NULL); if (!neigh_curr) goto out; curr_ifinfo = batadv_neigh_ifinfo_get(neigh_curr, BATADV_IF_DEFAULT); if (!curr_ifinfo) goto out; curr_tq_avg = curr_ifinfo->bat_iv.tq_avg; batadv_neigh_ifinfo_free_ref(curr_ifinfo); break; case BATADV_GW_MODE_OFF: default: goto out; } neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL); if (!neigh_old) goto out; old_ifinfo = batadv_neigh_ifinfo_get(neigh_old, BATADV_IF_DEFAULT); if (!old_ifinfo) goto out; if ((curr_tq_avg - old_ifinfo->bat_iv.tq_avg) > BATADV_GW_THRESHOLD) out_of_range = true; batadv_neigh_ifinfo_free_ref(old_ifinfo); out: if (orig_dst_node) batadv_orig_node_free_ref(orig_dst_node); if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (gw_node) 
batadv_gw_node_free_ref(gw_node); if (neigh_old) batadv_neigh_node_free_ref(neigh_old); if (neigh_curr) batadv_neigh_node_free_ref(neigh_curr); return out_of_range; }
gpl-2.0
CyanogenMod/android_kernel_samsung_galaxytab-cdma
drivers/video/sis/sis_main.c
803
185765
/* * SiS 300/540/630[S]/730[S], * SiS 315[E|PRO]/550/[M]65x/[M]66x[F|M|G]X/[M]74x[GX]/330/[M]76x[GX], * XGI V3XT/V5/V8, Z7 * frame buffer driver for Linux kernels >= 2.4.14 and >=2.6.3 * * Copyright (C) 2001-2005 Thomas Winischhofer, Vienna, Austria. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the named License, * or any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA * * Author: Thomas Winischhofer <thomas@winischhofer.net> * * Author of (practically wiped) code base: * SiS (www.sis.com) * Copyright (C) 1999 Silicon Integrated Systems, Inc. 
* * See http://www.winischhofer.net/ for more information and updates * * Originally based on the VBE 2.0 compliant graphic boards framebuffer driver, * which is (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de> * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/screen_info.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/selection.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/uaccess.h> #include <asm/io.h> #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif #include "sis.h" #include "sis_main.h" static void sisfb_handle_command(struct sis_video_info *ivideo, struct sisfb_cmd *sisfb_command); /* ------------------ Internal helper routines ----------------- */ static void __init sisfb_setdefaultparms(void) { sisfb_off = 0; sisfb_parm_mem = 0; sisfb_accel = -1; sisfb_ypan = -1; sisfb_max = -1; sisfb_userom = -1; sisfb_useoem = -1; sisfb_mode_idx = -1; sisfb_parm_rate = -1; sisfb_crt1off = 0; sisfb_forcecrt1 = -1; sisfb_crt2type = -1; sisfb_crt2flags = 0; sisfb_pdc = 0xff; sisfb_pdca = 0xff; sisfb_scalelcd = -1; sisfb_specialtiming = CUT_NONE; sisfb_lvdshl = -1; sisfb_dstn = 0; sisfb_fstn = 0; sisfb_tvplug = -1; sisfb_tvstd = -1; sisfb_tvxposoffset = 0; sisfb_tvyposoffset = 0; sisfb_nocrt2rate = 0; #if !defined(__i386__) && !defined(__x86_64__) sisfb_resetcard = 0; sisfb_videoram = 0; #endif } /* ------------- Parameter parsing -------------- */ static void __devinit sisfb_search_vesamode(unsigned int vesamode, bool quiet) { int i = 0, j = 0; /* We don't know the hardware specs yet and there is no ivideo */ if(vesamode == 0) { if(!quiet) printk(KERN_ERR "sisfb: Invalid mode. 
Using default.\n"); sisfb_mode_idx = DEFAULT_MODE; return; } vesamode &= 0x1dff; /* Clean VESA mode number from other flags */ while(sisbios_mode[i++].mode_no[0] != 0) { if( (sisbios_mode[i-1].vesa_mode_no_1 == vesamode) || (sisbios_mode[i-1].vesa_mode_no_2 == vesamode) ) { if(sisfb_fstn) { if(sisbios_mode[i-1].mode_no[1] == 0x50 || sisbios_mode[i-1].mode_no[1] == 0x56 || sisbios_mode[i-1].mode_no[1] == 0x53) continue; } else { if(sisbios_mode[i-1].mode_no[1] == 0x5a || sisbios_mode[i-1].mode_no[1] == 0x5b) continue; } sisfb_mode_idx = i - 1; j = 1; break; } } if((!j) && !quiet) printk(KERN_ERR "sisfb: Invalid VESA mode 0x%x'\n", vesamode); } static void __devinit sisfb_search_mode(char *name, bool quiet) { unsigned int j = 0, xres = 0, yres = 0, depth = 0, rate = 0; int i = 0; char strbuf[16], strbuf1[20]; char *nameptr = name; /* We don't know the hardware specs yet and there is no ivideo */ if(name == NULL) { if(!quiet) printk(KERN_ERR "sisfb: Internal error, using default mode.\n"); sisfb_mode_idx = DEFAULT_MODE; return; } if(!strnicmp(name, sisbios_mode[MODE_INDEX_NONE].name, strlen(name))) { if(!quiet) printk(KERN_ERR "sisfb: Mode 'none' not supported anymore. 
Using default.\n"); sisfb_mode_idx = DEFAULT_MODE; return; } if(strlen(name) <= 19) { strcpy(strbuf1, name); for(i = 0; i < strlen(strbuf1); i++) { if(strbuf1[i] < '0' || strbuf1[i] > '9') strbuf1[i] = ' '; } /* This does some fuzzy mode naming detection */ if(sscanf(strbuf1, "%u %u %u %u", &xres, &yres, &depth, &rate) == 4) { if((rate <= 32) || (depth > 32)) { j = rate; rate = depth; depth = j; } sprintf(strbuf, "%ux%ux%u", xres, yres, depth); nameptr = strbuf; sisfb_parm_rate = rate; } else if(sscanf(strbuf1, "%u %u %u", &xres, &yres, &depth) == 3) { sprintf(strbuf, "%ux%ux%u", xres, yres, depth); nameptr = strbuf; } else { xres = 0; if((sscanf(strbuf1, "%u %u", &xres, &yres) == 2) && (xres != 0)) { sprintf(strbuf, "%ux%ux8", xres, yres); nameptr = strbuf; } else { sisfb_search_vesamode(simple_strtoul(name, NULL, 0), quiet); return; } } } i = 0; j = 0; while(sisbios_mode[i].mode_no[0] != 0) { if(!strnicmp(nameptr, sisbios_mode[i++].name, strlen(nameptr))) { if(sisfb_fstn) { if(sisbios_mode[i-1].mode_no[1] == 0x50 || sisbios_mode[i-1].mode_no[1] == 0x56 || sisbios_mode[i-1].mode_no[1] == 0x53) continue; } else { if(sisbios_mode[i-1].mode_no[1] == 0x5a || sisbios_mode[i-1].mode_no[1] == 0x5b) continue; } sisfb_mode_idx = i - 1; j = 1; break; } } if((!j) && !quiet) printk(KERN_ERR "sisfb: Invalid mode '%s'\n", nameptr); } #ifndef MODULE static void __devinit sisfb_get_vga_mode_from_kernel(void) { #ifdef CONFIG_X86 char mymode[32]; int mydepth = screen_info.lfb_depth; if(screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) return; if( (screen_info.lfb_width >= 320) && (screen_info.lfb_width <= 2048) && (screen_info.lfb_height >= 200) && (screen_info.lfb_height <= 1536) && (mydepth >= 8) && (mydepth <= 32) ) { if(mydepth == 24) mydepth = 32; sprintf(mymode, "%ux%ux%u", screen_info.lfb_width, screen_info.lfb_height, mydepth); printk(KERN_DEBUG "sisfb: Using vga mode %s pre-set by kernel as default\n", mymode); sisfb_search_mode(mymode, true); } #endif return; } #endif 
static void __init sisfb_search_crt2type(const char *name) { int i = 0; /* We don't know the hardware specs yet and there is no ivideo */ if(name == NULL) return; while(sis_crt2type[i].type_no != -1) { if(!strnicmp(name, sis_crt2type[i].name, strlen(sis_crt2type[i].name))) { sisfb_crt2type = sis_crt2type[i].type_no; sisfb_tvplug = sis_crt2type[i].tvplug_no; sisfb_crt2flags = sis_crt2type[i].flags; break; } i++; } sisfb_dstn = (sisfb_crt2flags & FL_550_DSTN) ? 1 : 0; sisfb_fstn = (sisfb_crt2flags & FL_550_FSTN) ? 1 : 0; if(sisfb_crt2type < 0) printk(KERN_ERR "sisfb: Invalid CRT2 type: %s\n", name); } static void __init sisfb_search_tvstd(const char *name) { int i = 0; /* We don't know the hardware specs yet and there is no ivideo */ if(name == NULL) return; while(sis_tvtype[i].type_no != -1) { if(!strnicmp(name, sis_tvtype[i].name, strlen(sis_tvtype[i].name))) { sisfb_tvstd = sis_tvtype[i].type_no; break; } i++; } } static void __init sisfb_search_specialtiming(const char *name) { int i = 0; bool found = false; /* We don't know the hardware specs yet and there is no ivideo */ if(name == NULL) return; if(!strnicmp(name, "none", 4)) { sisfb_specialtiming = CUT_FORCENONE; printk(KERN_DEBUG "sisfb: Special timing disabled\n"); } else { while(mycustomttable[i].chipID != 0) { if(!strnicmp(name,mycustomttable[i].optionName, strlen(mycustomttable[i].optionName))) { sisfb_specialtiming = mycustomttable[i].SpecialID; found = true; printk(KERN_INFO "sisfb: Special timing for %s %s forced (\"%s\")\n", mycustomttable[i].vendorName, mycustomttable[i].cardName, mycustomttable[i].optionName); break; } i++; } if(!found) { printk(KERN_WARNING "sisfb: Invalid SpecialTiming parameter, valid are:"); printk(KERN_WARNING "\t\"none\" (to disable special timings)\n"); i = 0; while(mycustomttable[i].chipID != 0) { printk(KERN_WARNING "\t\"%s\" (for %s %s)\n", mycustomttable[i].optionName, mycustomttable[i].vendorName, mycustomttable[i].cardName); i++; } } } } /* ----------- Various detection 
routines ----------- */ static void __devinit sisfb_detect_custom_timing(struct sis_video_info *ivideo) { unsigned char *biosver = NULL; unsigned char *biosdate = NULL; bool footprint; u32 chksum = 0; int i, j; if(ivideo->SiS_Pr.UseROM) { biosver = ivideo->SiS_Pr.VirtualRomBase + 0x06; biosdate = ivideo->SiS_Pr.VirtualRomBase + 0x2c; for(i = 0; i < 32768; i++) chksum += ivideo->SiS_Pr.VirtualRomBase[i]; } i = 0; do { if( (mycustomttable[i].chipID == ivideo->chip) && ((!strlen(mycustomttable[i].biosversion)) || (ivideo->SiS_Pr.UseROM && (!strncmp(mycustomttable[i].biosversion, biosver, strlen(mycustomttable[i].biosversion))))) && ((!strlen(mycustomttable[i].biosdate)) || (ivideo->SiS_Pr.UseROM && (!strncmp(mycustomttable[i].biosdate, biosdate, strlen(mycustomttable[i].biosdate))))) && ((!mycustomttable[i].bioschksum) || (ivideo->SiS_Pr.UseROM && (mycustomttable[i].bioschksum == chksum))) && (mycustomttable[i].pcisubsysvendor == ivideo->subsysvendor) && (mycustomttable[i].pcisubsyscard == ivideo->subsysdevice) ) { footprint = true; for(j = 0; j < 5; j++) { if(mycustomttable[i].biosFootprintAddr[j]) { if(ivideo->SiS_Pr.UseROM) { if(ivideo->SiS_Pr.VirtualRomBase[mycustomttable[i].biosFootprintAddr[j]] != mycustomttable[i].biosFootprintData[j]) { footprint = false; } } else footprint = false; } } if(footprint) { ivideo->SiS_Pr.SiS_CustomT = mycustomttable[i].SpecialID; printk(KERN_DEBUG "sisfb: Identified [%s %s], special timing applies\n", mycustomttable[i].vendorName, mycustomttable[i].cardName); printk(KERN_DEBUG "sisfb: [specialtiming parameter name: %s]\n", mycustomttable[i].optionName); break; } } i++; } while(mycustomttable[i].chipID); } static bool __devinit sisfb_interpret_edid(struct sisfb_monitor *monitor, u8 *buffer) { int i, j, xres, yres, refresh, index; u32 emodes; if(buffer[0] != 0x00 || buffer[1] != 0xff || buffer[2] != 0xff || buffer[3] != 0xff || buffer[4] != 0xff || buffer[5] != 0xff || buffer[6] != 0xff || buffer[7] != 0x00) { printk(KERN_DEBUG 
"sisfb: Bad EDID header\n"); return false; } if(buffer[0x12] != 0x01) { printk(KERN_INFO "sisfb: EDID version %d not supported\n", buffer[0x12]); return false; } monitor->feature = buffer[0x18]; if(!(buffer[0x14] & 0x80)) { if(!(buffer[0x14] & 0x08)) { printk(KERN_INFO "sisfb: WARNING: Monitor does not support separate syncs\n"); } } if(buffer[0x13] >= 0x01) { /* EDID V1 rev 1 and 2: Search for monitor descriptor * to extract ranges */ j = 0x36; for(i=0; i<4; i++) { if(buffer[j] == 0x00 && buffer[j + 1] == 0x00 && buffer[j + 2] == 0x00 && buffer[j + 3] == 0xfd && buffer[j + 4] == 0x00) { monitor->hmin = buffer[j + 7]; monitor->hmax = buffer[j + 8]; monitor->vmin = buffer[j + 5]; monitor->vmax = buffer[j + 6]; monitor->dclockmax = buffer[j + 9] * 10 * 1000; monitor->datavalid = true; break; } j += 18; } } if(!monitor->datavalid) { /* Otherwise: Get a range from the list of supported * Estabished Timings. This is not entirely accurate, * because fixed frequency monitors are not supported * that way. 
*/ monitor->hmin = 65535; monitor->hmax = 0; monitor->vmin = 65535; monitor->vmax = 0; monitor->dclockmax = 0; emodes = buffer[0x23] | (buffer[0x24] << 8) | (buffer[0x25] << 16); for(i = 0; i < 13; i++) { if(emodes & sisfb_ddcsmodes[i].mask) { if(monitor->hmin > sisfb_ddcsmodes[i].h) monitor->hmin = sisfb_ddcsmodes[i].h; if(monitor->hmax < sisfb_ddcsmodes[i].h) monitor->hmax = sisfb_ddcsmodes[i].h + 1; if(monitor->vmin > sisfb_ddcsmodes[i].v) monitor->vmin = sisfb_ddcsmodes[i].v; if(monitor->vmax < sisfb_ddcsmodes[i].v) monitor->vmax = sisfb_ddcsmodes[i].v; if(monitor->dclockmax < sisfb_ddcsmodes[i].d) monitor->dclockmax = sisfb_ddcsmodes[i].d; } } index = 0x26; for(i = 0; i < 8; i++) { xres = (buffer[index] + 31) * 8; switch(buffer[index + 1] & 0xc0) { case 0xc0: yres = (xres * 9) / 16; break; case 0x80: yres = (xres * 4) / 5; break; case 0x40: yres = (xres * 3) / 4; break; default: yres = xres; break; } refresh = (buffer[index + 1] & 0x3f) + 60; if((xres >= 640) && (yres >= 480)) { for(j = 0; j < 8; j++) { if((xres == sisfb_ddcfmodes[j].x) && (yres == sisfb_ddcfmodes[j].y) && (refresh == sisfb_ddcfmodes[j].v)) { if(monitor->hmin > sisfb_ddcfmodes[j].h) monitor->hmin = sisfb_ddcfmodes[j].h; if(monitor->hmax < sisfb_ddcfmodes[j].h) monitor->hmax = sisfb_ddcfmodes[j].h + 1; if(monitor->vmin > sisfb_ddcsmodes[j].v) monitor->vmin = sisfb_ddcsmodes[j].v; if(monitor->vmax < sisfb_ddcsmodes[j].v) monitor->vmax = sisfb_ddcsmodes[j].v; if(monitor->dclockmax < sisfb_ddcsmodes[j].d) monitor->dclockmax = sisfb_ddcsmodes[j].d; } } } index += 2; } if((monitor->hmin <= monitor->hmax) && (monitor->vmin <= monitor->vmax)) { monitor->datavalid = true; } } return monitor->datavalid; } static void __devinit sisfb_handle_ddc(struct sis_video_info *ivideo, struct sisfb_monitor *monitor, int crtno) { unsigned short temp, i, realcrtno = crtno; unsigned char buffer[256]; monitor->datavalid = false; if(crtno) { if(ivideo->vbflags & CRT2_LCD) realcrtno = 1; else if(ivideo->vbflags & 
CRT2_VGA) realcrtno = 2; else return; } if((ivideo->sisfb_crt1off) && (!crtno)) return; temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine, realcrtno, 0, &buffer[0], ivideo->vbflags2); if((!temp) || (temp == 0xffff)) { printk(KERN_INFO "sisfb: CRT%d DDC probing failed\n", crtno + 1); return; } else { printk(KERN_INFO "sisfb: CRT%d DDC supported\n", crtno + 1); printk(KERN_INFO "sisfb: CRT%d DDC level: %s%s%s%s\n", crtno + 1, (temp & 0x1a) ? "" : "[none of the supported]", (temp & 0x02) ? "2 " : "", (temp & 0x08) ? "D&P" : "", (temp & 0x10) ? "FPDI-2" : ""); if(temp & 0x02) { i = 3; /* Number of retrys */ do { temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine, realcrtno, 1, &buffer[0], ivideo->vbflags2); } while((temp) && i--); if(!temp) { if(sisfb_interpret_edid(monitor, &buffer[0])) { printk(KERN_INFO "sisfb: Monitor range H %d-%dKHz, V %d-%dHz, Max. dotclock %dMHz\n", monitor->hmin, monitor->hmax, monitor->vmin, monitor->vmax, monitor->dclockmax / 1000); } else { printk(KERN_INFO "sisfb: CRT%d DDC EDID corrupt\n", crtno + 1); } } else { printk(KERN_INFO "sisfb: CRT%d DDC reading failed\n", crtno + 1); } } else { printk(KERN_INFO "sisfb: VESA D&P and FPDI-2 not supported yet\n"); } } } /* -------------- Mode validation --------------- */ static bool sisfb_verify_rate(struct sis_video_info *ivideo, struct sisfb_monitor *monitor, int mode_idx, int rate_idx, int rate) { int htotal, vtotal; unsigned int dclock, hsync; if(!monitor->datavalid) return true; if(mode_idx < 0) return false; /* Skip for 320x200, 320x240, 640x400 */ switch(sisbios_mode[mode_idx].mode_no[ivideo->mni]) { case 0x59: case 0x41: case 0x4f: case 0x50: case 0x56: case 0x53: case 0x2f: case 0x5d: case 0x5e: return true; #ifdef CONFIG_FB_SIS_315 case 0x5a: case 0x5b: if(ivideo->sisvga_engine == SIS_315_VGA) return true; #endif } if(rate < (monitor->vmin - 1)) return false; if(rate > (monitor->vmax + 1)) return false; 
if(sisfb_gettotalfrommode(&ivideo->SiS_Pr, sisbios_mode[mode_idx].mode_no[ivideo->mni], &htotal, &vtotal, rate_idx)) { dclock = (htotal * vtotal * rate) / 1000; if(dclock > (monitor->dclockmax + 1000)) return false; hsync = dclock / htotal; if(hsync < (monitor->hmin - 1)) return false; if(hsync > (monitor->hmax + 1)) return false; } else { return false; } return true; } static int sisfb_validate_mode(struct sis_video_info *ivideo, int myindex, u32 vbflags) { u16 xres=0, yres, myres; #ifdef CONFIG_FB_SIS_300 if(ivideo->sisvga_engine == SIS_300_VGA) { if(!(sisbios_mode[myindex].chipset & MD_SIS300)) return -1 ; } #endif #ifdef CONFIG_FB_SIS_315 if(ivideo->sisvga_engine == SIS_315_VGA) { if(!(sisbios_mode[myindex].chipset & MD_SIS315)) return -1; } #endif myres = sisbios_mode[myindex].yres; switch(vbflags & VB_DISPTYPE_DISP2) { case CRT2_LCD: xres = ivideo->lcdxres; yres = ivideo->lcdyres; if((ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL848) && (ivideo->SiS_Pr.SiS_CustomT != CUT_PANEL856)) { if(sisbios_mode[myindex].xres > xres) return -1; if(myres > yres) return -1; } if(ivideo->sisfb_fstn) { if(sisbios_mode[myindex].xres == 320) { if(myres == 240) { switch(sisbios_mode[myindex].mode_no[1]) { case 0x50: myindex = MODE_FSTN_8; break; case 0x56: myindex = MODE_FSTN_16; break; case 0x53: return -1; } } } } if(SiS_GetModeID_LCD(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres, sisbios_mode[myindex].yres, 0, ivideo->sisfb_fstn, ivideo->SiS_Pr.SiS_CustomT, xres, yres, ivideo->vbflags2) < 0x14) { return -1; } break; case CRT2_TV: if(SiS_GetModeID_TV(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres, sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) { return -1; } break; case CRT2_VGA: if(SiS_GetModeID_VGA2(ivideo->sisvga_engine, vbflags, sisbios_mode[myindex].xres, sisbios_mode[myindex].yres, 0, ivideo->vbflags2) < 0x14) { return -1; } break; } return myindex; } static u8 sisfb_search_refresh_rate(struct sis_video_info *ivideo, unsigned int rate, int 
mode_idx) { int i = 0; u16 xres = sisbios_mode[mode_idx].xres; u16 yres = sisbios_mode[mode_idx].yres; ivideo->rate_idx = 0; while((sisfb_vrate[i].idx != 0) && (sisfb_vrate[i].xres <= xres)) { if((sisfb_vrate[i].xres == xres) && (sisfb_vrate[i].yres == yres)) { if(sisfb_vrate[i].refresh == rate) { ivideo->rate_idx = sisfb_vrate[i].idx; break; } else if(sisfb_vrate[i].refresh > rate) { if((sisfb_vrate[i].refresh - rate) <= 3) { DPRINTK("sisfb: Adjusting rate from %d up to %d\n", rate, sisfb_vrate[i].refresh); ivideo->rate_idx = sisfb_vrate[i].idx; ivideo->refresh_rate = sisfb_vrate[i].refresh; } else if((sisfb_vrate[i].idx != 1) && ((rate - sisfb_vrate[i-1].refresh) <= 2)) { DPRINTK("sisfb: Adjusting rate from %d down to %d\n", rate, sisfb_vrate[i-1].refresh); ivideo->rate_idx = sisfb_vrate[i-1].idx; ivideo->refresh_rate = sisfb_vrate[i-1].refresh; } break; } else if((rate - sisfb_vrate[i].refresh) <= 2) { DPRINTK("sisfb: Adjusting rate from %d down to %d\n", rate, sisfb_vrate[i].refresh); ivideo->rate_idx = sisfb_vrate[i].idx; break; } } i++; } if(ivideo->rate_idx > 0) { return ivideo->rate_idx; } else { printk(KERN_INFO "sisfb: Unsupported rate %d for %dx%d\n", rate, xres, yres); return 0; } } static bool sisfb_bridgeisslave(struct sis_video_info *ivideo) { unsigned char P1_00; if(!(ivideo->vbflags2 & VB2_VIDEOBRIDGE)) return false; inSISIDXREG(SISPART1,0x00,P1_00); if( ((ivideo->sisvga_engine == SIS_300_VGA) && (P1_00 & 0xa0) == 0x20) || ((ivideo->sisvga_engine == SIS_315_VGA) && (P1_00 & 0x50) == 0x10) ) { return true; } else { return false; } } static bool sisfballowretracecrt1(struct sis_video_info *ivideo) { u8 temp; inSISIDXREG(SISCR,0x17,temp); if(!(temp & 0x80)) return false; inSISIDXREG(SISSR,0x1f,temp); if(temp & 0xc0) return false; return true; } static bool sisfbcheckvretracecrt1(struct sis_video_info *ivideo) { if(!sisfballowretracecrt1(ivideo)) return false; if(inSISREG(SISINPSTAT) & 0x08) return true; else return false; } static void 
sisfbwaitretracecrt1(struct sis_video_info *ivideo)
{
	/* Busy-wait for the start of the next CRT1 vertical retrace:
	 * first wait until retrace begins, then until it ends. The
	 * 65536-iteration watchdogs bound each wait so a stuck status
	 * bit cannot hang the caller.
	 */
	int watchdog;

	if(!sisfballowretracecrt1(ivideo))
		return;

	watchdog = 65536;
	while((!(inSISREG(SISINPSTAT) & 0x08)) && --watchdog);
	watchdog = 65536;
	while((inSISREG(SISINPSTAT) & 0x08) && --watchdog);
}

/* Return true while CRT2 is in vertical retrace (bridge Part1 register,
 * index depends on engine generation; bit 1 is the retrace flag).
 */
static bool
sisfbcheckvretracecrt2(struct sis_video_info *ivideo)
{
	unsigned char temp, reg;

	switch(ivideo->sisvga_engine) {
	case SIS_300_VGA: reg = 0x25; break;
	case SIS_315_VGA: reg = 0x30; break;
	default:	  return false;
	}

	inSISIDXREG(SISPART1, reg, temp);
	if(temp & 0x02) return true;
	else		return false;
}

/* Check vertical retrace on whichever head is active: CRT2 when a second
 * display is driven and the bridge is not in slave mode, CRT1 otherwise.
 */
static bool
sisfb_CheckVBRetrace(struct sis_video_info *ivideo)
{
	if(ivideo->currentvbflags & VB_DISPTYPE_DISP2) {
		if(!sisfb_bridgeisslave(ivideo)) {
			return sisfbcheckvretracecrt2(ivideo);
		}
	}
	return sisfbcheckvretracecrt1(ivideo);
}

/* Build FB_VBLANK_* status flags for the FBIOGET_VBLANK ioctl and return
 * the current vertical/horizontal counters through vcount/hcount.
 * Reads the bridge's Part1 registers for CRT2, the VGA core's CR/input
 * status registers for CRT1.
 */
static u32
sisfb_setupvbblankflags(struct sis_video_info *ivideo, u32 *vcount, u32 *hcount)
{
	u8 idx, reg1, reg2, reg3, reg4;
	u32 ret = 0;

	(*vcount) = (*hcount) = 0;

	if((ivideo->currentvbflags & VB_DISPTYPE_DISP2) &&
	   (!(sisfb_bridgeisslave(ivideo)))) {
		ret |= (FB_VBLANK_HAVE_VSYNC  |
			FB_VBLANK_HAVE_HBLANK |
			FB_VBLANK_HAVE_VBLANK |
			FB_VBLANK_HAVE_VCOUNT |
			FB_VBLANK_HAVE_HCOUNT);
		switch(ivideo->sisvga_engine) {
			case SIS_300_VGA: idx = 0x25; break;
			default:
			case SIS_315_VGA: idx = 0x30; break;
		}
		inSISIDXREG(SISPART1,(idx+0),reg1); /* 30 */
		inSISIDXREG(SISPART1,(idx+1),reg2); /* 31 */
		inSISIDXREG(SISPART1,(idx+2),reg3); /* 32 */
		inSISIDXREG(SISPART1,(idx+3),reg4); /* 33 */
		if(reg1 & 0x01) ret |= FB_VBLANK_VBLANKING;
		if(reg1 & 0x02) ret |= FB_VBLANK_VSYNCING;
		if(reg4 & 0x80) ret |= FB_VBLANK_HBLANKING;
		(*vcount) = reg3 | ((reg4 & 0x70) << 4);
		(*hcount) = reg2 | ((reg4 & 0x0f) << 8);
	} else if(sisfballowretracecrt1(ivideo)) {
		ret |= (FB_VBLANK_HAVE_VSYNC  |
			FB_VBLANK_HAVE_VBLANK |
			FB_VBLANK_HAVE_VCOUNT |
			FB_VBLANK_HAVE_HCOUNT);
		reg1 = inSISREG(SISINPSTAT);
		if(reg1 & 0x08) ret |= FB_VBLANK_VSYNCING;
		if(reg1 & 0x01) ret |= FB_VBLANK_VBLANKING;
		/* NOTE(review): the CR 0x20 result is overwritten by the
		 * CR 0x1b read below; presumably the access itself latches
		 * the counter registers - TODO confirm against hw docs.
		 */
		inSISIDXREG(SISCR,0x20,reg1);
		inSISIDXREG(SISCR,0x1b,reg1);
		inSISIDXREG(SISCR,0x1c,reg2);
		inSISIDXREG(SISCR,0x1d,reg3);
		(*vcount) = reg2 | ((reg3 & 0x07) << 8);
		(*hcount) = (reg1 | ((reg3 & 0x10) << 4)) << 3;
	}
	return ret;
}

/* Apply an fbdev blanking level (FB_BLANK_*). Each level maps to a fixed
 * set of register values - SR01 (screen off), SR11/SR1F (sync suspend),
 * CR63 (315-series), Part1 0x13 / Part2 0x00 (bridge sync) - plus a
 * panel-backlight on/off decision. Returns 0 on success, 1 on an
 * unknown blank level.
 */
static int
sisfb_myblank(struct sis_video_info *ivideo, int blank)
{
	u8 sr01, sr11, sr1f, cr63=0, p2_0, p1_13;
	bool backlight = true;

	switch(blank) {
	case FB_BLANK_UNBLANK:	/* on */
		sr01  = 0x00;
		sr11  = 0x00;
		sr1f  = 0x00;
		cr63  = 0x00;
		p2_0  = 0x20;
		p1_13 = 0x00;
		backlight = true;
		break;
	case FB_BLANK_NORMAL:	/* blank */
		sr01  = 0x20;
		sr11  = 0x00;
		sr1f  = 0x00;
		cr63  = 0x00;
		p2_0  = 0x20;
		p1_13 = 0x00;
		backlight = true;
		break;
	case FB_BLANK_VSYNC_SUSPEND:	/* no vsync */
		sr01  = 0x20;
		sr11  = 0x08;
		sr1f  = 0x80;
		cr63  = 0x40;
		p2_0  = 0x40;
		p1_13 = 0x80;
		backlight = false;
		break;
	case FB_BLANK_HSYNC_SUSPEND:	/* no hsync */
		sr01  = 0x20;
		sr11  = 0x08;
		sr1f  = 0x40;
		cr63  = 0x40;
		p2_0  = 0x80;
		p1_13 = 0x40;
		backlight = false;
		break;
	case FB_BLANK_POWERDOWN:	/* off */
		sr01  = 0x20;
		sr11  = 0x08;
		sr1f  = 0xc0;
		cr63  = 0x40;
		p2_0  = 0xc0;
		p1_13 = 0xc0;
		backlight = false;
		break;
	default:
		return 1;
	}

	if(ivideo->currentvbflags & VB_DISPTYPE_CRT1) {

		/* Only blank CRT1 if the monitor's DPMS feature bits
		 * allow it (or no DDC data is available at all).
		 */
		if( (!ivideo->sisfb_thismonitor.datavalid) ||
		    ((ivideo->sisfb_thismonitor.datavalid) &&
		     (ivideo->sisfb_thismonitor.feature & 0xe0))) {

			if(ivideo->sisvga_engine == SIS_315_VGA) {
				setSISIDXREG(SISCR, ivideo->SiS_Pr.SiS_MyCR63, 0xbf, cr63);
			}

			if(!(sisfb_bridgeisslave(ivideo))) {
				setSISIDXREG(SISSR, 0x01, ~0x20, sr01);
				setSISIDXREG(SISSR, 0x1f, 0x3f, sr1f);
			}
		}

	}

	if(ivideo->currentvbflags & CRT2_LCD) {

		/* Panel backlight control goes through the LVDS bridge
		 * or (315 series) the Chrontel TV/LCD encoder.
		 */
		if(ivideo->vbflags2 & VB2_SISLVDSBRIDGE) {
			if(backlight) {
				SiS_SiS30xBLOn(&ivideo->SiS_Pr);
			} else {
				SiS_SiS30xBLOff(&ivideo->SiS_Pr);
			}
		} else if(ivideo->sisvga_engine == SIS_315_VGA) {
#ifdef CONFIG_FB_SIS_315
			if(ivideo->vbflags2 & VB2_CHRONTEL) {
				if(backlight) {
					SiS_Chrontel701xBLOn(&ivideo->SiS_Pr);
				} else {
					SiS_Chrontel701xBLOff(&ivideo->SiS_Pr);
				}
			}
#endif
		}

		if(((ivideo->sisvga_engine == SIS_300_VGA) &&
		    (ivideo->vbflags2 & (VB2_301|VB2_30xBDH|VB2_LVDS))) ||
		   ((ivideo->sisvga_engine == SIS_315_VGA) &&
		    ((ivideo->vbflags2 & (VB2_LVDS | VB2_CHRONTEL)) == VB2_LVDS))) {
			setSISIDXREG(SISSR, 0x11, ~0x0c, sr11);
		}

		if(ivideo->sisvga_engine == SIS_300_VGA) {
			if((ivideo->vbflags2 & VB2_30xB) &&
			   (!(ivideo->vbflags2 & VB2_30xBDH))) {
				setSISIDXREG(SISPART1, 0x13, 0x3f, p1_13);
			}
		} else if(ivideo->sisvga_engine == SIS_315_VGA) {
			if((ivideo->vbflags2 & VB2_30xB) &&
			   (!(ivideo->vbflags2 & VB2_30xBDH))) {
				setSISIDXREG(SISPART2, 0x00, 0x1f, p2_0);
			}
		}

	} else if(ivideo->currentvbflags & CRT2_VGA) {

		if(ivideo->vbflags2 & VB2_30xB) {
			setSISIDXREG(SISPART2, 0x00, 0x1f, p2_0);
		}

	}

	return 0;
}

/* ------------- Callbacks from init.c/init301.c -------------- */

#ifdef CONFIG_FB_SIS_300
/* Read a 32-bit PCI config dword from the northbridge. */
unsigned int
sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;
	u32 val = 0;

	pci_read_config_dword(ivideo->nbridge, reg, &val);
	return (unsigned int)val;
}

/* Write a 32-bit PCI config dword to the northbridge. */
void
sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg, unsigned int val)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;

	pci_write_config_dword(ivideo->nbridge, reg, (u32)val);
}

/* Read a 32-bit PCI config dword from the LPC device (0 if absent). */
unsigned int
sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;
	u32 val = 0;

	if(!ivideo->lpcdev) return 0;

	pci_read_config_dword(ivideo->lpcdev, reg, &val);
	return (unsigned int)val;
}
#endif

#ifdef CONFIG_FB_SIS_315
/* Write a byte to the northbridge's PCI config space. */
void
sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg, unsigned char val)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;

	pci_write_config_byte(ivideo->nbridge, reg, (u8)val);
}

/* Read a 16-bit PCI config word from the LPC device (0 if absent). */
unsigned int
sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)SiS_Pr->ivideo;
	u16 val = 0;

	if(!ivideo->lpcdev) return 0;

	pci_read_config_word(ivideo->lpcdev, reg, &val);
	return (unsigned int)val;
}
#endif

/* -----------
FBDev related routines for all series ----------- */

/* Colormap length for a mode: 256 entries at 8bpp, 16 otherwise. */
static int
sisfb_get_cmap_len(const struct fb_var_screeninfo *var)
{
	return (var->bits_per_pixel == 8) ? 256 : 16;
}

/* Set per-depth acceleration parameters (2D engine color format and
 * 310-series depth code) and the colormap length for the current bpp.
 * Unsupported depths disable acceleration.
 */
static void
sisfb_set_vparms(struct sis_video_info *ivideo)
{
	switch(ivideo->video_bpp) {
	case 8:
		ivideo->DstColor = 0x0000;
		ivideo->SiS310_AccelDepth = 0x00000000;
		ivideo->video_cmap_len = 256;
		break;
	case 16:
		ivideo->DstColor = 0x8000;
		ivideo->SiS310_AccelDepth = 0x00010000;
		ivideo->video_cmap_len = 16;
		break;
	case 32:
		ivideo->DstColor = 0xC000;
		ivideo->SiS310_AccelDepth = 0x00020000;
		ivideo->video_cmap_len = 16;
		break;
	default:
		ivideo->video_cmap_len = 16;
		printk(KERN_ERR "sisfb: Unsupported depth %d", ivideo->video_bpp);
		ivideo->accel = 0;
	}
}

/* Maximum virtual y resolution that fits into the usable framebuffer
 * memory at the given virtual width/depth, capped at 32767.
 * NOTE(review): assumes xres_virtual and bits_per_pixel are non-zero -
 * callers validate the mode first.
 */
static int
sisfb_calc_maxyres(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
{
	int maxyres = ivideo->sisfb_mem / (var->xres_virtual * (var->bits_per_pixel >> 3));

	if(maxyres > 32767) maxyres = 32767;

	return maxyres;
}

/* Compute the line length in bytes and the CRT1 scan pitch; interlaced
 * modes (except on LCDA) need a doubled CRT1 pitch.
 */
static void
sisfb_calc_pitch(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
{
	ivideo->video_linelength = var->xres_virtual * (var->bits_per_pixel >> 3);
	ivideo->scrnpitchCRT1 = ivideo->video_linelength;
	if(!(ivideo->currentvbflags & CRT1_LCDA)) {
		if((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
			ivideo->scrnpitchCRT1 <<= 1;
		}
	}
}

/* Program the screen pitch registers for CRT1 (CR13/SR0E) and CRT2
 * (bridge Part1 0x07/0x09), in units of 8 bytes.
 */
static void
sisfb_set_pitch(struct sis_video_info *ivideo)
{
	bool isslavemode = false;
	unsigned short HDisplay1 = ivideo->scrnpitchCRT1 >> 3;
	unsigned short HDisplay2 = ivideo->video_linelength >> 3;

	if(sisfb_bridgeisslave(ivideo)) isslavemode = true;

	/* We need to set pitch for CRT1 if bridge is in slave mode, too */
	if((ivideo->currentvbflags & VB_DISPTYPE_DISP1) || (isslavemode)) {
		outSISIDXREG(SISCR,0x13,(HDisplay1 & 0xFF));
		setSISIDXREG(SISSR,0x0E,0xF0,(HDisplay1 >> 8));
	}

	/* We must not set the pitch for CRT2 if bridge is in slave mode */
	if((ivideo->currentvbflags & VB_DISPTYPE_DISP2) && (!isslavemode)) {
		orSISIDXREG(SISPART1,ivideo->CRT2_write_enable,0x01);
		outSISIDXREG(SISPART1,0x07,(HDisplay2 & 0xFF));
		setSISIDXREG(SISPART1,0x09,0xF0,(HDisplay2 >> 8));
	}
}

/* Fill in the RGB bitfield layout of var for the chosen depth:
 * 8bpp pseudocolor, 16bpp RGB565, 32bpp ARGB8888.
 */
static void
sisfb_bpp_to_var(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
{
	ivideo->video_cmap_len = sisfb_get_cmap_len(var);

	switch(var->bits_per_pixel) {
	case 8:
		var->red.offset = var->green.offset = var->blue.offset = 0;
		var->red.length = var->green.length = var->blue.length = 8;
		break;
	case 16:
		var->red.offset = 11;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 6;
		var->blue.offset = 0;
		var->blue.length = 5;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
		break;
	}
}

/* Program the hardware to ivideo->mode_no via the SiS BIOS-emulation mode
 * setting code, bracketed by sisfb_pre/post_setmode fixups. Bit 7 of the
 * mode number suppresses the BIOS screen clear. Returns 0 or -EINVAL.
 */
static int
sisfb_set_mode(struct sis_video_info *ivideo, int clrscrn)
{
	unsigned short modeno = ivideo->mode_no;

	/* >=2.6.12's fbcon clears the screen anyway */
	modeno |= 0x80;

	outSISIDXREG(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD);

	sisfb_pre_setmode(ivideo);

	if(!SiSSetMode(&ivideo->SiS_Pr, modeno)) {
		printk(KERN_ERR "sisfb: Setting mode[0x%x] failed\n",
				ivideo->mode_no);
		return -EINVAL;
	}

	outSISIDXREG(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD);

	sisfb_post_setmode(ivideo);

	return 0;
}

/* Resolve var into a supported mode index and refresh rate; if isactive,
 * also program the hardware and update the driver's current-mode state.
 * (continues below)
 */
static int
sisfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
		struct fb_info *info)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
	unsigned int htotal = 0, vtotal = 0;
	unsigned int drate = 0, hrate = 0;
	int found_mode = 0, ret;
	int old_mode;
	u32 pixclock;

	htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len;

	vtotal = var->upper_margin + var->lower_margin + var->vsync_len;

	pixclock = var->pixclock;

	/* vtotal scaling by vmode, used for the refresh-rate estimate below. */
	if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
		vtotal += var->yres;
		vtotal <<= 1;
	} else if((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
		vtotal += var->yres;
		vtotal <<= 2;
	} else if((var->vmode & FB_VMODE_MASK) ==
FB_VMODE_INTERLACED) { vtotal += var->yres; vtotal <<= 1; } else vtotal += var->yres; if(!(htotal) || !(vtotal)) { DPRINTK("sisfb: Invalid 'var' information\n"); return -EINVAL; } if(pixclock && htotal && vtotal) { drate = 1000000000 / pixclock; hrate = (drate * 1000) / htotal; ivideo->refresh_rate = (unsigned int) (hrate * 2 / vtotal); } else { ivideo->refresh_rate = 60; } old_mode = ivideo->sisfb_mode_idx; ivideo->sisfb_mode_idx = 0; while( (sisbios_mode[ivideo->sisfb_mode_idx].mode_no[0] != 0) && (sisbios_mode[ivideo->sisfb_mode_idx].xres <= var->xres) ) { if( (sisbios_mode[ivideo->sisfb_mode_idx].xres == var->xres) && (sisbios_mode[ivideo->sisfb_mode_idx].yres == var->yres) && (sisbios_mode[ivideo->sisfb_mode_idx].bpp == var->bits_per_pixel)) { ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]; found_mode = 1; break; } ivideo->sisfb_mode_idx++; } if(found_mode) { ivideo->sisfb_mode_idx = sisfb_validate_mode(ivideo, ivideo->sisfb_mode_idx, ivideo->currentvbflags); } else { ivideo->sisfb_mode_idx = -1; } if(ivideo->sisfb_mode_idx < 0) { printk(KERN_ERR "sisfb: Mode %dx%dx%d not supported\n", var->xres, var->yres, var->bits_per_pixel); ivideo->sisfb_mode_idx = old_mode; return -EINVAL; } ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]; if(sisfb_search_refresh_rate(ivideo, ivideo->refresh_rate, ivideo->sisfb_mode_idx) == 0) { ivideo->rate_idx = sisbios_mode[ivideo->sisfb_mode_idx].rate_idx; ivideo->refresh_rate = 60; } if(isactive) { /* If acceleration to be used? 
Need to know * before pre/post_set_mode() */ ivideo->accel = 0; #if defined(FBINFO_HWACCEL_DISABLED) && defined(FBINFO_HWACCEL_XPAN) #ifdef STUPID_ACCELF_TEXT_SHIT if(var->accel_flags & FB_ACCELF_TEXT) { info->flags &= ~FBINFO_HWACCEL_DISABLED; } else { info->flags |= FBINFO_HWACCEL_DISABLED; } #endif if(!(info->flags & FBINFO_HWACCEL_DISABLED)) ivideo->accel = -1; #else if(var->accel_flags & FB_ACCELF_TEXT) ivideo->accel = -1; #endif if((ret = sisfb_set_mode(ivideo, 1))) { return ret; } ivideo->video_bpp = sisbios_mode[ivideo->sisfb_mode_idx].bpp; ivideo->video_width = sisbios_mode[ivideo->sisfb_mode_idx].xres; ivideo->video_height = sisbios_mode[ivideo->sisfb_mode_idx].yres; sisfb_calc_pitch(ivideo, var); sisfb_set_pitch(ivideo); sisfb_set_vparms(ivideo); ivideo->current_width = ivideo->video_width; ivideo->current_height = ivideo->video_height; ivideo->current_bpp = ivideo->video_bpp; ivideo->current_htotal = htotal; ivideo->current_vtotal = vtotal; ivideo->current_linelength = ivideo->video_linelength; ivideo->current_pixclock = var->pixclock; ivideo->current_refresh_rate = ivideo->refresh_rate; ivideo->sisfb_lastrates[ivideo->mode_no] = ivideo->refresh_rate; } return 0; } static void sisfb_set_base_CRT1(struct sis_video_info *ivideo, unsigned int base) { outSISIDXREG(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD); outSISIDXREG(SISCR, 0x0D, base & 0xFF); outSISIDXREG(SISCR, 0x0C, (base >> 8) & 0xFF); outSISIDXREG(SISSR, 0x0D, (base >> 16) & 0xFF); if(ivideo->sisvga_engine == SIS_315_VGA) { setSISIDXREG(SISSR, 0x37, 0xFE, (base >> 24) & 0x01); } } static void sisfb_set_base_CRT2(struct sis_video_info *ivideo, unsigned int base) { if(ivideo->currentvbflags & VB_DISPTYPE_DISP2) { orSISIDXREG(SISPART1, ivideo->CRT2_write_enable, 0x01); outSISIDXREG(SISPART1, 0x06, (base & 0xFF)); outSISIDXREG(SISPART1, 0x05, ((base >> 8) & 0xFF)); outSISIDXREG(SISPART1, 0x04, ((base >> 16) & 0xFF)); if(ivideo->sisvga_engine == SIS_315_VGA) { setSISIDXREG(SISPART1, 0x02, 0x7F, ((base >> 24) 
				& 0x01) << 7);
		}
	}
}

/* Reprogram the CRT1/CRT2 display start addresses for the panning offset
 * in var. The base is computed in 32-bit units (hence the bpp-dependent
 * shifts) plus the video memory offset. Returns 0 or -EINVAL on an
 * out-of-range offset.
 */
static int
sisfb_pan_var(struct sis_video_info *ivideo, struct fb_var_screeninfo *var)
{
	if(var->xoffset > (var->xres_virtual - var->xres)) {
		return -EINVAL;
	}
	if(var->yoffset > (var->yres_virtual - var->yres)) {
		return -EINVAL;
	}

	ivideo->current_base = (var->yoffset * var->xres_virtual) + var->xoffset;

	/* calculate base bpp dep. */
	switch(var->bits_per_pixel) {
	case 32:
		break;
	case 16:
		ivideo->current_base >>= 1;
		break;
	case 8:
	default:
		ivideo->current_base >>= 2;
		break;
	}

	ivideo->current_base += (ivideo->video_offset >> 2);

	sisfb_set_base_CRT1(ivideo, ivideo->current_base);
	sisfb_set_base_CRT2(ivideo, ivideo->current_base);

	return 0;
}

/* fb_ops open hook - nothing to do. */
static int
sisfb_open(struct fb_info *info, int user)
{
	return 0;
}

/* fb_ops release hook - nothing to do. */
static int
sisfb_release(struct fb_info *info, int user)
{
	return 0;
}

/* fb_ops setcolreg: at 8bpp write the hardware palette DACs (and the
 * CRT2 DAC when a second display is active); at 16/32bpp fill the
 * 16-entry software pseudo-palette. Returns 1 for out-of-range regno.
 */
static int
sisfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
		unsigned transp, struct fb_info *info)
{
	struct sis_video_info *ivideo = (struct sis_video_info *)info->par;

	if(regno >= sisfb_get_cmap_len(&info->var))
		return 1;

	switch(info->var.bits_per_pixel) {
	case 8:
		/* DAC takes 6-bit components: 16-bit values shifted by 10
		 * for CRT1, by 8 for the CRT2 DAC.
		 */
		outSISREG(SISDACA, regno);
		outSISREG(SISDACD, (red >> 10));
		outSISREG(SISDACD, (green >> 10));
		outSISREG(SISDACD, (blue >> 10));
		if(ivideo->currentvbflags & VB_DISPTYPE_DISP2) {
			outSISREG(SISDAC2A, regno);
			outSISREG(SISDAC2D, (red >> 8));
			outSISREG(SISDAC2D, (green >> 8));
			outSISREG(SISDAC2D, (blue >> 8));
		}
		break;
	case 16:
		if (regno >= 16)
			break;

		((u32 *)(info->pseudo_palette))[regno] =
				(red & 0xf800)          |
				((green & 0xfc00) >> 5) |
				((blue & 0xf800) >> 11);
		break;
	case 32:
		if (regno >= 16)
			break;

		red >>= 8;
		green >>= 8;
		blue >>= 8;
		((u32 *)(info->pseudo_palette))[regno] =
				(red << 16) | (green << 8) | (blue);
		break;
	}
	return 0;
}

/* fb_ops set_par: apply info->var to the hardware and refresh fix. */
static int
sisfb_set_par(struct fb_info *info)
{
	int err;

	if((err = sisfb_do_set_var(&info->var, 1, info)))
		return err;

	sisfb_get_fix(&info->fix, -1, info);

	return 0;
}

static int
sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct
sis_video_info *ivideo = (struct sis_video_info *)info->par; unsigned int htotal = 0, vtotal = 0, myrateindex = 0; unsigned int drate = 0, hrate = 0, maxyres; int found_mode = 0; int refresh_rate, search_idx, tidx; bool recalc_clock = false; u32 pixclock; htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len; vtotal = var->upper_margin + var->lower_margin + var->vsync_len; pixclock = var->pixclock; if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) { vtotal += var->yres; vtotal <<= 1; } else if((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) { vtotal += var->yres; vtotal <<= 2; } else if((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { vtotal += var->yres; vtotal <<= 1; } else vtotal += var->yres; if(!(htotal) || !(vtotal)) { SISFAIL("sisfb: no valid timing data"); } search_idx = 0; while( (sisbios_mode[search_idx].mode_no[0] != 0) && (sisbios_mode[search_idx].xres <= var->xres) ) { if( (sisbios_mode[search_idx].xres == var->xres) && (sisbios_mode[search_idx].yres == var->yres) && (sisbios_mode[search_idx].bpp == var->bits_per_pixel)) { if((tidx = sisfb_validate_mode(ivideo, search_idx, ivideo->currentvbflags)) > 0) { found_mode = 1; search_idx = tidx; break; } } search_idx++; } if(!found_mode) { search_idx = 0; while(sisbios_mode[search_idx].mode_no[0] != 0) { if( (var->xres <= sisbios_mode[search_idx].xres) && (var->yres <= sisbios_mode[search_idx].yres) && (var->bits_per_pixel == sisbios_mode[search_idx].bpp) ) { if((tidx = sisfb_validate_mode(ivideo,search_idx, ivideo->currentvbflags)) > 0) { found_mode = 1; search_idx = tidx; break; } } search_idx++; } if(found_mode) { printk(KERN_DEBUG "sisfb: Adapted from %dx%dx%d to %dx%dx%d\n", var->xres, var->yres, var->bits_per_pixel, sisbios_mode[search_idx].xres, sisbios_mode[search_idx].yres, var->bits_per_pixel); var->xres = sisbios_mode[search_idx].xres; var->yres = sisbios_mode[search_idx].yres; } else { printk(KERN_ERR "sisfb: Failed to find supported mode near %dx%dx%d\n", 
var->xres, var->yres, var->bits_per_pixel); return -EINVAL; } } if( ((ivideo->vbflags2 & VB2_LVDS) || ((ivideo->vbflags2 & VB2_30xBDH) && (ivideo->currentvbflags & CRT2_LCD))) && (var->bits_per_pixel == 8) ) { /* Slave modes on LVDS and 301B-DH */ refresh_rate = 60; recalc_clock = true; } else if( (ivideo->current_htotal == htotal) && (ivideo->current_vtotal == vtotal) && (ivideo->current_pixclock == pixclock) ) { /* x=x & y=y & c=c -> assume depth change */ drate = 1000000000 / pixclock; hrate = (drate * 1000) / htotal; refresh_rate = (unsigned int) (hrate * 2 / vtotal); } else if( ( (ivideo->current_htotal != htotal) || (ivideo->current_vtotal != vtotal) ) && (ivideo->current_pixclock == var->pixclock) ) { /* x!=x | y!=y & c=c -> invalid pixclock */ if(ivideo->sisfb_lastrates[sisbios_mode[search_idx].mode_no[ivideo->mni]]) { refresh_rate = ivideo->sisfb_lastrates[sisbios_mode[search_idx].mode_no[ivideo->mni]]; } else if(ivideo->sisfb_parm_rate != -1) { /* Sic, sisfb_parm_rate - want to know originally desired rate here */ refresh_rate = ivideo->sisfb_parm_rate; } else { refresh_rate = 60; } recalc_clock = true; } else if((pixclock) && (htotal) && (vtotal)) { drate = 1000000000 / pixclock; hrate = (drate * 1000) / htotal; refresh_rate = (unsigned int) (hrate * 2 / vtotal); } else if(ivideo->current_refresh_rate) { refresh_rate = ivideo->current_refresh_rate; recalc_clock = true; } else { refresh_rate = 60; recalc_clock = true; } myrateindex = sisfb_search_refresh_rate(ivideo, refresh_rate, search_idx); /* Eventually recalculate timing and clock */ if(recalc_clock) { if(!myrateindex) myrateindex = sisbios_mode[search_idx].rate_idx; var->pixclock = (u32) (1000000000 / sisfb_mode_rate_to_dclock(&ivideo->SiS_Pr, sisbios_mode[search_idx].mode_no[ivideo->mni], myrateindex)); sisfb_mode_rate_to_ddata(&ivideo->SiS_Pr, sisbios_mode[search_idx].mode_no[ivideo->mni], myrateindex, var); if((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) { var->pixclock <<= 1; } } 
if(ivideo->sisfb_thismonitor.datavalid) { if(!sisfb_verify_rate(ivideo, &ivideo->sisfb_thismonitor, search_idx, myrateindex, refresh_rate)) { printk(KERN_INFO "sisfb: WARNING: Refresh rate exceeds monitor specs!\n"); } } /* Adapt RGB settings */ sisfb_bpp_to_var(ivideo, var); /* Sanity check for offsets */ if(var->xoffset < 0) var->xoffset = 0; if(var->yoffset < 0) var->yoffset = 0; if(var->xres > var->xres_virtual) var->xres_virtual = var->xres; if(ivideo->sisfb_ypan) { maxyres = sisfb_calc_maxyres(ivideo, var); if(ivideo->sisfb_max) { var->yres_virtual = maxyres; } else { if(var->yres_virtual > maxyres) { var->yres_virtual = maxyres; } } if(var->yres_virtual <= var->yres) { var->yres_virtual = var->yres; } } else { if(var->yres != var->yres_virtual) { var->yres_virtual = var->yres; } var->xoffset = 0; var->yoffset = 0; } /* Truncate offsets to maximum if too high */ if(var->xoffset > var->xres_virtual - var->xres) { var->xoffset = var->xres_virtual - var->xres - 1; } if(var->yoffset > var->yres_virtual - var->yres) { var->yoffset = var->yres_virtual - var->yres - 1; } /* Set everything else to 0 */ var->red.msb_right = var->green.msb_right = var->blue.msb_right = var->transp.offset = var->transp.length = var->transp.msb_right = 0; return 0; } static int sisfb_pan_display(struct fb_var_screeninfo *var, struct fb_info* info) { struct sis_video_info *ivideo = (struct sis_video_info *)info->par; int err; if(var->xoffset > (var->xres_virtual - var->xres)) return -EINVAL; if(var->yoffset > (var->yres_virtual - var->yres)) return -EINVAL; if(var->vmode & FB_VMODE_YWRAP) return -EINVAL; if(var->xoffset + info->var.xres > info->var.xres_virtual || var->yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; if((err = sisfb_pan_var(ivideo, var)) < 0) return err; info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; return 0; } static int sisfb_blank(int blank, struct fb_info *info) { struct sis_video_info *ivideo = (struct sis_video_info 
*)info->par; return sisfb_myblank(ivideo, blank); } /* ----------- FBDev related routines for all series ---------- */ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct sis_video_info *ivideo = (struct sis_video_info *)info->par; struct sis_memreq sismemreq; struct fb_vblank sisvbblank; u32 gpu32 = 0; #ifndef __user #define __user #endif u32 __user *argp = (u32 __user *)arg; switch(cmd) { case FBIO_ALLOC: if(!capable(CAP_SYS_RAWIO)) return -EPERM; if(copy_from_user(&sismemreq, (void __user *)arg, sizeof(sismemreq))) return -EFAULT; sis_malloc(&sismemreq); if(copy_to_user((void __user *)arg, &sismemreq, sizeof(sismemreq))) { sis_free((u32)sismemreq.offset); return -EFAULT; } break; case FBIO_FREE: if(!capable(CAP_SYS_RAWIO)) return -EPERM; if(get_user(gpu32, argp)) return -EFAULT; sis_free(gpu32); break; case FBIOGET_VBLANK: memset(&sisvbblank, 0, sizeof(struct fb_vblank)); sisvbblank.count = 0; sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); if(copy_to_user((void __user *)arg, &sisvbblank, sizeof(sisvbblank))) return -EFAULT; break; case SISFB_GET_INFO_SIZE: return put_user(sizeof(struct sisfb_info), argp); case SISFB_GET_INFO_OLD: if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); case SISFB_GET_INFO: /* For communication with X driver */ ivideo->sisfb_infoblock.sisfb_id = SISFB_ID; ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR; ivideo->sisfb_infoblock.sisfb_revision = VER_MINOR; ivideo->sisfb_infoblock.sisfb_patchlevel = VER_LEVEL; ivideo->sisfb_infoblock.chip_id = ivideo->chip_id; ivideo->sisfb_infoblock.sisfb_pci_vendor = ivideo->chip_vendor; ivideo->sisfb_infoblock.memory = ivideo->video_size / 1024; ivideo->sisfb_infoblock.heapstart = ivideo->heapstart / 1024; if(ivideo->modechanged) { ivideo->sisfb_infoblock.fbvidmode = ivideo->mode_no; } else { ivideo->sisfb_infoblock.fbvidmode = ivideo->modeprechange; } 
ivideo->sisfb_infoblock.sisfb_caps = ivideo->caps; ivideo->sisfb_infoblock.sisfb_tqlen = ivideo->cmdQueueSize / 1024; ivideo->sisfb_infoblock.sisfb_pcibus = ivideo->pcibus; ivideo->sisfb_infoblock.sisfb_pcislot = ivideo->pcislot; ivideo->sisfb_infoblock.sisfb_pcifunc = ivideo->pcifunc; ivideo->sisfb_infoblock.sisfb_lcdpdc = ivideo->detectedpdc; ivideo->sisfb_infoblock.sisfb_lcdpdca = ivideo->detectedpdca; ivideo->sisfb_infoblock.sisfb_lcda = ivideo->detectedlcda; ivideo->sisfb_infoblock.sisfb_vbflags = ivideo->vbflags; ivideo->sisfb_infoblock.sisfb_currentvbflags = ivideo->currentvbflags; ivideo->sisfb_infoblock.sisfb_scalelcd = ivideo->SiS_Pr.UsePanelScaler; ivideo->sisfb_infoblock.sisfb_specialtiming = ivideo->SiS_Pr.SiS_CustomT; ivideo->sisfb_infoblock.sisfb_haveemi = ivideo->SiS_Pr.HaveEMI ? 1 : 0; ivideo->sisfb_infoblock.sisfb_haveemilcd = ivideo->SiS_Pr.HaveEMILCD ? 1 : 0; ivideo->sisfb_infoblock.sisfb_emi30 = ivideo->SiS_Pr.EMI_30; ivideo->sisfb_infoblock.sisfb_emi31 = ivideo->SiS_Pr.EMI_31; ivideo->sisfb_infoblock.sisfb_emi32 = ivideo->SiS_Pr.EMI_32; ivideo->sisfb_infoblock.sisfb_emi33 = ivideo->SiS_Pr.EMI_33; ivideo->sisfb_infoblock.sisfb_tvxpos = (u16)(ivideo->tvxpos + 32); ivideo->sisfb_infoblock.sisfb_tvypos = (u16)(ivideo->tvypos + 32); ivideo->sisfb_infoblock.sisfb_heapsize = ivideo->sisfb_heap_size / 1024; ivideo->sisfb_infoblock.sisfb_videooffset = ivideo->video_offset; ivideo->sisfb_infoblock.sisfb_curfstn = ivideo->curFSTN; ivideo->sisfb_infoblock.sisfb_curdstn = ivideo->curDSTN; ivideo->sisfb_infoblock.sisfb_vbflags2 = ivideo->vbflags2; ivideo->sisfb_infoblock.sisfb_can_post = ivideo->sisfb_can_post ? 1 : 0; ivideo->sisfb_infoblock.sisfb_card_posted = ivideo->sisfb_card_posted ? 1 : 0; ivideo->sisfb_infoblock.sisfb_was_boot_device = ivideo->sisfb_was_boot_device ? 
1 : 0; if(copy_to_user((void __user *)arg, &ivideo->sisfb_infoblock, sizeof(ivideo->sisfb_infoblock))) return -EFAULT; break; case SISFB_GET_VBRSTATUS_OLD: if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); case SISFB_GET_VBRSTATUS: if(sisfb_CheckVBRetrace(ivideo)) return put_user((u32)1, argp); else return put_user((u32)0, argp); case SISFB_GET_AUTOMAXIMIZE_OLD: if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); case SISFB_GET_AUTOMAXIMIZE: if(ivideo->sisfb_max) return put_user((u32)1, argp); else return put_user((u32)0, argp); case SISFB_SET_AUTOMAXIMIZE_OLD: if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); case SISFB_SET_AUTOMAXIMIZE: if(get_user(gpu32, argp)) return -EFAULT; ivideo->sisfb_max = (gpu32) ? 1 : 0; break; case SISFB_SET_TVPOSOFFSET: if(get_user(gpu32, argp)) return -EFAULT; sisfb_set_TVxposoffset(ivideo, ((int)(gpu32 >> 16)) - 32); sisfb_set_TVyposoffset(ivideo, ((int)(gpu32 & 0xffff)) - 32); break; case SISFB_GET_TVPOSOFFSET: return put_user((u32)(((ivideo->tvxpos+32)<<16)|((ivideo->tvypos+32)&0xffff)), argp); case SISFB_COMMAND: if(copy_from_user(&ivideo->sisfb_command, (void __user *)arg, sizeof(struct sisfb_cmd))) return -EFAULT; sisfb_handle_command(ivideo, &ivideo->sisfb_command); if(copy_to_user((void __user *)arg, &ivideo->sisfb_command, sizeof(struct sisfb_cmd))) return -EFAULT; break; case SISFB_SET_LOCK: if(get_user(gpu32, argp)) return -EFAULT; ivideo->sisfblocked = (gpu32) ? 
1 : 0; break; default: #ifdef SIS_NEW_CONFIG_COMPAT return -ENOIOCTLCMD; #else return -EINVAL; #endif } return 0; } static int sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info) { struct sis_video_info *ivideo = (struct sis_video_info *)info->par; memset(fix, 0, sizeof(struct fb_fix_screeninfo)); strlcpy(fix->id, ivideo->myid, sizeof(fix->id)); mutex_lock(&info->mm_lock); fix->smem_start = ivideo->video_base + ivideo->video_offset; fix->smem_len = ivideo->sisfb_mem; mutex_unlock(&info->mm_lock); fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; fix->xpanstep = 1; fix->ypanstep = (ivideo->sisfb_ypan) ? 1 : 0; fix->ywrapstep = 0; fix->line_length = ivideo->video_linelength; fix->mmio_start = ivideo->mmio_base; fix->mmio_len = ivideo->mmio_size; if(ivideo->sisvga_engine == SIS_300_VGA) { fix->accel = FB_ACCEL_SIS_GLAMOUR; } else if((ivideo->chip == SIS_330) || (ivideo->chip == SIS_760) || (ivideo->chip == SIS_761)) { fix->accel = FB_ACCEL_SIS_XABRE; } else if(ivideo->chip == XGI_20) { fix->accel = FB_ACCEL_XGI_VOLARI_Z; } else if(ivideo->chip >= XGI_40) { fix->accel = FB_ACCEL_XGI_VOLARI_V; } else { fix->accel = FB_ACCEL_SIS_GLAMOUR_2; } return 0; } /* ---------------- fb_ops structures ----------------- */ static struct fb_ops sisfb_ops = { .owner = THIS_MODULE, .fb_open = sisfb_open, .fb_release = sisfb_release, .fb_check_var = sisfb_check_var, .fb_set_par = sisfb_set_par, .fb_setcolreg = sisfb_setcolreg, .fb_pan_display = sisfb_pan_display, .fb_blank = sisfb_blank, .fb_fillrect = fbcon_sis_fillrect, .fb_copyarea = fbcon_sis_copyarea, .fb_imageblit = cfb_imageblit, .fb_sync = fbcon_sis_sync, #ifdef SIS_NEW_CONFIG_COMPAT .fb_compat_ioctl= sisfb_ioctl, #endif .fb_ioctl = sisfb_ioctl }; /* ---------------- Chip generation dependent routines ---------------- */ static struct pci_dev * __devinit sisfb_get_northbridge(int basechipid) { struct pci_dev *pdev = 
NULL;
	int nbridgenum, nbridgeidx, i;
	static const unsigned short nbridgeids[] = {
		PCI_DEVICE_ID_SI_540,	/* for SiS 540 VGA */
		PCI_DEVICE_ID_SI_630,	/* for SiS 630/730 VGA */
		PCI_DEVICE_ID_SI_730,
		PCI_DEVICE_ID_SI_550,	/* for SiS 550 VGA */
		PCI_DEVICE_ID_SI_650,	/* for SiS 650/651/740 VGA */
		PCI_DEVICE_ID_SI_651,
		PCI_DEVICE_ID_SI_740,
		PCI_DEVICE_ID_SI_661,	/* for SiS 661/741/660/760/761 VGA */
		PCI_DEVICE_ID_SI_741,
		PCI_DEVICE_ID_SI_660,
		PCI_DEVICE_ID_SI_760,
		PCI_DEVICE_ID_SI_761
	};

	/* Map the VGA base chip id to a slice of nbridgeids[] and probe
	 * each candidate northbridge id until one is found.
	 */
	switch(basechipid) {
#ifdef CONFIG_FB_SIS_300
	case SIS_540: nbridgeidx = 0; nbridgenum = 1; break;
	case SIS_630: nbridgeidx = 1; nbridgenum = 2; break;
#endif
#ifdef CONFIG_FB_SIS_315
	case SIS_550: nbridgeidx = 3; nbridgenum = 1; break;
	case SIS_650: nbridgeidx = 4; nbridgenum = 3; break;
	case SIS_660: nbridgeidx = 7; nbridgenum = 5; break;
#endif
	default: return NULL;
	}
	for(i = 0; i < nbridgenum; i++) {
		if((pdev = pci_get_device(PCI_VENDOR_ID_SI,
				nbridgeids[nbridgeidx+i], NULL)))
			break;
	}
	return pdev;
}

/* Determine the amount of video RAM from chipset-specific registers
 * (SR14/CR79/CR78 or, on 540/630/730, northbridge PCI config byte 0x63)
 * and store it in ivideo->video_size in bytes; 660/760/761 additionally
 * split the total into shared (UMAsize) and local (LFBsize) memory.
 * Returns 0 on success, -1 for an unknown chip or missing northbridge.
 */
static int __devinit
sisfb_get_dram_size(struct sis_video_info *ivideo)
{
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
	u8 reg;
#endif

	ivideo->video_size = 0;
	ivideo->UMAsize = ivideo->LFBsize = 0;

	switch(ivideo->chip) {
#ifdef CONFIG_FB_SIS_300
	case SIS_300:
		inSISIDXREG(SISSR, 0x14, reg);
		ivideo->video_size = ((reg & 0x3F) + 1) << 20;
		break;
	case SIS_540:
	case SIS_630:
	case SIS_730:
		if(!ivideo->nbridge)
			return -1;
		pci_read_config_byte(ivideo->nbridge, 0x63, &reg);
		ivideo->video_size = 1 << (((reg & 0x70) >> 4) + 21);
		break;
#endif
#ifdef CONFIG_FB_SIS_315
	case SIS_315H:
	case SIS_315PRO:
	case SIS_315:
		inSISIDXREG(SISSR, 0x14, reg);
		ivideo->video_size = (1 << ((reg & 0xf0) >> 4)) << 20;
		/* Bus-width bits scale the size: x2 or x1.5. */
		switch((reg >> 2) & 0x03) {
		case 0x01:
		case 0x03:
			ivideo->video_size <<= 1;
			break;
		case 0x02:
			ivideo->video_size += (ivideo->video_size/2);
		}
		break;
	case SIS_330:
		inSISIDXREG(SISSR, 0x14, reg);
		ivideo->video_size = (1 << ((reg & 0xf0) >> 4)) << 20;
		if(reg & 0x0c) ivideo->video_size <<= 1;
		break;
	case SIS_550:
	case SIS_650:
	case SIS_740:
		inSISIDXREG(SISSR, 0x14, reg);
		ivideo->video_size = (((reg & 0x3f) + 1) << 2) << 20;
		break;
	case SIS_661:
	case SIS_741:
		inSISIDXREG(SISCR, 0x79, reg);
		ivideo->video_size = (1 << ((reg & 0xf0) >> 4)) << 20;
		break;
	case SIS_660:
	case SIS_760:
	case SIS_761:
		/* Shared (UMA) memory from CR79, plus an optional 32/64MB
		 * local framebuffer indicated by CR78 bits 4-5.
		 */
		inSISIDXREG(SISCR, 0x79, reg);
		reg = (reg & 0xf0) >> 4;
		if(reg) {
			ivideo->video_size = (1 << reg) << 20;
			ivideo->UMAsize = ivideo->video_size;
		}
		inSISIDXREG(SISCR, 0x78, reg);
		reg &= 0x30;
		if(reg) {
			if(reg == 0x10) {
				ivideo->LFBsize = (32 << 20);
			} else {
				ivideo->LFBsize = (64 << 20);
			}
			ivideo->video_size += ivideo->LFBsize;
		}
		break;
	case SIS_340:
	case XGI_20:
	case XGI_40:
		inSISIDXREG(SISSR, 0x14, reg);
		ivideo->video_size = (1 << ((reg & 0xf0) >> 4)) << 20;
		if(ivideo->chip != XGI_20) {
			reg = (reg & 0x0c) >> 2;
			/* Revision 2 encodes the multiplier differently. */
			if(ivideo->revision_id == 2) {
				if(reg & 0x01) reg = 0x02;
				else	       reg = 0x00;
			}
			if(reg == 0x02)		ivideo->video_size <<= 1;
			else if(reg == 0x03)	ivideo->video_size <<= 2;
		}
		break;
#endif
	default:
		return -1;
	}
	return 0;
}

/* -------------- video bridge device detection --------------- */

/* Probe CR32 (and TV-standard strap registers) to discover which outputs
 * are connected, fill in ivideo->vbflags accordingly, and sanitize the
 * user-supplied tvplug/tvstd/forcecrt1 module parameters against the
 * detected hardware. (continues below)
 */
static void __devinit
sisfb_detect_VB_connect(struct sis_video_info *ivideo)
{
	u8 cr32, temp;

	/* No CRT2 on XGI Z7 */
	if(ivideo->chip == XGI_20) {
		ivideo->sisfb_crt1off = 0;
		return;
	}

#ifdef CONFIG_FB_SIS_300
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		inSISIDXREG(SISSR, 0x17, temp);
		if((temp & 0x0F) && (ivideo->chip != SIS_300)) {
			/* PAL/NTSC is stored on SR16 on such machines */
			if(!(ivideo->vbflags & (TV_PAL | TV_NTSC | TV_PALM | TV_PALN))) {
				inSISIDXREG(SISSR, 0x16, temp);
				if(temp & 0x20)
					ivideo->vbflags |= TV_PAL;
				else
					ivideo->vbflags |= TV_NTSC;
			}
		}
	}
#endif

	inSISIDXREG(SISCR, 0x32, cr32);

	if(cr32 & SIS_CRT1) {
		ivideo->sisfb_crt1off = 0;
	} else {
		ivideo->sisfb_crt1off = (cr32 & 0xDF) ?
1 : 0; } ivideo->vbflags &= ~(CRT2_TV | CRT2_LCD | CRT2_VGA); if(cr32 & SIS_VB_TV) ivideo->vbflags |= CRT2_TV; if(cr32 & SIS_VB_LCD) ivideo->vbflags |= CRT2_LCD; if(cr32 & SIS_VB_CRT2) ivideo->vbflags |= CRT2_VGA; /* Check given parms for hardware compatibility. * (Cannot do this in the search_xx routines since we don't * know what hardware we are running on then) */ if(ivideo->chip != SIS_550) { ivideo->sisfb_dstn = ivideo->sisfb_fstn = 0; } if(ivideo->sisfb_tvplug != -1) { if( (ivideo->sisvga_engine != SIS_315_VGA) || (!(ivideo->vbflags2 & VB2_SISYPBPRBRIDGE)) ) { if(ivideo->sisfb_tvplug & TV_YPBPR) { ivideo->sisfb_tvplug = -1; printk(KERN_ERR "sisfb: YPbPr not supported\n"); } } } if(ivideo->sisfb_tvplug != -1) { if( (ivideo->sisvga_engine != SIS_315_VGA) || (!(ivideo->vbflags2 & VB2_SISHIVISIONBRIDGE)) ) { if(ivideo->sisfb_tvplug & TV_HIVISION) { ivideo->sisfb_tvplug = -1; printk(KERN_ERR "sisfb: HiVision not supported\n"); } } } if(ivideo->sisfb_tvstd != -1) { if( (!(ivideo->vbflags2 & VB2_SISBRIDGE)) && (!((ivideo->sisvga_engine == SIS_315_VGA) && (ivideo->vbflags2 & VB2_CHRONTEL))) ) { if(ivideo->sisfb_tvstd & (TV_PALM | TV_PALN | TV_NTSCJ)) { ivideo->sisfb_tvstd = -1; printk(KERN_ERR "sisfb: PALM/PALN/NTSCJ not supported\n"); } } } /* Detect/set TV plug & type */ if(ivideo->sisfb_tvplug != -1) { ivideo->vbflags |= ivideo->sisfb_tvplug; } else { if(cr32 & SIS_VB_YPBPR) ivideo->vbflags |= (TV_YPBPR|TV_YPBPR525I); /* default: 480i */ else if(cr32 & SIS_VB_HIVISION) ivideo->vbflags |= TV_HIVISION; else if(cr32 & SIS_VB_SCART) ivideo->vbflags |= TV_SCART; else { if(cr32 & SIS_VB_SVIDEO) ivideo->vbflags |= TV_SVIDEO; if(cr32 & SIS_VB_COMPOSITE) ivideo->vbflags |= TV_AVIDEO; } } if(!(ivideo->vbflags & (TV_YPBPR | TV_HIVISION))) { if(ivideo->sisfb_tvstd != -1) { ivideo->vbflags &= ~(TV_NTSC | TV_PAL | TV_PALM | TV_PALN | TV_NTSCJ); ivideo->vbflags |= ivideo->sisfb_tvstd; } if(ivideo->vbflags & TV_SCART) { ivideo->vbflags &= ~(TV_NTSC | TV_PALM | TV_PALN | 
TV_NTSCJ); ivideo->vbflags |= TV_PAL; } if(!(ivideo->vbflags & (TV_PAL | TV_NTSC | TV_PALM | TV_PALN | TV_NTSCJ))) { if(ivideo->sisvga_engine == SIS_300_VGA) { inSISIDXREG(SISSR, 0x38, temp); if(temp & 0x01) ivideo->vbflags |= TV_PAL; else ivideo->vbflags |= TV_NTSC; } else if((ivideo->chip <= SIS_315PRO) || (ivideo->chip >= SIS_330)) { inSISIDXREG(SISSR, 0x38, temp); if(temp & 0x01) ivideo->vbflags |= TV_PAL; else ivideo->vbflags |= TV_NTSC; } else { inSISIDXREG(SISCR, 0x79, temp); if(temp & 0x20) ivideo->vbflags |= TV_PAL; else ivideo->vbflags |= TV_NTSC; } } } /* Copy forceCRT1 option to CRT1off if option is given */ if(ivideo->sisfb_forcecrt1 != -1) { ivideo->sisfb_crt1off = (ivideo->sisfb_forcecrt1) ? 0 : 1; } } /* ------------------ Sensing routines ------------------ */ static bool __devinit sisfb_test_DDC1(struct sis_video_info *ivideo) { unsigned short old; int count = 48; old = SiS_ReadDDC1Bit(&ivideo->SiS_Pr); do { if(old != SiS_ReadDDC1Bit(&ivideo->SiS_Pr)) break; } while(count--); return (count != -1); } static void __devinit sisfb_sense_crt1(struct sis_video_info *ivideo) { bool mustwait = false; u8 sr1F, cr17; #ifdef CONFIG_FB_SIS_315 u8 cr63=0; #endif u16 temp = 0xffff; int i; inSISIDXREG(SISSR,0x1F,sr1F); orSISIDXREG(SISSR,0x1F,0x04); andSISIDXREG(SISSR,0x1F,0x3F); if(sr1F & 0xc0) mustwait = true; #ifdef CONFIG_FB_SIS_315 if(ivideo->sisvga_engine == SIS_315_VGA) { inSISIDXREG(SISCR,ivideo->SiS_Pr.SiS_MyCR63,cr63); cr63 &= 0x40; andSISIDXREG(SISCR,ivideo->SiS_Pr.SiS_MyCR63,0xBF); } #endif inSISIDXREG(SISCR,0x17,cr17); cr17 &= 0x80; if(!cr17) { orSISIDXREG(SISCR,0x17,0x80); mustwait = true; outSISIDXREG(SISSR, 0x00, 0x01); outSISIDXREG(SISSR, 0x00, 0x03); } if(mustwait) { for(i=0; i < 10; i++) sisfbwaitretracecrt1(ivideo); } #ifdef CONFIG_FB_SIS_315 if(ivideo->chip >= SIS_330) { andSISIDXREG(SISCR,0x32,~0x20); if(ivideo->chip >= SIS_340) { outSISIDXREG(SISCR, 0x57, 0x4a); } else { outSISIDXREG(SISCR, 0x57, 0x5f); } orSISIDXREG(SISCR, 0x53, 0x02); 
while((inSISREG(SISINPSTAT)) & 0x01) break; while(!((inSISREG(SISINPSTAT)) & 0x01)) break; if((inSISREG(SISMISCW)) & 0x10) temp = 1; andSISIDXREG(SISCR, 0x53, 0xfd); andSISIDXREG(SISCR, 0x57, 0x00); } #endif if(temp == 0xffff) { i = 3; do { temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine, 0, 0, NULL, ivideo->vbflags2); } while(((temp == 0) || (temp == 0xffff)) && i--); if((temp == 0) || (temp == 0xffff)) { if(sisfb_test_DDC1(ivideo)) temp = 1; } } if((temp) && (temp != 0xffff)) { orSISIDXREG(SISCR,0x32,0x20); } #ifdef CONFIG_FB_SIS_315 if(ivideo->sisvga_engine == SIS_315_VGA) { setSISIDXREG(SISCR,ivideo->SiS_Pr.SiS_MyCR63,0xBF,cr63); } #endif setSISIDXREG(SISCR,0x17,0x7F,cr17); outSISIDXREG(SISSR,0x1F,sr1F); } /* Determine and detect attached devices on SiS30x */ static void __devinit SiS_SenseLCD(struct sis_video_info *ivideo) { unsigned char buffer[256]; unsigned short temp, realcrtno, i; u8 reg, cr37 = 0, paneltype = 0; u16 xres, yres; ivideo->SiS_Pr.PanelSelfDetected = false; /* LCD detection only for TMDS bridges */ if(!(ivideo->vbflags2 & VB2_SISTMDSBRIDGE)) return; if(ivideo->vbflags2 & VB2_30xBDH) return; /* If LCD already set up by BIOS, skip it */ inSISIDXREG(SISCR, 0x32, reg); if(reg & 0x08) return; realcrtno = 1; if(ivideo->SiS_Pr.DDCPortMixup) realcrtno = 0; /* Check DDC capabilities */ temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine, realcrtno, 0, &buffer[0], ivideo->vbflags2); if((!temp) || (temp == 0xffff) || (!(temp & 0x02))) return; /* Read DDC data */ i = 3; /* Number of retrys */ do { temp = SiS_HandleDDC(&ivideo->SiS_Pr, ivideo->vbflags, ivideo->sisvga_engine, realcrtno, 1, &buffer[0], ivideo->vbflags2); } while((temp) && i--); if(temp) return; /* No digital device */ if(!(buffer[0x14] & 0x80)) return; /* First detailed timing preferred timing? 
*/ if(!(buffer[0x18] & 0x02)) return; xres = buffer[0x38] | ((buffer[0x3a] & 0xf0) << 4); yres = buffer[0x3b] | ((buffer[0x3d] & 0xf0) << 4); switch(xres) { case 1024: if(yres == 768) paneltype = 0x02; break; case 1280: if(yres == 1024) paneltype = 0x03; break; case 1600: if((yres == 1200) && (ivideo->vbflags2 & VB2_30xC)) paneltype = 0x0b; break; } if(!paneltype) return; if(buffer[0x23]) cr37 |= 0x10; if((buffer[0x47] & 0x18) == 0x18) cr37 |= ((((buffer[0x47] & 0x06) ^ 0x06) << 5) | 0x20); else cr37 |= 0xc0; outSISIDXREG(SISCR, 0x36, paneltype); cr37 &= 0xf1; setSISIDXREG(SISCR, 0x37, 0x0c, cr37); orSISIDXREG(SISCR, 0x32, 0x08); ivideo->SiS_Pr.PanelSelfDetected = true; } static int __devinit SISDoSense(struct sis_video_info *ivideo, u16 type, u16 test) { int temp, mytest, result, i, j; for(j = 0; j < 10; j++) { result = 0; for(i = 0; i < 3; i++) { mytest = test; outSISIDXREG(SISPART4,0x11,(type & 0x00ff)); temp = (type >> 8) | (mytest & 0x00ff); setSISIDXREG(SISPART4,0x10,0xe0,temp); SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1500); mytest >>= 8; mytest &= 0x7f; inSISIDXREG(SISPART4,0x03,temp); temp ^= 0x0e; temp &= mytest; if(temp == mytest) result++; #if 1 outSISIDXREG(SISPART4,0x11,0x00); andSISIDXREG(SISPART4,0x10,0xe0); SiS_DDC2Delay(&ivideo->SiS_Pr, 0x1000); #endif } if((result == 0) || (result >= 2)) break; } return result; } static void __devinit SiS_Sense30x(struct sis_video_info *ivideo) { u8 backupP4_0d,backupP2_00,backupP2_4d,backupSR_1e,biosflag=0; u16 svhs=0, svhs_c=0; u16 cvbs=0, cvbs_c=0; u16 vga2=0, vga2_c=0; int myflag, result; char stdstr[] = "sisfb: Detected"; char tvstr[] = "TV connected to"; if(ivideo->vbflags2 & VB2_301) { svhs = 0x00b9; cvbs = 0x00b3; vga2 = 0x00d1; inSISIDXREG(SISPART4,0x01,myflag); if(myflag & 0x04) { svhs = 0x00dd; cvbs = 0x00ee; vga2 = 0x00fd; } } else if(ivideo->vbflags2 & (VB2_301B | VB2_302B)) { svhs = 0x016b; cvbs = 0x0174; vga2 = 0x0190; } else if(ivideo->vbflags2 & (VB2_301LV | VB2_302LV)) { svhs = 0x0200; cvbs = 0x0100; } 
else if(ivideo->vbflags2 & (VB2_301C | VB2_302ELV | VB2_307T | VB2_307LV)) { svhs = 0x016b; cvbs = 0x0110; vga2 = 0x0190; } else return; vga2_c = 0x0e08; svhs_c = 0x0404; cvbs_c = 0x0804; if(ivideo->vbflags & (VB2_301LV|VB2_302LV|VB2_302ELV|VB2_307LV)) { svhs_c = 0x0408; cvbs_c = 0x0808; } biosflag = 2; if(ivideo->haveXGIROM) { biosflag = ivideo->bios_abase[0x58] & 0x03; } else if(ivideo->newrom) { if(ivideo->bios_abase[0x5d] & 0x04) biosflag |= 0x01; } else if(ivideo->sisvga_engine == SIS_300_VGA) { if(ivideo->bios_abase) { biosflag = ivideo->bios_abase[0xfe] & 0x03; } } if(ivideo->chip == SIS_300) { inSISIDXREG(SISSR,0x3b,myflag); if(!(myflag & 0x01)) vga2 = vga2_c = 0; } if(!(ivideo->vbflags2 & VB2_SISVGA2BRIDGE)) { vga2 = vga2_c = 0; } inSISIDXREG(SISSR,0x1e,backupSR_1e); orSISIDXREG(SISSR,0x1e,0x20); inSISIDXREG(SISPART4,0x0d,backupP4_0d); if(ivideo->vbflags2 & VB2_30xC) { setSISIDXREG(SISPART4,0x0d,~0x07,0x01); } else { orSISIDXREG(SISPART4,0x0d,0x04); } SiS_DDC2Delay(&ivideo->SiS_Pr, 0x2000); inSISIDXREG(SISPART2,0x00,backupP2_00); outSISIDXREG(SISPART2,0x00,((backupP2_00 | 0x1c) & 0xfc)); inSISIDXREG(SISPART2,0x4d,backupP2_4d); if(ivideo->vbflags2 & VB2_SISYPBPRBRIDGE) { outSISIDXREG(SISPART2,0x4d,(backupP2_4d & ~0x10)); } if(!(ivideo->vbflags2 & VB2_30xCLV)) { SISDoSense(ivideo, 0, 0); } andSISIDXREG(SISCR, 0x32, ~0x14); if(vga2_c || vga2) { if(SISDoSense(ivideo, vga2, vga2_c)) { if(biosflag & 0x01) { printk(KERN_INFO "%s %s SCART output\n", stdstr, tvstr); orSISIDXREG(SISCR, 0x32, 0x04); } else { printk(KERN_INFO "%s secondary VGA connection\n", stdstr); orSISIDXREG(SISCR, 0x32, 0x10); } } } andSISIDXREG(SISCR, 0x32, 0x3f); if(ivideo->vbflags2 & VB2_30xCLV) { orSISIDXREG(SISPART4,0x0d,0x04); } if((ivideo->sisvga_engine == SIS_315_VGA) && (ivideo->vbflags2 & VB2_SISYPBPRBRIDGE)) { outSISIDXREG(SISPART2,0x4d,(backupP2_4d | 0x10)); SiS_DDC2Delay(&ivideo->SiS_Pr, 0x2000); if((result = SISDoSense(ivideo, svhs, 0x0604))) { if((result = SISDoSense(ivideo, cvbs, 
0x0804))) {
				printk(KERN_INFO "%s %s YPbPr component output\n", stdstr, tvstr);
				orSISIDXREG(SISCR,0x32,0x80);
			}
		}
		outSISIDXREG(SISPART2,0x4d,backupP2_4d);
	}

	andSISIDXREG(SISCR, 0x32, ~0x03);

	if(!(ivideo->vbflags & TV_YPBPR)) {
		if((result = SISDoSense(ivideo, svhs, svhs_c))) {
			printk(KERN_INFO "%s %s SVIDEO output\n", stdstr, tvstr);
			orSISIDXREG(SISCR, 0x32, 0x02);
		}
		if((biosflag & 0x02) || (!result)) {
			if(SISDoSense(ivideo, cvbs, cvbs_c)) {
				printk(KERN_INFO "%s %s COMPOSITE output\n", stdstr, tvstr);
				orSISIDXREG(SISCR, 0x32, 0x01);
			}
		}
	}

	SISDoSense(ivideo, 0, 0);

	/* Restore all registers touched during sensing */
	outSISIDXREG(SISPART2,0x00,backupP2_00);
	outSISIDXREG(SISPART4,0x0d,backupP4_0d);
	outSISIDXREG(SISSR,0x1e,backupSR_1e);

	if(ivideo->vbflags2 & VB2_30xCLV) {
		/* Toggle Part2 0x00 bit 5 twice to settle the bridge */
		inSISIDXREG(SISPART2,0x00,biosflag);
		if(biosflag & 0x20) {
			for(myflag = 2; myflag > 0; myflag--) {
				biosflag ^= 0x20;
				outSISIDXREG(SISPART2,0x00,biosflag);
			}
		}
	}

	outSISIDXREG(SISPART2,0x00,backupP2_00);
}

/* Determine and detect attached TV's on Chrontel.
 * Chrontel 700x (300 series) is probed via GPIO-bitbanged access; the
 * sense is run three times and majority-voted. Chrontel 7019 (315
 * series) is probed through its status register 0x20. Results are
 * recorded in ivideo->vbflags and CR32.
 */
static void __devinit SiS_SenseCh(struct sis_video_info *ivideo)
{
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
	u8 temp1, temp2;
	char stdstr[] = "sisfb: Chrontel: Detected TV connected to";
#endif
#ifdef CONFIG_FB_SIS_300
	unsigned char test[3];
	int i;
#endif

	if(ivideo->chip < SIS_315H) {

#ifdef CONFIG_FB_SIS_300
		ivideo->SiS_Pr.SiS_IF_DEF_CH70xx = 1;		/* Chrontel 700x */
		SiS_SetChrontelGPIO(&ivideo->SiS_Pr, 0x9c);	/* Set general purpose IO for Chrontel communication */
		SiS_DDC2Delay(&ivideo->SiS_Pr, 1000);
		temp1 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x25);
		/* See Chrontel TB31 for explanation */
		temp2 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x0e);
		if(((temp2 & 0x07) == 0x01) || (temp2 & 0x04)) {
			SiS_SetCH700x(&ivideo->SiS_Pr, 0x0e, 0x0b);
			SiS_DDC2Delay(&ivideo->SiS_Pr, 300);
		}
		temp2 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x25);
		if(temp2 != temp1) temp1 = temp2;

		if((temp1 >= 0x22) && (temp1 <= 0x50)) {

			/* Read power status */
			temp1 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x0e);
			if((temp1 & 0x03) != 0x03) {
				/* Power all outputs */
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x0e,0x0b);
				SiS_DDC2Delay(&ivideo->SiS_Pr, 300);
			}

			/* Sense connected TV devices */
			for(i = 0; i < 3; i++) {
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x10, 0x01);
				SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x10, 0x00);
				SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
				temp1 = SiS_GetCH700x(&ivideo->SiS_Pr, 0x10);
				if(!(temp1 & 0x08))       test[i] = 0x02;
				else if(!(temp1 & 0x02))  test[i] = 0x01;
				else                      test[i] = 0;
				SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
			}

			/* Majority vote over the three sense results */
			if(test[0] == test[1])      temp1 = test[0];
			else if(test[0] == test[2]) temp1 = test[0];
			else if(test[1] == test[2]) temp1 = test[1];
			else {
				printk(KERN_INFO
					"sisfb: TV detection unreliable - test results varied\n");
				temp1 = test[2];
			}
			if(temp1 == 0x02) {
				printk(KERN_INFO "%s SVIDEO output\n", stdstr);
				ivideo->vbflags |= TV_SVIDEO;
				orSISIDXREG(SISCR, 0x32, 0x02);
				andSISIDXREG(SISCR, 0x32, ~0x05);
			} else if (temp1 == 0x01) {
				printk(KERN_INFO "%s CVBS output\n", stdstr);
				ivideo->vbflags |= TV_AVIDEO;
				orSISIDXREG(SISCR, 0x32, 0x01);
				andSISIDXREG(SISCR, 0x32, ~0x06);
			} else {
				SiS_SetCH70xxANDOR(&ivideo->SiS_Pr, 0x0e, 0x01, 0xF8);
				andSISIDXREG(SISCR, 0x32, ~0x07);
			}
		} else if(temp1 == 0) {
			SiS_SetCH70xxANDOR(&ivideo->SiS_Pr, 0x0e, 0x01, 0xF8);
			andSISIDXREG(SISCR, 0x32, ~0x07);
		}
		/* Set general purpose IO for Chrontel communication */
		SiS_SetChrontelGPIO(&ivideo->SiS_Pr, 0x00);
#endif

	} else {

#ifdef CONFIG_FB_SIS_315
		ivideo->SiS_Pr.SiS_IF_DEF_CH70xx = 2;		/* Chrontel 7019 */
		temp1 = SiS_GetCH701x(&ivideo->SiS_Pr, 0x49);
		SiS_SetCH701x(&ivideo->SiS_Pr, 0x49, 0x20);
		SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
		temp2 = SiS_GetCH701x(&ivideo->SiS_Pr, 0x20);
		temp2 |= 0x01;
		SiS_SetCH701x(&ivideo->SiS_Pr, 0x20, temp2);
		SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
		temp2 ^= 0x01;
		SiS_SetCH701x(&ivideo->SiS_Pr, 0x20, temp2);
		SiS_DDC2Delay(&ivideo->SiS_Pr, 0x96);
		temp2 = SiS_GetCH701x(&ivideo->SiS_Pr, 0x20);
		SiS_SetCH701x(&ivideo->SiS_Pr, 0x49, temp1);
		temp1 = 0;
		/* NOTE(review): both 0x02 and 0x10 map to the same result bit
		 * 0x01 (CVBS); presumably two alternative CVBS sense bits on
		 * the 7019 — confirm against the Chrontel datasheet.
		 */
		if(temp2 & 0x02) temp1 |= 0x01;
		if(temp2 & 0x10) temp1 |= 0x01;
		if(temp2 & 0x04) temp1 |= 0x02;
		if( (temp1 & 0x01) && (temp1 & 0x02) ) temp1 = 0x04;

		switch(temp1) {
		case 0x01:
			printk(KERN_INFO "%s CVBS output\n", stdstr);
			ivideo->vbflags |= TV_AVIDEO;
			orSISIDXREG(SISCR, 0x32, 0x01);
			andSISIDXREG(SISCR, 0x32, ~0x06);
			break;
		case 0x02:
			printk(KERN_INFO "%s SVIDEO output\n", stdstr);
			ivideo->vbflags |= TV_SVIDEO;
			orSISIDXREG(SISCR, 0x32, 0x02);
			andSISIDXREG(SISCR, 0x32, ~0x05);
			break;
		case 0x04:
			printk(KERN_INFO "%s SCART output\n", stdstr);
			orSISIDXREG(SISCR, 0x32, 0x04);
			andSISIDXREG(SISCR, 0x32, ~0x03);
			break;
		default:
			andSISIDXREG(SISCR, 0x32, ~0x07);
		}
#endif
	}
}

/* Identify the video bridge / transmitter attached to the chip: first a
 * SiS 301/302 family bridge via Part4 0x00/0x01, otherwise an external
 * LVDS/Trumpion/Chrontel/Conexant device via CR37 (or CR38 on 661+).
 * Sets vbflags (deprecated VB_* bits) and vbflags2 (VB2_* bits), then
 * runs the appropriate sensing routines.
 */
static void __devinit sisfb_get_VB_type(struct sis_video_info *ivideo)
{
	char stdstr[]    = "sisfb: Detected";
	char bridgestr[] = "video bridge";
	u8 vb_chipid;
	u8 reg;

	/* No CRT2 on XGI Z7 */
	if(ivideo->chip == XGI_20)
		return;

	inSISIDXREG(SISPART4, 0x00, vb_chipid);
	switch(vb_chipid) {
	case 0x01:
		inSISIDXREG(SISPART4, 0x01, reg);
		if(reg < 0xb0) {
			ivideo->vbflags |= VB_301;	/* Deprecated */
			ivideo->vbflags2 |= VB2_301;
			printk(KERN_INFO "%s SiS301 %s\n", stdstr, bridgestr);
		} else if(reg < 0xc0) {
			ivideo->vbflags |= VB_301B;	/* Deprecated */
			ivideo->vbflags2 |= VB2_301B;
			inSISIDXREG(SISPART4,0x23,reg);
			if(!(reg & 0x02)) {
				ivideo->vbflags |= VB_30xBDH;	/* Deprecated */
				ivideo->vbflags2 |= VB2_30xBDH;
				printk(KERN_INFO "%s SiS301B-DH %s\n", stdstr, bridgestr);
			} else {
				printk(KERN_INFO "%s SiS301B %s\n", stdstr, bridgestr);
			}
		} else if(reg < 0xd0) {
			ivideo->vbflags |= VB_301C;	/* Deprecated */
			ivideo->vbflags2 |= VB2_301C;
			printk(KERN_INFO "%s SiS301C %s\n", stdstr, bridgestr);
		} else if(reg < 0xe0) {
			ivideo->vbflags |= VB_301LV;	/* Deprecated */
			ivideo->vbflags2 |= VB2_301LV;
			printk(KERN_INFO "%s SiS301LV %s\n", stdstr, bridgestr);
		} else if(reg <= 0xe1) {
			inSISIDXREG(SISPART4,0x39,reg);
			if(reg == 0xff) {
				ivideo->vbflags |= VB_302LV;	/* Deprecated */
				ivideo->vbflags2 |= VB2_302LV;
				printk(KERN_INFO "%s SiS302LV %s\n", stdstr, bridgestr);
			} else {
				ivideo->vbflags |= VB_301C;	/* Deprecated */
				ivideo->vbflags2 |= VB2_301C;
				printk(KERN_INFO "%s SiS301C(P4) %s\n", stdstr, bridgestr);
#if 0
				ivideo->vbflags |= VB_302ELV;	/* Deprecated */
				ivideo->vbflags2 |= VB2_302ELV;
				printk(KERN_INFO "%s SiS302ELV %s\n", stdstr, bridgestr);
#endif
			}
		}
		break;
	case 0x02:
		ivideo->vbflags |= VB_302B;	/* Deprecated */
		ivideo->vbflags2 |= VB2_302B;
		printk(KERN_INFO "%s SiS302B %s\n", stdstr, bridgestr);
		break;
	}

	/* No SiS bridge found: look for an external transmitter via CR37 */
	if((!(ivideo->vbflags2 & VB2_VIDEOBRIDGE)) && (ivideo->chip != SIS_300)) {
		inSISIDXREG(SISCR, 0x37, reg);
		reg &= SIS_EXTERNAL_CHIP_MASK;
		reg >>= 1;
		if(ivideo->sisvga_engine == SIS_300_VGA) {
#ifdef CONFIG_FB_SIS_300
			switch(reg) {
			case SIS_EXTERNAL_CHIP_LVDS:
				ivideo->vbflags |= VB_LVDS;	/* Deprecated */
				ivideo->vbflags2 |= VB2_LVDS;
				break;
			case SIS_EXTERNAL_CHIP_TRUMPION:
				ivideo->vbflags |= (VB_LVDS | VB_TRUMPION);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_TRUMPION);
				break;
			case SIS_EXTERNAL_CHIP_CHRONTEL:
				ivideo->vbflags |= VB_CHRONTEL;	/* Deprecated */
				ivideo->vbflags2 |= VB2_CHRONTEL;
				break;
			case SIS_EXTERNAL_CHIP_LVDS_CHRONTEL:
				ivideo->vbflags |= (VB_LVDS | VB_CHRONTEL);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_CHRONTEL);
				break;
			}
			if(ivideo->vbflags2 & VB2_CHRONTEL) ivideo->chronteltype = 1;
#endif
		} else if(ivideo->chip < SIS_661) {
#ifdef CONFIG_FB_SIS_315
			switch (reg) {
			case SIS310_EXTERNAL_CHIP_LVDS:
				ivideo->vbflags |= VB_LVDS;	/* Deprecated */
				ivideo->vbflags2 |= VB2_LVDS;
				break;
			case SIS310_EXTERNAL_CHIP_LVDS_CHRONTEL:
				ivideo->vbflags |= (VB_LVDS | VB_CHRONTEL);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_CHRONTEL);
				break;
			}
			if(ivideo->vbflags2 & VB2_CHRONTEL) ivideo->chronteltype = 2;
#endif
		} else if(ivideo->chip >= SIS_661) {
#ifdef CONFIG_FB_SIS_315
			/* 661 and later encode the external chip in CR38 bits 7:5 */
			inSISIDXREG(SISCR, 0x38, reg);
			reg >>= 5;
			switch(reg) {
			case 0x02:
				ivideo->vbflags |= VB_LVDS;	/* Deprecated */
				ivideo->vbflags2 |= VB2_LVDS;
				break;
			case 0x03:
				ivideo->vbflags |= (VB_LVDS | VB_CHRONTEL);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_CHRONTEL);
				break;
			case 0x04:
				ivideo->vbflags |= (VB_LVDS | VB_CONEXANT);	/* Deprecated */
				ivideo->vbflags2 |= (VB2_LVDS | VB2_CONEXANT);
				break;
			}
			if(ivideo->vbflags2 & VB2_CHRONTEL) ivideo->chronteltype = 2;
#endif
		}
		if(ivideo->vbflags2 & VB2_LVDS) {
			printk(KERN_INFO "%s LVDS transmitter\n", stdstr);
		}
		if((ivideo->sisvga_engine == SIS_300_VGA) && (ivideo->vbflags2 & VB2_TRUMPION)) {
			printk(KERN_INFO "%s Trumpion Zurac LCD scaler\n", stdstr);
		}
		if(ivideo->vbflags2 & VB2_CHRONTEL) {
			printk(KERN_INFO "%s Chrontel TV encoder\n", stdstr);
		}
		if((ivideo->chip >= SIS_661) && (ivideo->vbflags2 & VB2_CONEXANT)) {
			printk(KERN_INFO "%s Conexant external device\n", stdstr);
		}
	}

	/* Now sense attached devices through whatever we found */
	if(ivideo->vbflags2 & VB2_SISBRIDGE) {
		SiS_SenseLCD(ivideo);
		SiS_Sense30x(ivideo);
	} else if(ivideo->vbflags2 & VB2_CHRONTEL) {
		SiS_SenseCh(ivideo);
	}
}

/* ---------- Engine initialization routines ------------ */

/* Set up the 2D engine command queue: TurboQueue on 300-series, MMIO
 * command queue on 315-series/XGI (with a one-shot VRAM-queue detour on
 * XGI_40+ to disable the dual pipe). Updates ivideo->caps and sets
 * ivideo->engineok.
 */
static void sisfb_engine_init(struct sis_video_info *ivideo)
{

	/* Initialize command queue (we use MMIO only) */

	/* BEFORE THIS IS CALLED, THE ENGINES *MUST* BE SYNC'ED */

	ivideo->caps &= ~(TURBO_QUEUE_CAP    |
			  MMIO_CMD_QUEUE_CAP |
			  VM_CMD_QUEUE_CAP   |
			  AGP_CMD_QUEUE_CAP);

#ifdef CONFIG_FB_SIS_300
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		u32 tqueue_pos;
		u8 tq_state;

		/* Queue sits at the top of video RAM, in 64k units */
		tqueue_pos = (ivideo->video_size - ivideo->cmdQueueSize) / (64 * 1024);

		inSISIDXREG(SISSR, IND_SIS_TURBOQUEUE_SET, tq_state);
		tq_state |= 0xf0;
		tq_state &= 0xfc;
		tq_state |= (u8)(tqueue_pos >> 8);
		outSISIDXREG(SISSR, IND_SIS_TURBOQUEUE_SET, tq_state);

		outSISIDXREG(SISSR, IND_SIS_TURBOQUEUE_ADR, (u8)(tqueue_pos & 0xff));

		ivideo->caps |= TURBO_QUEUE_CAP;
	}
#endif

#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		u32 tempq = 0, templ;
		u8  temp;

		if(ivideo->chip == XGI_20) {
			switch(ivideo->cmdQueueSize) {
			case (64 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_Z7_64k;
				break;
			case (128 * 1024):
			default:
				temp = SIS_CMD_QUEUE_SIZE_Z7_128k;
			}
		} else {
			switch(ivideo->cmdQueueSize) {
			case (4 * 1024 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_4M;
				break;
			case (2 * 1024 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_2M;
				break;
			case (1 * 1024 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_1M;
				break;
			default:
			case (512 * 1024):
				temp = SIS_CMD_QUEUE_SIZE_512k;
			}
		}

		outSISIDXREG(SISSR, IND_SIS_CMDQUEUE_THRESHOLD, COMMAND_QUEUE_THRESHOLD);
		outSISIDXREG(SISSR, IND_SIS_CMDQUEUE_SET, SIS_CMD_QUEUE_RESET);

		if((ivideo->chip >= XGI_40) && ivideo->modechanged) {
			/* Must disable dual pipe on XGI_40. Can't do
			 * this in MMIO mode, because it requires
			 * setting/clearing a bit in the MMIO fire trigger
			 * register.
			 */
			if(!((templ = MMIO_IN32(ivideo->mmio_vbase, 0x8240)) & (1 << 10))) {

				MMIO_OUT32(ivideo->mmio_vbase, Q_WRITE_PTR, 0);

				outSISIDXREG(SISSR, IND_SIS_CMDQUEUE_SET, (temp | SIS_VRAM_CMDQUEUE_ENABLE));

				tempq = MMIO_IN32(ivideo->mmio_vbase, Q_READ_PTR);
				MMIO_OUT32(ivideo->mmio_vbase, Q_WRITE_PTR, tempq);

				tempq = (u32)(ivideo->video_size - ivideo->cmdQueueSize);
				MMIO_OUT32(ivideo->mmio_vbase, Q_BASE_ADDR, tempq);

				/* Hand-assembled queue commands: set bit 10 of 0x8240, pad with NOPs */
				writel(0x16800000 + 0x8240, ivideo->video_vbase + tempq);
				writel(templ | (1 << 10), ivideo->video_vbase + tempq + 4);
				writel(0x168F0000, ivideo->video_vbase + tempq + 8);
				writel(0x168F0000, ivideo->video_vbase + tempq + 12);

				MMIO_OUT32(ivideo->mmio_vbase, Q_WRITE_PTR, (tempq + 16));

				sisfb_syncaccel(ivideo);

				outSISIDXREG(SISSR, IND_SIS_CMDQUEUE_SET, SIS_CMD_QUEUE_RESET);

			}
		}

		tempq = MMIO_IN32(ivideo->mmio_vbase, MMIO_QUEUE_READPORT);
		MMIO_OUT32(ivideo->mmio_vbase, MMIO_QUEUE_WRITEPORT, tempq);

		temp |= (SIS_MMIO_CMD_ENABLE | SIS_CMD_AUTO_CORR);
		outSISIDXREG(SISSR, IND_SIS_CMDQUEUE_SET, temp);

		tempq = (u32)(ivideo->video_size - ivideo->cmdQueueSize);
		MMIO_OUT32(ivideo->mmio_vbase, MMIO_QUEUE_PHYBASE, tempq);

		ivideo->caps |= MMIO_CMD_QUEUE_CAP;
	}
#endif

	ivideo->engineok = 1;
}

/* Translate the BIOS panel ID in CR36's low nibble into a panel type
 * and resolution (lcdxres/lcdyres/lcddefmodeidx), with fixups for
 * 550-FSTN panels, broken BIOSes and known custom panels.
 */
static void __devinit sisfb_detect_lcd_type(struct sis_video_info *ivideo)
{
	u8 reg;
	int i;

	inSISIDXREG(SISCR, 0x36, reg);
	reg &= 0x0f;
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		ivideo->CRT2LCDType = sis300paneltype[reg];
	} else if(ivideo->chip
>= SIS_661) {
		ivideo->CRT2LCDType = sis661paneltype[reg];
	} else {
		ivideo->CRT2LCDType = sis310paneltype[reg];
		if((ivideo->chip == SIS_550) && (sisfb_fstn)) {
			/* Only the 320x240 panels are valid in FSTN mode */
			if((ivideo->CRT2LCDType != LCD_320x240_2) &&
			   (ivideo->CRT2LCDType != LCD_320x240_3)) {
				ivideo->CRT2LCDType = LCD_320x240;
			}
		}
	}

	if(ivideo->CRT2LCDType == LCD_UNKNOWN) {
		/* For broken BIOSes: Assume 1024x768, RGB18 */
		ivideo->CRT2LCDType = LCD_1024x768;
		setSISIDXREG(SISCR,0x36,0xf0,0x02);
		setSISIDXREG(SISCR,0x37,0xee,0x01);
		printk(KERN_DEBUG "sisfb: Invalid panel ID (%02x), assuming 1024x768, RGB18\n", reg);
	}

	for(i = 0; i < SIS_LCD_NUMBER; i++) {
		if(ivideo->CRT2LCDType == sis_lcd_data[i].lcdtype) {
			ivideo->lcdxres = sis_lcd_data[i].xres;
			ivideo->lcdyres = sis_lcd_data[i].yres;
			ivideo->lcddefmodeidx = sis_lcd_data[i].default_mode_idx;
			break;
		}
	}

#ifdef CONFIG_FB_SIS_300
	/* Known custom panels override the table lookup */
	if(ivideo->SiS_Pr.SiS_CustomT == CUT_BARCO1366) {
		ivideo->lcdxres = 1360; ivideo->lcdyres = 1024;
		ivideo->lcddefmodeidx = DEFAULT_MODE_1360;
	} else if(ivideo->SiS_Pr.SiS_CustomT == CUT_PANEL848) {
		ivideo->lcdxres =  848; ivideo->lcdyres =  480;
		ivideo->lcddefmodeidx = DEFAULT_MODE_848;
	} else if(ivideo->SiS_Pr.SiS_CustomT == CUT_PANEL856) {
		ivideo->lcdxres =  856; ivideo->lcdyres =  480;
		ivideo->lcddefmodeidx = DEFAULT_MODE_856;
	}
#endif

	printk(KERN_DEBUG "sisfb: Detected %dx%d flat panel\n",
			ivideo->lcdxres, ivideo->lcdyres);
}

/* Capture the BIOS-programmed Panel Delay Compensation (and, on 315
 * LVDS bridges, the LCDA flag and EMI registers) before the driver's
 * first mode set, so these values can be re-used later. User-supplied
 * PDC/PDCA options override detection.
 */
static void __devinit sisfb_save_pdc_emi(struct sis_video_info *ivideo)
{
#ifdef CONFIG_FB_SIS_300
	/* Save the current PanelDelayCompensation if the LCD is currently used */
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		if(ivideo->vbflags2 & (VB2_LVDS | VB2_30xBDH)) {
			int tmp;
			inSISIDXREG(SISCR,0x30,tmp);
			if(tmp & 0x20) {
				/* Currently on LCD? If yes, read current pdc */
				inSISIDXREG(SISPART1,0x13,ivideo->detectedpdc);
				ivideo->detectedpdc &= 0x3c;
				if(ivideo->SiS_Pr.PDC == -1) {
					/* Let option override detection */
					ivideo->SiS_Pr.PDC = ivideo->detectedpdc;
				}
				printk(KERN_INFO "sisfb: Detected LCD PDC 0x%02x\n",
					ivideo->detectedpdc);
			}
			if((ivideo->SiS_Pr.PDC != -1) &&
			   (ivideo->SiS_Pr.PDC != ivideo->detectedpdc)) {
				printk(KERN_INFO "sisfb: Using LCD PDC 0x%02x\n",
					ivideo->SiS_Pr.PDC);
			}
		}
	}
#endif

#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {

		/* Try to find about LCDA */
		if(ivideo->vbflags2 & VB2_SISLCDABRIDGE) {
			int tmp;
			inSISIDXREG(SISPART1,0x13,tmp);
			if(tmp & 0x04) {
				ivideo->SiS_Pr.SiS_UseLCDA = true;
				ivideo->detectedlcda = 0x03;
			}
		}

		/* Save PDC */
		if(ivideo->vbflags2 & VB2_SISLVDSBRIDGE) {
			int tmp;
			inSISIDXREG(SISCR,0x30,tmp);
			if((tmp & 0x20) || (ivideo->detectedlcda != 0xff)) {
				/* Currently on LCD? If yes, read current pdc */
				u8 pdc;
				/* PDC (CRT2) and PDCA (CRT1) are split over
				 * Part1 0x2D plus one extra bit each in
				 * 0x35 / 0x20
				 */
				inSISIDXREG(SISPART1,0x2D,pdc);
				ivideo->detectedpdc  = (pdc & 0x0f) << 1;
				ivideo->detectedpdca = (pdc & 0xf0) >> 3;
				inSISIDXREG(SISPART1,0x35,pdc);
				ivideo->detectedpdc |= ((pdc >> 7) & 0x01);
				inSISIDXREG(SISPART1,0x20,pdc);
				ivideo->detectedpdca |= ((pdc >> 6) & 0x01);
				if(ivideo->newrom) {
					/* New ROM invalidates other PDC resp.
eventually
	 * into the LFB area if the given mem parameter is
	 * higher than the size of the UMA memory.
	 *
	 * Basically given by "mem" parameter
	 *
	 * maximum = videosize - cmd_queue - hwcursor
	 *           (results in a heap of size 0)
	 * default = SiS 300: depends on videosize
	 *           SiS 315/330/340/XGI: 32k below max
	 */

	if(ivideo->sisvga_engine == SIS_300_VGA) {
		if(ivideo->video_size > 0x1000000) {
			def = 0xc00000;
		} else if(ivideo->video_size > 0x800000) {
			def = 0x800000;
		} else {
			def = 0x400000;
		}
	} else if(ivideo->UMAsize && ivideo->LFBsize) {
		ret = def = 0;
	} else {
		def = maxoffs - 0x8000;
	}

	/* Use default for secondary card for now (FIXME) */
	if((!ret) || (ret > maxoffs) || (ivideo->cardnumber != 0))
		ret = def;

	return ret;
}

/* Compute the heap size matching sisfb_getheapstart()'s layout and set
 * ivideo->sisfb_mem (console memory size) and, in UMA+LFB mode,
 * ivideo->video_offset as side effects.
 */
static u32 __devinit sisfb_getheapsize(struct sis_video_info *ivideo)
{
	u32 max = ivideo->video_size - ivideo->hwcursor_size - ivideo->cmdQueueSize;
	u32 ret = 0;

	if(ivideo->UMAsize && ivideo->LFBsize) {
		if( (!ivideo->sisfb_parm_mem)			||
		    ((ivideo->sisfb_parm_mem * 1024) > max)	||
		    ((max - (ivideo->sisfb_parm_mem * 1024)) < ivideo->UMAsize) ) {
			ret = ivideo->UMAsize;
			max -= ivideo->UMAsize;
		} else {
			ret = max - (ivideo->sisfb_parm_mem * 1024);
			max = ivideo->sisfb_parm_mem * 1024;
		}
		ivideo->video_offset = ret;
		ivideo->sisfb_mem = max;
	} else {
		ret = max - ivideo->heapstart;
		ivideo->sisfb_mem = ivideo->heapstart;
	}

	return ret;
}

/* Initialize this card's video-RAM heap (free/used sentinel lists plus
 * one free node spanning the whole heap). Returns 0 on success, 1 if
 * the initial node allocation failed. The first card's heap also
 * becomes the global sisfb_heap used by the legacy single-card DRM API.
 */
static int __devinit sisfb_heap_init(struct sis_video_info *ivideo)
{
	struct SIS_OH *poh;

	ivideo->video_offset = 0;
	if(ivideo->sisfb_parm_mem) {
		/* Reject a nonsensical "mem" parameter */
		if( (ivideo->sisfb_parm_mem < (2 * 1024 * 1024)) ||
		    (ivideo->sisfb_parm_mem > ivideo->video_size) ) {
			ivideo->sisfb_parm_mem = 0;
		}
	}

	ivideo->heapstart = sisfb_getheapstart(ivideo);
	ivideo->sisfb_heap_size = sisfb_getheapsize(ivideo);

	ivideo->sisfb_heap_start = ivideo->video_vbase + ivideo->heapstart;
	ivideo->sisfb_heap_end   = ivideo->sisfb_heap_start + ivideo->sisfb_heap_size;

	printk(KERN_INFO "sisfb: Memory heap starting at %dK, size %dK\n",
		(int)(ivideo->heapstart / 1024),
		(int)(ivideo->sisfb_heap_size / 1024));

	ivideo->sisfb_heap.vinfo = ivideo;

	ivideo->sisfb_heap.poha_chain = NULL;
	ivideo->sisfb_heap.poh_freelist = NULL;

	poh = sisfb_poh_new_node(&ivideo->sisfb_heap);
	if(poh == NULL)
		return 1;

	poh->poh_next = &ivideo->sisfb_heap.oh_free;
	poh->poh_prev = &ivideo->sisfb_heap.oh_free;
	poh->size = ivideo->sisfb_heap_size;
	poh->offset = ivideo->heapstart;

	ivideo->sisfb_heap.oh_free.poh_next = poh;
	ivideo->sisfb_heap.oh_free.poh_prev = poh;
	ivideo->sisfb_heap.oh_free.size = 0;
	ivideo->sisfb_heap.max_freesize = poh->size;

	ivideo->sisfb_heap.oh_used.poh_next = &ivideo->sisfb_heap.oh_used;
	ivideo->sisfb_heap.oh_used.poh_prev = &ivideo->sisfb_heap.oh_used;
	ivideo->sisfb_heap.oh_used.size = SENTINEL;

	if(ivideo->cardnumber == 0) {
		/* For the first card, make this heap the "global" one
		 * for old DRM (which could handle only one card)
		 */
		sisfb_heap = &ivideo->sisfb_heap;
	}

	return 0;
}

/* Pop a node from the heap's node freelist, refilling it with a fresh
 * kmalloc'ed SIS_OHALLOC batch when empty. Returns NULL on OOM.
 */
static struct SIS_OH *
sisfb_poh_new_node(struct SIS_HEAP *memheap)
{
	struct SIS_OHALLOC	*poha;
	struct SIS_OH		*poh;
	unsigned long		cOhs;
	int			i;

	if(memheap->poh_freelist == NULL) {
		poha = kmalloc(SIS_OH_ALLOC_SIZE, GFP_KERNEL);
		if(!poha)
			return NULL;

		poha->poha_next = memheap->poha_chain;
		memheap->poha_chain = poha;

		/* aoh[] is a trailing array: the batch holds one node plus
		 * whatever fits in the remainder of SIS_OH_ALLOC_SIZE
		 */
		cOhs = (SIS_OH_ALLOC_SIZE - sizeof(struct SIS_OHALLOC)) / sizeof(struct SIS_OH) + 1;

		poh = &poha->aoh[0];
		for(i = cOhs - 1; i != 0; i--) {
			poh->poh_next = poh + 1;
			poh = poh + 1;
		}

		poh->poh_next = NULL;
		memheap->poh_freelist = &poha->aoh[0];
	}

	poh = memheap->poh_freelist;
	memheap->poh_freelist = poh->poh_next;

	return poh;
}

/* First-fit allocator: find the first free extent >= size, split it if
 * larger, move the allocated node to the used list and return it
 * (NULL if no extent fits).
 */
static struct SIS_OH *
sisfb_poh_allocate(struct SIS_HEAP *memheap, u32 size)
{
	struct SIS_OH	*pohThis;
	struct SIS_OH	*pohRoot;
	int		bAllocated = 0;

	if(size > memheap->max_freesize) {
		DPRINTK("sisfb: Can't allocate %dk video memory\n",
			(unsigned int) size / 1024);
		return NULL;
	}

	pohThis = memheap->oh_free.poh_next;

	while(pohThis != &memheap->oh_free) {
		if(size <= pohThis->size) {
			bAllocated = 1;
			break;
		}
		pohThis = pohThis->poh_next;
	}

	if(!bAllocated) {
		DPRINTK("sisfb: Can't allocate %dk video memory\n",
			(unsigned int) size / 1024);
		return NULL;
	}

	if(size == pohThis->size) {
		/* Exact fit: reuse the free node itself */
		pohRoot = pohThis;
		sisfb_delete_node(pohThis);
	} else {
		/* Split: carve the allocation off the front of the extent */
		pohRoot = sisfb_poh_new_node(memheap);
		if(pohRoot == NULL)
			return NULL;

		pohRoot->offset = pohThis->offset;
		pohRoot->size = size;

		pohThis->offset += size;
		pohThis->size -= size;
	}

	memheap->max_freesize -= size;

	pohThis = &memheap->oh_used;
	sisfb_insert_node(pohThis, pohRoot);

	return pohRoot;
}

/* Unlink a node from its doubly-linked ring */
static void
sisfb_delete_node(struct SIS_OH *poh)
{
	poh->poh_prev->poh_next = poh->poh_next;
	poh->poh_next->poh_prev = poh->poh_prev;
}

/* Insert poh right after the list head pohList */
static void
sisfb_insert_node(struct SIS_OH *pohList, struct SIS_OH *poh)
{
	struct SIS_OH *pohTemp = pohList->poh_next;

	pohList->poh_next = poh;
	pohTemp->poh_prev = poh;

	poh->poh_prev = pohList;
	poh->poh_next = pohTemp;
}

/* Free the used extent starting at offset 'base': remove it from the
 * used list, coalesce with free neighbours when adjacent, and return
 * the resulting free node (NULL if base matches no used extent).
 */
static struct SIS_OH *
sisfb_poh_free(struct SIS_HEAP *memheap, u32 base)
{
	struct SIS_OH *pohThis;
	struct SIS_OH *poh_freed;
	struct SIS_OH *poh_prev;
	struct SIS_OH *poh_next;
	u32 ulUpper;
	u32 ulLower;
	int foundNode = 0;

	poh_freed = memheap->oh_used.poh_next;

	while(poh_freed != &memheap->oh_used) {
		if(poh_freed->offset == base) {
			foundNode = 1;
			break;
		}

		poh_freed = poh_freed->poh_next;
	}

	if(!foundNode)
		return NULL;

	memheap->max_freesize += poh_freed->size;

	poh_prev = poh_next = NULL;
	ulUpper = poh_freed->offset + poh_freed->size;
	ulLower = poh_freed->offset;

	pohThis = memheap->oh_free.poh_next;

	/* Look for free extents immediately above and below the freed one */
	while(pohThis != &memheap->oh_free) {
		if(pohThis->offset == ulUpper) {
			poh_next = pohThis;
		} else if((pohThis->offset + pohThis->size) == ulLower) {
			poh_prev = pohThis;
		}
		pohThis = pohThis->poh_next;
	}

	sisfb_delete_node(poh_freed);

	if(poh_prev && poh_next) {
		/* Merge all three extents into poh_prev */
		poh_prev->size += (poh_freed->size + poh_next->size);
		sisfb_delete_node(poh_next);
		sisfb_free_node(memheap, poh_freed);
		sisfb_free_node(memheap, poh_next);
		return poh_prev;
	}

	if(poh_prev) {
		poh_prev->size += poh_freed->size;
		sisfb_free_node(memheap, poh_freed);
		return poh_prev;
	}

	if(poh_next) {
		poh_next->size += poh_freed->size;
		poh_next->offset = poh_freed->offset;
		sisfb_free_node(memheap, poh_freed);
		return poh_next;
	}

	/* No neighbours: the freed extent becomes its own free node */
	sisfb_insert_node(&memheap->oh_free, poh_freed);

	return poh_freed;
}

/* Return a node to the heap's node freelist */
static void
sisfb_free_node(struct SIS_HEAP *memheap, struct SIS_OH *poh)
{
	if(poh == NULL)
		return;

	poh->poh_next = memheap->poh_freelist;
	memheap->poh_freelist = poh;
}

/* Allocate req->size bytes of video RAM from this card's heap; on
 * failure (or if the card has no heap) zero req->offset/req->size.
 */
static void
sis_int_malloc(struct sis_video_info *ivideo, struct sis_memreq *req)
{
	struct SIS_OH *poh = NULL;

	if((ivideo) && (ivideo->sisfb_id == SISFB_ID) && (!ivideo->havenoheap))
		poh = sisfb_poh_allocate(&ivideo->sisfb_heap, (u32)req->size);

	if(poh == NULL) {
		req->offset = req->size = 0;
		DPRINTK("sisfb: Video RAM allocation failed\n");
	} else {
		req->offset = poh->offset;
		req->size = poh->size;
		DPRINTK("sisfb: Video RAM allocation succeeded: 0x%lx\n",
			(poh->offset + ivideo->video_vbase));
	}
}

/* Legacy single-card DRM entry point: allocate from the global heap */
void
sis_malloc(struct sis_memreq *req)
{
	struct sis_video_info *ivideo = sisfb_heap->vinfo;

	if(&ivideo->sisfb_heap == sisfb_heap)
		sis_int_malloc(ivideo, req);
	else
		req->offset = req->size = 0;
}

/* Per-device DRM entry point: allocate from the given card's heap */
void
sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req)
{
	struct sis_video_info *ivideo = pci_get_drvdata(pdev);

	sis_int_malloc(ivideo, req);
}

/* sis_free: u32 because "base" is offset inside video ram, can never be >4GB */

static void
sis_int_free(struct sis_video_info *ivideo, u32 base)
{
	struct SIS_OH *poh;

	if((!ivideo) || (ivideo->sisfb_id != SISFB_ID) || (ivideo->havenoheap))
		return;

	poh = sisfb_poh_free(&ivideo->sisfb_heap, base);

	if(poh == NULL) {
		DPRINTK("sisfb: sisfb_poh_free() failed at base 0x%x\n",
			(unsigned int) base);
	}
}

/* Legacy single-card DRM entry point: free from the global heap */
void
sis_free(u32 base)
{
	struct sis_video_info *ivideo = sisfb_heap->vinfo;

	sis_int_free(ivideo, base);
}

/* Per-device DRM entry point: free from the given card's heap */
void
sis_free_new(struct pci_dev *pdev, u32 base)
{
	struct sis_video_info *ivideo = pci_get_drvdata(pdev);

	sis_int_free(ivideo, base);
}

/* --------------------- SetMode routines ------------------------- */

static void
sisfb_check_engine_and_sync(struct
sis_video_info *ivideo)
{
	u8 cr30, cr31;

	/* Check if MMIO and engines are enabled,
	 * and sync in case they are. Can't use
	 * ivideo->accel here, as this might have
	 * been changed before this is called.
	 */
	inSISIDXREG(SISSR, IND_SIS_PCI_ADDRESS_SET, cr30);
	inSISIDXREG(SISSR, IND_SIS_MODULE_ENABLE, cr31);
	/* MMIO and 2D/3D engine enabled? */
	if((cr30 & SIS_MEM_MAP_IO_ENABLE) && (cr31 & 0x42)) {
#ifdef CONFIG_FB_SIS_300
		if(ivideo->sisvga_engine == SIS_300_VGA) {
			/* Don't care about TurboQueue. It's
			 * enough to know that the engines
			 * are enabled
			 */
			sisfb_syncaccel(ivideo);
		}
#endif
#ifdef CONFIG_FB_SIS_315
		if(ivideo->sisvga_engine == SIS_315_VGA) {
			/* Check that any queue mode is
			 * enabled, and that the queue
			 * is not in the state of "reset"
			 */
			inSISIDXREG(SISSR, 0x26, cr30);
			if((cr30 & 0xe0) && (!(cr30 & 0x01))) {
				sisfb_syncaccel(ivideo);
			}
		}
#endif
	}
}

/* Program the bridge/CRT2 related registers (CR30/31/33/35/38)
 * according to the requested CRT2 output type BEFORE the actual
 * mode switch. Register write order follows the BIOS sequence;
 * do not reorder.
 */
static void
sisfb_pre_setmode(struct sis_video_info *ivideo)
{
	u8 cr30 = 0, cr31 = 0, cr33 = 0, cr35 = 0, cr38 = 0;
	int tvregnum = 0;

	/* Keep only bridge type and CRT2 output selection */
	ivideo->currentvbflags &= (VB_VIDEOBRIDGE | VB_DISPTYPE_DISP2);

	outSISIDXREG(SISSR, 0x05, 0x86);	/* unlock SR registers */

	inSISIDXREG(SISCR, 0x31, cr31);
	cr31 &= ~0x60;
	cr31 |= 0x04;

	cr33 = ivideo->rate_idx & 0x0F;

#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		if(ivideo->chip >= SIS_661) {
			inSISIDXREG(SISCR, 0x38, cr38);
			cr38 &= ~0x07;	/* Clear LCDA/DualEdge and YPbPr bits */
		} else {
			tvregnum = 0x38;
			inSISIDXREG(SISCR, tvregnum, cr38);
			cr38 &= ~0x3b;	/* Clear LCDA/DualEdge and YPbPr bits */
		}
	}
#endif
#ifdef CONFIG_FB_SIS_300
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		tvregnum = 0x35;
		inSISIDXREG(SISCR, tvregnum, cr38);
	}
#endif

	SiS_SetEnableDstn(&ivideo->SiS_Pr, false);
	SiS_SetEnableFstn(&ivideo->SiS_Pr, false);
	ivideo->curFSTN = ivideo->curDSTN = 0;

	switch(ivideo->currentvbflags & VB_DISPTYPE_DISP2) {

	case CRT2_TV:
		cr38 &= ~0xc0;	/* Clear PAL-M / PAL-N bits */
		if((ivideo->vbflags & TV_YPBPR) && (ivideo->vbflags2 & VB2_SISYPBPRBRIDGE)) {
#ifdef CONFIG_FB_SIS_315
			if(ivideo->chip >= SIS_661) {
				/* 661+: YPbPr mode lives in CR38/CR35 */
				cr38 |= 0x04;
				if(ivideo->vbflags & TV_YPBPR525P)       cr35 |= 0x20;
				else if(ivideo->vbflags & TV_YPBPR750P)  cr35 |= 0x40;
				else if(ivideo->vbflags & TV_YPBPR1080I) cr35 |= 0x60;
				cr30 |= SIS_SIMULTANEOUS_VIEW_ENABLE;
				cr35 &= ~0x01;
				ivideo->currentvbflags |= (TV_YPBPR | (ivideo->vbflags & TV_YPBPRALL));
			} else if(ivideo->sisvga_engine == SIS_315_VGA) {
				/* Pre-661 315 series: YPbPr mode in CR38 only */
				cr30 |= (0x80 | SIS_SIMULTANEOUS_VIEW_ENABLE);
				cr38 |= 0x08;
				if(ivideo->vbflags & TV_YPBPR525P)       cr38 |= 0x10;
				else if(ivideo->vbflags & TV_YPBPR750P)  cr38 |= 0x20;
				else if(ivideo->vbflags & TV_YPBPR1080I) cr38 |= 0x30;
				cr31 &= ~0x01;
				ivideo->currentvbflags |= (TV_YPBPR | (ivideo->vbflags & TV_YPBPRALL));
			}
#endif
		} else if((ivideo->vbflags & TV_HIVISION) &&
					(ivideo->vbflags2 & VB2_SISHIVISIONBRIDGE)) {
			if(ivideo->chip >= SIS_661) {
				cr38 |= 0x04;
				cr35 |= 0x60;
			} else {
				cr30 |= 0x80;
			}
			cr30 |= SIS_SIMULTANEOUS_VIEW_ENABLE;
			cr31 |= 0x01;
			cr35 |= 0x01;
			ivideo->currentvbflags |= TV_HIVISION;
		} else if(ivideo->vbflags & TV_SCART) {
			cr30 = (SIS_VB_OUTPUT_SCART | SIS_SIMULTANEOUS_VIEW_ENABLE);
			cr31 |= 0x01;
			cr35 |= 0x01;
			ivideo->currentvbflags |= TV_SCART;
		} else {
			/* Composite/S-Video; AVIDEO wins if both are set */
			if(ivideo->vbflags & TV_SVIDEO) {
				cr30 = (SIS_VB_OUTPUT_SVIDEO | SIS_SIMULTANEOUS_VIEW_ENABLE);
				ivideo->currentvbflags |= TV_SVIDEO;
			}
			if(ivideo->vbflags & TV_AVIDEO) {
				cr30 = (SIS_VB_OUTPUT_COMPOSITE | SIS_SIMULTANEOUS_VIEW_ENABLE);
				ivideo->currentvbflags |= TV_AVIDEO;
			}
		}
		cr31 |= SIS_DRIVER_MODE;

		/* TV standard (PAL/NTSC and variants) for composite/S-Video */
		if(ivideo->vbflags & (TV_AVIDEO | TV_SVIDEO)) {
			if(ivideo->vbflags & TV_PAL) {
				cr31 |= 0x01; cr35 |= 0x01;
				ivideo->currentvbflags |= TV_PAL;
				if(ivideo->vbflags & TV_PALM) {
					cr38 |= 0x40; cr35 |= 0x04;
					ivideo->currentvbflags |= TV_PALM;
				} else if(ivideo->vbflags & TV_PALN) {
					cr38 |= 0x80; cr35 |= 0x08;
					ivideo->currentvbflags |= TV_PALN;
				}
			} else {
				cr31 &= ~0x01; cr35 &= ~0x01;
				ivideo->currentvbflags |= TV_NTSC;
				if(ivideo->vbflags & TV_NTSCJ) {
					cr38 |= 0x40; cr35 |= 0x02;
					ivideo->currentvbflags |= TV_NTSCJ;
				}
			}
		}
		break;

	case CRT2_LCD:
		cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE);
		cr31 |= SIS_DRIVER_MODE;
		SiS_SetEnableDstn(&ivideo->SiS_Pr, ivideo->sisfb_dstn);
		SiS_SetEnableFstn(&ivideo->SiS_Pr, ivideo->sisfb_fstn);
		ivideo->curFSTN = ivideo->sisfb_fstn;
		ivideo->curDSTN = ivideo->sisfb_dstn;
		break;

	case CRT2_VGA:
		cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE);
		cr31 |= SIS_DRIVER_MODE;
		/* CRT2 refresh rate index goes into the high nibble of CR33 */
		if(ivideo->sisfb_nocrt2rate) {
			cr33 |= (sisbios_mode[ivideo->sisfb_mode_idx].rate_idx << 4);
		} else {
			cr33 |= ((ivideo->rate_idx & 0x0F) << 4);
		}
		break;

	default:	/* disable CRT2 */
		cr30 = 0x00;
		cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE);
	}

	outSISIDXREG(SISCR, 0x30, cr30);
	outSISIDXREG(SISCR, 0x33, cr33);

	if(ivideo->chip >= SIS_661) {
#ifdef CONFIG_FB_SIS_315
		cr31 &= ~0x01;				/* Clear PAL flag (now in CR35) */
		setSISIDXREG(SISCR, 0x35, ~0x10, cr35);	/* Leave overscan bit alone */
		cr38 &= 0x07;				/* Use only LCDA and HiVision/YPbPr bits */
		setSISIDXREG(SISCR, 0x38, 0xf8, cr38);
#endif
	} else if(ivideo->chip != SIS_300) {
		outSISIDXREG(SISCR, tvregnum, cr38);
	}

	outSISIDXREG(SISCR, 0x31, cr31);

	ivideo->SiS_Pr.SiS_UseOEM = ivideo->sisfb_useoem;

	sisfb_check_engine_and_sync(ivideo);
}

/* Fix SR11 for 661 and later */
#ifdef CONFIG_FB_SIS_315
static void
sisfb_fixup_SR11(struct sis_video_info *ivideo)
{
	u8 tmpreg;

	if(ivideo->chip >= SIS_661) {
		inSISIDXREG(SISSR,0x11,tmpreg);
		if(tmpreg & 0x20) {
			/* NOTE(review): bumping SR3E then re-reading SR11 looks
			 * like a BIOS-derived workaround sequence - keep order.
			 */
			inSISIDXREG(SISSR,0x3e,tmpreg);
			tmpreg = (tmpreg + 1) & 0xff;
			outSISIDXREG(SISSR,0x3e,tmpreg);
			inSISIDXREG(SISSR,0x11,tmpreg);
		}
		if(tmpreg & 0xf0) {
			andSISIDXREG(SISSR,0x11,0x0f);
		}
	}
}
#endif

/* Set the horizontal TV position offset (clamped to [-32, 32]) and,
 * if a mode is active and TV is the current CRT2 output, apply it to
 * the Chrontel encoder or the SiS bridge registers.
 */
static void
sisfb_set_TVxposoffset(struct sis_video_info *ivideo, int val)
{
	if(val > 32)  val = 32;
	if(val < -32) val = -32;
	ivideo->tvxpos = val;

	if(ivideo->sisfblocked) return;
	if(!ivideo->modechanged) return;

	if(ivideo->currentvbflags & CRT2_TV) {

		if(ivideo->vbflags2 & VB2_CHRONTEL) {

			int x = ivideo->tvx;

			switch(ivideo->chronteltype) {
			case 1:
				x += val;
				if(x < 0) x = 0;
				outSISIDXREG(SISSR,0x05,0x86);
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x0a, (x &
0xff));
				/* 9th bit of the position lives in CH700x reg 0x08 */
				SiS_SetCH70xxANDOR(&ivideo->SiS_Pr, 0x08, ((x & 0x0100) >> 7), 0xFD);
				break;
			case 2:
				/* Not supported by hardware */
				break;
			}

		} else if(ivideo->vbflags2 & VB2_SISBRIDGE) {

			u8 p2_1f,p2_20,p2_2b,p2_42,p2_43;
			unsigned short temp;

			/* Start from the values saved at mode-set time */
			p2_1f = ivideo->p2_1f;
			p2_20 = ivideo->p2_20;
			p2_2b = ivideo->p2_2b;
			p2_42 = ivideo->p2_42;
			p2_43 = ivideo->p2_43;

			/* 12-bit values split across two registers each */
			temp = p2_1f | ((p2_20 & 0xf0) << 4);
			temp += (val * 2);
			p2_1f = temp & 0xff;
			p2_20 = (temp & 0xf00) >> 4;
			p2_2b = ((p2_2b & 0x0f) + (val * 2)) & 0x0f;
			temp = p2_43 | ((p2_42 & 0xf0) << 4);
			temp += (val * 2);
			p2_43 = temp & 0xff;
			p2_42 = (temp & 0xf00) >> 4;
			outSISIDXREG(SISPART2,0x1f,p2_1f);
			setSISIDXREG(SISPART2,0x20,0x0F,p2_20);
			setSISIDXREG(SISPART2,0x2b,0xF0,p2_2b);
			setSISIDXREG(SISPART2,0x42,0x0F,p2_42);
			outSISIDXREG(SISPART2,0x43,p2_43);
		}
	}
}

/* Vertical counterpart of sisfb_set_TVxposoffset(); val clamped to
 * [-32, 32], applied to the Chrontel encoder or SiS bridge.
 */
static void
sisfb_set_TVyposoffset(struct sis_video_info *ivideo, int val)
{
	if(val > 32)  val = 32;
	if(val < -32) val = -32;
	ivideo->tvypos = val;

	if(ivideo->sisfblocked) return;
	if(!ivideo->modechanged) return;

	if(ivideo->currentvbflags & CRT2_TV) {

		if(ivideo->vbflags2 & VB2_CHRONTEL) {

			int y = ivideo->tvy;

			switch(ivideo->chronteltype) {
			case 1:
				y -= val;
				if(y < 0) y = 0;
				outSISIDXREG(SISSR,0x05,0x86);
				SiS_SetCH700x(&ivideo->SiS_Pr, 0x0b, (y & 0xff));
				SiS_SetCH70xxANDOR(&ivideo->SiS_Pr, 0x08, ((y & 0x0100) >> 8), 0xFE);
				break;
			case 2:
				/* Not supported by hardware */
				break;
			}

		} else if(ivideo->vbflags2 & VB2_SISBRIDGE) {

			char p2_01, p2_02;
			val /= 2;
			p2_01 = ivideo->p2_01;
			p2_02 = ivideo->p2_02;

			p2_01 += val;
			p2_02 += val;
			if(!(ivideo->currentvbflags & (TV_HIVISION | TV_YPBPR))) {
				/* Keep both values positive for SD TV modes */
				while((p2_01 <= 0) || (p2_02 <= 0)) {
					p2_01 += 2;
					p2_02 += 2;
				}
			}
			outSISIDXREG(SISPART2,0x01,p2_01);
			outSISIDXREG(SISPART2,0x02,p2_02);
		}
	}
}

/* Post-mode-switch fixups: CRT1 on/off state, vbflags bookkeeping,
 * saving the bridge TV position registers, and engine re-init.
 */
static void
sisfb_post_setmode(struct sis_video_info *ivideo)
{
	bool crt1isoff = false;
	bool doit = true;
#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
	u8 reg;
#endif
#ifdef CONFIG_FB_SIS_315
	u8 reg1;
#endif

	outSISIDXREG(SISSR, 0x05, 0x86);	/* unlock SR registers */
#ifdef CONFIG_FB_SIS_315
	sisfb_fixup_SR11(ivideo);
#endif

	/* Now we actually HAVE changed the display mode */
	ivideo->modechanged = 1;

	/* We can't switch off CRT1 if bridge is in slave mode */
	if(ivideo->vbflags2 & VB2_VIDEOBRIDGE) {
		if(sisfb_bridgeisslave(ivideo)) doit = false;
	} else
		ivideo->sisfb_crt1off = 0;

#ifdef CONFIG_FB_SIS_300
	if(ivideo->sisvga_engine == SIS_300_VGA) {
		if((ivideo->sisfb_crt1off) && (doit)) {
			crt1isoff = true;
			reg = 0x00;
		} else {
			crt1isoff = false;
			reg = 0x80;
		}
		setSISIDXREG(SISCR, 0x17, 0x7f, reg);
	}
#endif
#ifdef CONFIG_FB_SIS_315
	if(ivideo->sisvga_engine == SIS_315_VGA) {
		if((ivideo->sisfb_crt1off) && (doit)) {
			crt1isoff = true;
			reg  = 0x40;
			reg1 = 0xc0;
		} else {
			crt1isoff = false;
			reg  = 0x00;
			reg1 = 0x00;
		}
		setSISIDXREG(SISCR, ivideo->SiS_Pr.SiS_MyCR63, ~0x40, reg);
		setSISIDXREG(SISSR, 0x1f, ~0xc0, reg1);
	}
#endif

	/* Track single/mirror mode in currentvbflags */
	if(crt1isoff) {
		ivideo->currentvbflags &= ~VB_DISPTYPE_CRT1;
		ivideo->currentvbflags |= VB_SINGLE_MODE;
	} else {
		ivideo->currentvbflags |= VB_DISPTYPE_CRT1;
		if(ivideo->currentvbflags & VB_DISPTYPE_CRT2) {
			ivideo->currentvbflags |= VB_MIRROR_MODE;
		} else {
			ivideo->currentvbflags |= VB_SINGLE_MODE;
		}
	}

	andSISIDXREG(SISSR, IND_SIS_RAMDAC_CONTROL, ~0x04);

	if(ivideo->currentvbflags & CRT2_TV) {
		if(ivideo->vbflags2 & VB2_SISBRIDGE) {
			/* Save TV position registers as baseline for the
			 * tvx/tvy offset adjustments below.
			 */
			inSISIDXREG(SISPART2,0x1f,ivideo->p2_1f);
			inSISIDXREG(SISPART2,0x20,ivideo->p2_20);
			inSISIDXREG(SISPART2,0x2b,ivideo->p2_2b);
			inSISIDXREG(SISPART2,0x42,ivideo->p2_42);
			inSISIDXREG(SISPART2,0x43,ivideo->p2_43);
			inSISIDXREG(SISPART2,0x01,ivideo->p2_01);
			inSISIDXREG(SISPART2,0x02,ivideo->p2_02);
		} else if(ivideo->vbflags2 & VB2_CHRONTEL) {
			if(ivideo->chronteltype == 1) {
				ivideo->tvx = SiS_GetCH700x(&ivideo->SiS_Pr, 0x0a);
				ivideo->tvx |= (((SiS_GetCH700x(&ivideo->SiS_Pr, 0x08) & 0x02) >> 1) << 8);
				ivideo->tvy = SiS_GetCH700x(&ivideo->SiS_Pr, 0x0b);
				ivideo->tvy |= ((SiS_GetCH700x(&ivideo->SiS_Pr, 0x08) & 0x01) << 8);
			}
		}
	}

	if(ivideo->tvxpos) {
		sisfb_set_TVxposoffset(ivideo, ivideo->tvxpos);
	}
	if(ivideo->tvypos) {
		sisfb_set_TVyposoffset(ivideo, ivideo->tvypos);
	}

	/* Eventually sync engines */
	sisfb_check_engine_and_sync(ivideo);

	/* (Re-)Initialize chip engines */
	if(ivideo->accel) {
		sisfb_engine_init(ivideo);
	} else {
		ivideo->engineok = 0;
	}
}

/* Re-run the current mode switch (used after CRT1 on/off toggling).
 * Returns 1 on failure, 0 on success.
 */
static int
sisfb_reset_mode(struct sis_video_info *ivideo)
{
	if(sisfb_set_mode(ivideo, 0))
		return 1;

	sisfb_set_pitch(ivideo);
	sisfb_set_base_CRT1(ivideo, ivideo->current_base);
	sisfb_set_base_CRT2(ivideo, ivideo->current_base);

	return 0;
}

/* Dispatch a SISFB_CMD_* request (ioctl backend); results are returned
 * in sisfb_command->sisfb_result[], result[0] being the status code.
 */
static void
sisfb_handle_command(struct sis_video_info *ivideo, struct sisfb_cmd *sisfb_command)
{
	int mycrt1off;

	switch(sisfb_command->sisfb_cmd) {
	case SISFB_CMD_GETVBFLAGS:
		if(!ivideo->modechanged) {
			sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_EARLY;
		} else {
			sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_OK;
			sisfb_command->sisfb_result[1] = ivideo->currentvbflags;
			sisfb_command->sisfb_result[2] = ivideo->vbflags2;
		}
		break;
	case SISFB_CMD_SWITCHCRT1:
		/* arg[0]: 0 = off, 1 = on, 99 = query */
		if(!ivideo->modechanged) {
			sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_EARLY;
		} else if(sisfb_command->sisfb_arg[0] == 99) {
			/* Query */
			sisfb_command->sisfb_result[1] = ivideo->sisfb_crt1off ? 0 : 1;
			sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_OK;
		} else if(ivideo->sisfblocked) {
			sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_LOCKED;
		} else if((!(ivideo->currentvbflags & CRT2_ENABLE)) &&
					(sisfb_command->sisfb_arg[0] == 0)) {
			/* Refuse to turn CRT1 off when it is the only output */
			sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_NOCRT2;
		} else {
			sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_OK;
			mycrt1off = sisfb_command->sisfb_arg[0] ? 0 : 1;
			/* Only reset the mode if the state actually changes */
			if( ((ivideo->currentvbflags & VB_DISPTYPE_CRT1) && mycrt1off) ||
			    ((!(ivideo->currentvbflags & VB_DISPTYPE_CRT1)) && !mycrt1off) ) {
				ivideo->sisfb_crt1off = mycrt1off;
				if(sisfb_reset_mode(ivideo)) {
					sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_OTHER;
				}
			}
			sisfb_command->sisfb_result[1] = ivideo->sisfb_crt1off ?
0 : 1;
		}
		break;
	/* more to come */
	default:
		sisfb_command->sisfb_result[0] = SISFB_CMD_ERR_UNKNOWN;
		printk(KERN_ERR "sisfb: Unknown command 0x%x\n",
			sisfb_command->sisfb_cmd);
	}
}

#ifndef MODULE
/* Parse the "video=sisfb:..." kernel command line: comma-separated
 * options, each handled by a case-insensitive prefix match. Unknown
 * options are reported but do not abort parsing.
 */
static int __init
sisfb_setup(char *options)
{
	char *this_opt;

	sisfb_setdefaultparms();

	if(!options || !(*options))
		return 0;

	while((this_opt = strsep(&options, ",")) != NULL) {

		if(!(*this_opt)) continue;

		if(!strnicmp(this_opt, "off", 3)) {
			sisfb_off = 1;
		} else if(!strnicmp(this_opt, "forcecrt2type:", 14)) {
			/* Need to check crt2 type first for fstn/dstn */
			sisfb_search_crt2type(this_opt + 14);
		} else if(!strnicmp(this_opt, "tvmode:",7)) {
			sisfb_search_tvstd(this_opt + 7);
		} else if(!strnicmp(this_opt, "tvstandard:",11)) {
			sisfb_search_tvstd(this_opt + 11);
		} else if(!strnicmp(this_opt, "mode:", 5)) {
			sisfb_search_mode(this_opt + 5, false);
		} else if(!strnicmp(this_opt, "vesa:", 5)) {
			sisfb_search_vesamode(simple_strtoul(this_opt + 5, NULL, 0), false);
		} else if(!strnicmp(this_opt, "rate:", 5)) {
			sisfb_parm_rate = simple_strtoul(this_opt + 5, NULL, 0);
		} else if(!strnicmp(this_opt, "forcecrt1:", 10)) {
			sisfb_forcecrt1 = (int)simple_strtoul(this_opt + 10, NULL, 0);
		} else if(!strnicmp(this_opt, "mem:",4)) {
			sisfb_parm_mem = simple_strtoul(this_opt + 4, NULL, 0);
		} else if(!strnicmp(this_opt, "pdc:", 4)) {
			sisfb_pdc = simple_strtoul(this_opt + 4, NULL, 0);
		} else if(!strnicmp(this_opt, "pdc1:", 5)) {
			sisfb_pdca = simple_strtoul(this_opt + 5, NULL, 0);
		} else if(!strnicmp(this_opt, "noaccel", 7)) {
			sisfb_accel = 0;
		} else if(!strnicmp(this_opt, "accel", 5)) {
			sisfb_accel = -1;
		} else if(!strnicmp(this_opt, "noypan", 6)) {
			sisfb_ypan = 0;
		} else if(!strnicmp(this_opt, "ypan", 4)) {
			sisfb_ypan = -1;
		} else if(!strnicmp(this_opt, "nomax", 5)) {
			sisfb_max = 0;
		} else if(!strnicmp(this_opt, "max", 3)) {
			sisfb_max = -1;
		} else if(!strnicmp(this_opt, "userom:", 7)) {
			sisfb_userom = (int)simple_strtoul(this_opt + 7, NULL, 0);
		} else if(!strnicmp(this_opt, "useoem:", 7)) {
			sisfb_useoem = (int)simple_strtoul(this_opt + 7, NULL, 0);
		} else if(!strnicmp(this_opt, "nocrt2rate", 10)) {
			sisfb_nocrt2rate = 1;
		} else if(!strnicmp(this_opt, "scalelcd:", 9)) {
			unsigned long temp = 2;
			temp = simple_strtoul(this_opt + 9, NULL, 0);
			if((temp == 0) || (temp == 1)) {
				/* Stored inverted: scalelcd:1 -> sisfb_scalelcd = 0 */
				sisfb_scalelcd = temp ^ 1;
			}
		} else if(!strnicmp(this_opt, "tvxposoffset:", 13)) {
			int temp = 0;
			temp = (int)simple_strtol(this_opt + 13, NULL, 0);
			if((temp >= -32) && (temp <= 32)) {
				sisfb_tvxposoffset = temp;
			}
		} else if(!strnicmp(this_opt, "tvyposoffset:", 13)) {
			int temp = 0;
			temp = (int)simple_strtol(this_opt + 13, NULL, 0);
			if((temp >= -32) && (temp <= 32)) {
				sisfb_tvyposoffset = temp;
			}
		} else if(!strnicmp(this_opt, "specialtiming:", 14)) {
			sisfb_search_specialtiming(this_opt + 14);
		} else if(!strnicmp(this_opt, "lvdshl:", 7)) {
			int temp = 4;
			temp = simple_strtoul(this_opt + 7, NULL, 0);
			if((temp >= 0) && (temp <= 3)) {
				sisfb_lvdshl = temp;
			}
		} else if(this_opt[0] >= '0' && this_opt[0] <= '9') {
			/* Bare number: treat as a mode specification */
			sisfb_search_mode(this_opt, true);
#if !defined(__i386__) && !defined(__x86_64__)
		} else if(!strnicmp(this_opt, "resetcard", 9)) {
			sisfb_resetcard = 1;
		} else if(!strnicmp(this_opt, "videoram:", 9)) {
			sisfb_videoram = simple_strtoul(this_opt + 9, NULL, 0);
#endif
		} else {
			printk(KERN_INFO "sisfb: Invalid option %s\n", this_opt);
		}
	}

	return 0;
}
#endif

/* Check whether the (mapped) ROM image at rom_base is a valid PCI
 * expansion ROM belonging to this card: 0x55AA signature, a sane
 * pointer to the PCI data structure, the "PCIR" magic, and matching
 * vendor/device IDs. Returns 1 if it matches, 0 otherwise.
 */
static int __devinit
sisfb_check_rom(void __iomem *rom_base, struct sis_video_info *ivideo)
{
	void __iomem *rom;
	int romptr;

	if((readb(rom_base) != 0x55) || (readb(rom_base + 1) != 0xaa))
		return 0;

	/* Offset 0x18: little-endian pointer to the PCI data structure */
	romptr = (readb(rom_base + 0x18) | (readb(rom_base + 0x19) << 8));
	if(romptr > (0x10000 - 8))
		return 0;

	rom = rom_base + romptr;

	if((readb(rom)     != 'P') || (readb(rom + 1) != 'C') ||
	   (readb(rom + 2) != 'I') || (readb(rom + 3) != 'R'))
		return 0;

	if((readb(rom + 4) | (readb(rom + 5) << 8)) != ivideo->chip_vendor)
		return 0;

	if((readb(rom + 6) | (readb(rom + 7) << 8)) != ivideo->chip_id)
		return 0;

	return 1;
}

static unsigned char * __devinit
sisfb_find_rom(struct pci_dev *pdev)
{
	struct sis_video_info *ivideo = pci_get_drvdata(pdev);
	void __iomem *rom_base;
	unsigned char *myrombase = NULL;
	u32 temp;
	size_t romsize;

	/* First, try the official pci ROM functions (except
	 * on integrated chipsets which have no ROM).
	 */
	if(!ivideo->nbridge) {
		if((rom_base = pci_map_rom(pdev, &romsize))) {
			if(sisfb_check_rom(rom_base, ivideo)) {
				/* Copy at most 64K into a vmalloc'ed shadow;
				 * caller owns and must vfree() the result.
				 */
				if((myrombase = vmalloc(65536))) {
					/* Work around bug in pci/rom.c: Folks forgot to check
					 * whether the size retrieved from the BIOS image eventually
					 * is larger than the mapped size
					 */
					if(pci_resource_len(pdev, PCI_ROM_RESOURCE) < romsize)
						romsize = pci_resource_len(pdev, PCI_ROM_RESOURCE);
					memcpy_fromio(myrombase, rom_base,
							(romsize > 65536) ? 65536 : romsize);
				}
			}
			pci_unmap_rom(pdev, rom_base);
		}
	}

	if(myrombase) return myrombase;

	/* Otherwise do it the conventional way. */
#if defined(__i386__) || defined(__x86_64__)
	/* Scan the legacy BIOS expansion ROM area in 4K steps */
	for(temp = 0x000c0000; temp < 0x000f0000; temp += 0x00001000) {
		rom_base = ioremap(temp, 65536);
		if(!rom_base) continue;
		if(!sisfb_check_rom(rom_base, ivideo)) {
			iounmap(rom_base);
			continue;
		}
		if((myrombase = vmalloc(65536)))
			memcpy_fromio(myrombase, rom_base, 65536);
		iounmap(rom_base);
		break;
	}
#else
	/* Non-x86: temporarily map the ROM over the video memory BAR,
	 * then restore the original ROM address register.
	 */
	pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &temp);
	pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
			(ivideo->video_base & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE);
	rom_base = ioremap(ivideo->video_base, 65536);
	if(rom_base) {
		if(sisfb_check_rom(rom_base, ivideo)) {
			if((myrombase = vmalloc(65536)))
				memcpy_fromio(myrombase, rom_base, 65536);
		}
		iounmap(rom_base);
	}
	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, temp);
#endif

	return myrombase;
}

/* Map up to *mapsize bytes of video RAM for RAM size detection,
 * repeatedly halving the request (down to "min" MB) until ioremap
 * succeeds. On return *mapsize holds the actually mapped size;
 * ivideo->video_vbase is NULL if even the minimum failed.
 */
static void __devinit
sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize,
			unsigned int min)
{
	ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize));

	if(!ivideo->video_vbase) {
		printk(KERN_ERR
			"sisfb: Unable to map maximum video RAM for size detection\n");
		(*mapsize) >>= 1;
		while((!(ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize))))) {
			(*mapsize) >>= 1;
			if((*mapsize) < (min << 20))
				break;
		}
		if(ivideo->video_vbase) {
			printk(KERN_ERR
				"sisfb: Video RAM size detection limited to %dMB\n",
				(int)((*mapsize) >> 20));
		}
	}
}

#ifdef CONFIG_FB_SIS_300
/* Probe the memory bus width on 300 series by write/read tests at the
 * start of the framebuffer. Returns 4 (128bit), 2 (64bit) or 1 (32bit).
 */
static int __devinit
sisfb_post_300_buswidth(struct sis_video_info *ivideo)
{
	void __iomem *FBAddress = ivideo->video_vbase;
	unsigned short temp;
	unsigned char reg;
	int i, j;

	andSISIDXREG(SISSR, 0x15, 0xFB);
	orSISIDXREG(SISSR, 0x15, 0x04);
	outSISIDXREG(SISSR, 0x13, 0x00);
	outSISIDXREG(SISSR, 0x14, 0xBF);

	/* Settle the RAM interface: retry a 16-bit write/readback,
	 * pulsing SR3C between attempts (BIOS-derived sequence).
	 */
	for(i = 0; i < 2; i++) {
		temp = 0x1234;
		for(j = 0; j < 4; j++) {
			writew(temp, FBAddress);
			if(readw(FBAddress) == temp)
				break;
			orSISIDXREG(SISSR, 0x3c, 0x01);
			inSISIDXREG(SISSR, 0x05, reg);
			inSISIDXREG(SISSR, 0x05, reg);
			andSISIDXREG(SISSR, 0x3c, 0xfe);
			inSISIDXREG(SISSR, 0x05, reg);
			inSISIDXREG(SISSR, 0x05, reg);
			temp++;
		}
	}

	writel(0x01234567L, FBAddress);
	writel(0x456789ABL, (FBAddress + 4));
	writel(0x89ABCDEFL, (FBAddress + 8));
	writel(0xCDEF0123L, (FBAddress + 12));

	inSISIDXREG(SISSR, 0x3b, reg);
	if(reg & 0x01) {
		if(readl((FBAddress + 12)) == 0xCDEF0123L)
			return 4;	/* Channel A 128bit */
	}

	if(readl((FBAddress + 4)) == 0x456789ABL)
		return 2;		/* Channel B 64bit */

	return 1;			/* 32bit */
}

/* Try each DRAM type that matches the pseudo rank capacity / address
 * pin count, program SR13/SR14 accordingly and verify with write/read
 * tests at computed bank/page addresses. Returns 1 when a working
 * configuration was found (registers left programmed), else 0.
 */
static int __devinit
sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
			int PseudoRankCapacity, int PseudoAdrPinCount,
			unsigned int mapsize)
{
	void __iomem *FBAddr = ivideo->video_vbase;
	unsigned short sr14;
	unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
	unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
	/* Per DRAM type: { columns?, page bits, banks, capacity, SR13 value }
	 * (exact field meaning taken from the SiS BIOS - not documented here)
	 */
	static const unsigned short SiS_DRAMType[17][5] = {
		{0x0C,0x0A,0x02,0x40,0x39},
		{0x0D,0x0A,0x01,0x40,0x48},
		{0x0C,0x09,0x02,0x20,0x35},
		{0x0D,0x09,0x01,0x20,0x44},
		{0x0C,0x08,0x02,0x10,0x31},
		{0x0D,0x08,0x01,0x10,0x40},
		{0x0C,0x0A,0x01,0x20,0x34},
		{0x0C,0x09,0x01,0x08,0x32},
		{0x0B,0x08,0x02,0x08,0x21},
		{0x0C,0x08,0x01,0x08,0x30},
		{0x0A,0x08,0x02,0x04,0x11},
		{0x0B,0x0A,0x01,0x10,0x28},
		{0x09,0x08,0x02,0x02,0x01},
		{0x0B,0x09,0x01,0x08,0x24},
		{0x0B,0x08,0x01,0x04,0x20},
		{0x0A,0x08,0x01,0x02,0x10},
		{0x09,0x08,0x01,0x01,0x00}
	};

	for(k = 0; k <= 16; k++) {

		RankCapacity = buswidth * SiS_DRAMType[k][3];

		if(RankCapacity != PseudoRankCapacity)
			continue;

		if((SiS_DRAMType[k][2] + SiS_DRAMType[k][0]) > PseudoAdrPinCount)
			continue;

		/* Compute test addresses for this candidate configuration */
		BankNumHigh = RankCapacity * 16 * iteration - 1;
		if(iteration == 3) {	/* Rank No */
			BankNumMid  = RankCapacity * 16 - 1;
		} else {
			BankNumMid  = RankCapacity * 16 * iteration / 2 - 1;
		}

		PageCapacity = (1 << SiS_DRAMType[k][1]) * buswidth * 4;
		PhysicalAdrHigh = BankNumHigh;
		PhysicalAdrHalfPage = (PageCapacity / 2 + PhysicalAdrHigh) % PageCapacity;
		PhysicalAdrOtherPage = PageCapacity * SiS_DRAMType[k][2] + PhysicalAdrHigh;

		andSISIDXREG(SISSR, 0x15, 0xFB); /* Test */
		orSISIDXREG(SISSR, 0x15, 0x04);  /* Test */

		sr14 = (SiS_DRAMType[k][3] * buswidth) - 1;

		if(buswidth == 4)      sr14 |= 0x80;
		else if(buswidth == 2) sr14 |= 0x40;

		outSISIDXREG(SISSR, 0x13, SiS_DRAMType[k][4]);
		outSISIDXREG(SISSR, 0x14, sr14);

		BankNumHigh <<= 16;
		BankNumMid  <<= 16;

		/* Skip configurations whose test addresses fall outside
		 * the mapped window.
		 */
		if((BankNumHigh + PhysicalAdrHigh      >= mapsize) ||
		   (BankNumMid  + PhysicalAdrHigh      >= mapsize) ||
		   (BankNumHigh + PhysicalAdrHalfPage  >= mapsize) ||
		   (BankNumHigh + PhysicalAdrOtherPage >= mapsize))
			continue;

		/* Write data */
		writew(((unsigned short)PhysicalAdrHigh),
				(FBAddr + BankNumHigh + PhysicalAdrHigh));
		writew(((unsigned short)BankNumMid),
				(FBAddr + BankNumMid + PhysicalAdrHigh));
		writew(((unsigned short)PhysicalAdrHalfPage),
				(FBAddr + BankNumHigh + PhysicalAdrHalfPage));
		writew(((unsigned short)PhysicalAdrOtherPage),
				(FBAddr + BankNumHigh + PhysicalAdrOtherPage));

		/* Read data */
		if(readw(FBAddr + BankNumHigh + PhysicalAdrHigh) == PhysicalAdrHigh)
			return 1;
	}

	return 0;
}

/* Detect 300-series RAM size: determine bus width, then try candidate
 * rank capacity / address pin combinations (largest first) until the
 * read/write test succeeds. The winning config stays in SR13/SR14.
 */
static void __devinit
sisfb_post_300_ramsize(struct pci_dev *pdev, unsigned int mapsize)
{
	struct sis_video_info *ivideo = pci_get_drvdata(pdev);
	int i, j, buswidth;
	int PseudoRankCapacity, PseudoAdrPinCount;

	buswidth = sisfb_post_300_buswidth(ivideo);

	for(i = 6; i >= 0; i--) {
		PseudoRankCapacity = 1 << i;
		for(j = 4; j >= 1; j--) {
			PseudoAdrPinCount = 15 - j;
			if((PseudoRankCapacity * j) <= 64) {
				if(sisfb_post_300_rwtest(ivideo,
						j,
						buswidth,
						PseudoRankCapacity,
						PseudoAdrPinCount,
						mapsize))
					return;
			}
		}
	}
}

/* POST (initialize) a 300-series card that was not set up by the
 * system BIOS, using values from the card's own ROM when available
 * and conservative defaults otherwise. Mirrors the BIOS sequence.
 */
static void __devinit
sisfb_post_sis300(struct pci_dev *pdev)
{
	struct sis_video_info *ivideo = pci_get_drvdata(pdev);
	unsigned char *bios = ivideo->SiS_Pr.VirtualRomBase;
	u8  reg, v1, v2, v3, v4, v5, v6, v7, v8;
	u16 index, rindex, memtype = 0;
	unsigned int mapsize;

	/* bios == NULL means: no usable ROM image, fall back to defaults */
	if(!ivideo->SiS_Pr.UseROM)
		bios = NULL;

	outSISIDXREG(SISSR, 0x05, 0x86);	/* unlock SR registers */

	if(bios) {
		if(bios[0x52] & 0x80) {
			memtype = bios[0x52];
		} else {
			inSISIDXREG(SISSR, 0x3a, memtype);
		}
		memtype &= 0x07;
	}

	/* MCLK/ECLK PLL values: defaults, or per-memtype from the ROM */
	v3 = 0x80; v6 = 0x80;
	if(ivideo->revision_id <= 0x13) {
		v1 = 0x44; v2 = 0x42;
		v4 = 0x44; v5 = 0x42;
	} else {
		v1 = 0x68; v2 = 0x43; /* Assume 125Mhz MCLK */
		v4 = 0x68; v5 = 0x43; /* Assume 125Mhz ECLK */
		if(bios) {
			index = memtype * 5;
			rindex = index + 0x54;
			v1 = bios[rindex++];
			v2 = bios[rindex++];
			v3 = bios[rindex++];
			rindex = index + 0x7c;
			v4 = bios[rindex++];
			v5 = bios[rindex++];
			v6 = bios[rindex++];
		}
	}
	outSISIDXREG(SISSR, 0x28, v1);
	outSISIDXREG(SISSR, 0x29, v2);
	outSISIDXREG(SISSR, 0x2a, v3);
	outSISIDXREG(SISSR, 0x2e, v4);
	outSISIDXREG(SISSR, 0x2f, v5);
	outSISIDXREG(SISSR, 0x30, v6);

	v1 = 0x10;
	if(bios)
		v1 = bios[0xa4];
	outSISIDXREG(SISSR, 0x07, v1);		/* DAC speed */

	outSISIDXREG(SISSR, 0x11, 0x0f);	/* DDC, power save */

	/* RAM timing registers SR15-SR1C: per-memtype table at BIOS 0xa5 */
	v1 = 0x01; v2 = 0x43; v3 = 0x1e; v4 = 0x2a;
	v5 = 0x06; v6 = 0x00; v7 = 0x00; v8 = 0x00;
	if(bios) {
		memtype += 0xa5;
		v1 = bios[memtype];
		v2 = bios[memtype + 8];
		v3 = bios[memtype + 16];
		v4 = bios[memtype + 24];
		v5 = bios[memtype + 32];
		v6 = bios[memtype + 40];
		v7 = bios[memtype + 48];
		v8 = bios[memtype + 56];
	}
	if(ivideo->revision_id >= 0x80)
		v3 &= 0xfd;
	outSISIDXREG(SISSR, 0x15, v1);	/* Ram type (assuming 0, BIOS 0xa5 step 8) */
outSISIDXREG(SISSR, 0x16, v2); outSISIDXREG(SISSR, 0x17, v3); outSISIDXREG(SISSR, 0x18, v4); outSISIDXREG(SISSR, 0x19, v5); outSISIDXREG(SISSR, 0x1a, v6); outSISIDXREG(SISSR, 0x1b, v7); outSISIDXREG(SISSR, 0x1c, v8); /* ---- */ andSISIDXREG(SISSR, 0x15 ,0xfb); orSISIDXREG(SISSR, 0x15, 0x04); if(bios) { if(bios[0x53] & 0x02) { orSISIDXREG(SISSR, 0x19, 0x20); } } v1 = 0x04; /* DAC pedestal (BIOS 0xe5) */ if(ivideo->revision_id >= 0x80) v1 |= 0x01; outSISIDXREG(SISSR, 0x1f, v1); outSISIDXREG(SISSR, 0x20, 0xa4); /* linear & relocated io & disable a0000 */ v1 = 0xf6; v2 = 0x0d; v3 = 0x00; if(bios) { v1 = bios[0xe8]; v2 = bios[0xe9]; v3 = bios[0xea]; } outSISIDXREG(SISSR, 0x23, v1); outSISIDXREG(SISSR, 0x24, v2); outSISIDXREG(SISSR, 0x25, v3); outSISIDXREG(SISSR, 0x21, 0x84); outSISIDXREG(SISSR, 0x22, 0x00); outSISIDXREG(SISCR, 0x37, 0x00); orSISIDXREG(SISPART1, 0x24, 0x01); /* unlock crt2 */ outSISIDXREG(SISPART1, 0x00, 0x00); v1 = 0x40; v2 = 0x11; if(bios) { v1 = bios[0xec]; v2 = bios[0xeb]; } outSISIDXREG(SISPART1, 0x02, v1); if(ivideo->revision_id >= 0x80) v2 &= ~0x01; inSISIDXREG(SISPART4, 0x00, reg); if((reg == 1) || (reg == 2)) { outSISIDXREG(SISCR, 0x37, 0x02); outSISIDXREG(SISPART2, 0x00, 0x1c); v4 = 0x00; v5 = 0x00; v6 = 0x10; if(ivideo->SiS_Pr.UseROM) { v4 = bios[0xf5]; v5 = bios[0xf6]; v6 = bios[0xf7]; } outSISIDXREG(SISPART4, 0x0d, v4); outSISIDXREG(SISPART4, 0x0e, v5); outSISIDXREG(SISPART4, 0x10, v6); outSISIDXREG(SISPART4, 0x0f, 0x3f); inSISIDXREG(SISPART4, 0x01, reg); if(reg >= 0xb0) { inSISIDXREG(SISPART4, 0x23, reg); reg &= 0x20; reg <<= 1; outSISIDXREG(SISPART4, 0x23, reg); } } else { v2 &= ~0x10; } outSISIDXREG(SISSR, 0x32, v2); andSISIDXREG(SISPART1, 0x24, 0xfe); /* Lock CRT2 */ inSISIDXREG(SISSR, 0x16, reg); reg &= 0xc3; outSISIDXREG(SISCR, 0x35, reg); outSISIDXREG(SISCR, 0x83, 0x00); #if !defined(__i386__) && !defined(__x86_64__) if(sisfb_videoram) { outSISIDXREG(SISSR, 0x13, 0x28); /* ? 
*/ reg = ((sisfb_videoram >> 10) - 1) | 0x40; outSISIDXREG(SISSR, 0x14, reg); } else { #endif /* Need to map max FB size for finding out about RAM size */ mapsize = 64 << 20; sisfb_post_map_vram(ivideo, &mapsize, 4); if(ivideo->video_vbase) { sisfb_post_300_ramsize(pdev, mapsize); iounmap(ivideo->video_vbase); } else { printk(KERN_DEBUG "sisfb: Failed to map memory for size detection, assuming 8MB\n"); outSISIDXREG(SISSR, 0x13, 0x28); /* ? */ outSISIDXREG(SISSR, 0x14, 0x47); /* 8MB, 64bit default */ } #if !defined(__i386__) && !defined(__x86_64__) } #endif if(bios) { v1 = bios[0xe6]; v2 = bios[0xe7]; } else { inSISIDXREG(SISSR, 0x3a, reg); if((reg & 0x30) == 0x30) { v1 = 0x04; /* PCI */ v2 = 0x92; } else { v1 = 0x14; /* AGP */ v2 = 0xb2; } } outSISIDXREG(SISSR, 0x21, v1); outSISIDXREG(SISSR, 0x22, v2); /* Sense CRT1 */ sisfb_sense_crt1(ivideo); /* Set default mode, don't clear screen */ ivideo->SiS_Pr.SiS_UseOEM = false; SiS_SetEnableDstn(&ivideo->SiS_Pr, false); SiS_SetEnableFstn(&ivideo->SiS_Pr, false); ivideo->curFSTN = ivideo->curDSTN = 0; ivideo->SiS_Pr.VideoMemorySize = 8 << 20; SiSSetMode(&ivideo->SiS_Pr, 0x2e | 0x80); outSISIDXREG(SISSR, 0x05, 0x86); /* Display off */ orSISIDXREG(SISSR, 0x01, 0x20); /* Save mode number in CR34 */ outSISIDXREG(SISCR, 0x34, 0x2e); /* Let everyone know what the current mode is */ ivideo->modeprechange = 0x2e; } #endif #ifdef CONFIG_FB_SIS_315 #if 0 static void __devinit sisfb_post_sis315330(struct pci_dev *pdev) { /* TODO */ } #endif static void __devinit sisfb_post_xgi_delay(struct sis_video_info *ivideo, int delay) { unsigned int i; u8 reg; for(i = 0; i <= (delay * 10 * 36); i++) { inSISIDXREG(SISSR, 0x05, reg); reg++; } } static int __devinit sisfb_find_host_bridge(struct sis_video_info *ivideo, struct pci_dev *mypdev, unsigned short pcivendor) { struct pci_dev *pdev = NULL; unsigned short temp; int ret = 0; while((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST, pdev))) { temp = pdev->vendor; if(temp == pcivendor) { ret = 1; 
			pci_dev_put(pdev);
			break;
		}
	}

	return ret;
}

/* Write distinct power-of-two patterns at offsets 2^starta..2^enda and
 * verify they read back unchanged (and that offset 0 stayed zero, i.e.
 * no address wrap-around). Returns 1 if all checks pass, else 0.
 */
static int __devinit
sisfb_post_xgi_rwtest(struct sis_video_info *ivideo, int starta,
			unsigned int enda, unsigned int mapsize)
{
	unsigned int pos;
	int i;

	writel(0, ivideo->video_vbase);

	for(i = starta; i <= enda; i++) {
		pos = 1 << i;
		if(pos < mapsize)
			writel(pos, ivideo->video_vbase + pos);
	}

	sisfb_post_xgi_delay(ivideo, 150);

	/* Offset 0 overwritten => an address bit wrapped around */
	if(readl(ivideo->video_vbase) != 0)
		return 0;

	for(i = starta; i <= enda; i++) {
		pos = 1 << i;
		if(pos < mapsize) {
			if(readl(ivideo->video_vbase + pos) != pos)
				return 0;
		} else
			return 0;
	}

	return 1;
}

/* Detect RAM configuration on XGI chips (XGI_20 vs XGI_40): probe bus
 * width / channel configuration via SR13/SR14 write-read tests, then
 * walk the per-chip DRAM type table to find the rank size. Leaves the
 * winning values in SR13/SR14 and unmaps the detection mapping.
 */
static void __devinit
sisfb_post_xgi_ramsize(struct sis_video_info *ivideo)
{
	unsigned int buswidth, ranksize, channelab, mapsize;
	int i, j, k, l;
	u8 reg, sr14;
	/* Five bytes per DRAM type candidate; [3] = capacity factor,
	 * [4] = SR13 value (tables taken from the XGI BIOS).
	 */
	static const u8 dramsr13[12 * 5] = {
		0x02, 0x0e, 0x0b, 0x80, 0x5d,
		0x02, 0x0e, 0x0a, 0x40, 0x59,
		0x02, 0x0d, 0x0b, 0x40, 0x4d,
		0x02, 0x0e, 0x09, 0x20, 0x55,
		0x02, 0x0d, 0x0a, 0x20, 0x49,
		0x02, 0x0c, 0x0b, 0x20, 0x3d,
		0x02, 0x0e, 0x08, 0x10, 0x51,
		0x02, 0x0d, 0x09, 0x10, 0x45,
		0x02, 0x0c, 0x0a, 0x10, 0x39,
		0x02, 0x0d, 0x08, 0x08, 0x41,
		0x02, 0x0c, 0x09, 0x08, 0x35,
		0x02, 0x0c, 0x08, 0x04, 0x31
	};
	static const u8 dramsr13_4[4 * 5] = {
		0x02, 0x0d, 0x09, 0x40, 0x45,
		0x02, 0x0c, 0x09, 0x20, 0x35,
		0x02, 0x0c, 0x08, 0x10, 0x31,
		0x02, 0x0b, 0x08, 0x08, 0x21
	};

	/* Enable linear mode, disable 0xa0000 address decoding */
	/* We disable a0000 address decoding, because
	 * - if running on x86, if the card is disabled, it means
	 *   that another card is in the system. We don't want
	 *   to interphere with that primary card's textmode.
	 * - if running on non-x86, there usually is no VGA window
	 *   at a0000.
	 */
	orSISIDXREG(SISSR, 0x20, (0x80 | 0x04));

	/* Need to map max FB size for finding out about RAM size */
	mapsize = 256 << 20;
	sisfb_post_map_vram(ivideo, &mapsize, 32);

	if(!ivideo->video_vbase) {
		printk(KERN_ERR "sisfb: Unable to detect RAM size. Setting default.\n");
		outSISIDXREG(SISSR, 0x13, 0x35);
		outSISIDXREG(SISSR, 0x14, 0x41);
		/* TODO */
		return;
	}

	/* Non-interleaving */
	outSISIDXREG(SISSR, 0x15, 0x00);
	/* No tiling */
	outSISIDXREG(SISSR, 0x1c, 0x00);

	if(ivideo->chip == XGI_20) {

		channelab = 1;
		inSISIDXREG(SISCR, 0x97, reg);
		if(!(reg & 0x01)) {	/* Single 32/16 */
			buswidth = 32;
			outSISIDXREG(SISSR, 0x13, 0xb1);
			outSISIDXREG(SISSR, 0x14, 0x52);
			sisfb_post_xgi_delay(ivideo, 1);
			sr14 = 0x02;
			if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize))
				goto bail_out;

			outSISIDXREG(SISSR, 0x13, 0x31);
			outSISIDXREG(SISSR, 0x14, 0x42);
			sisfb_post_xgi_delay(ivideo, 1);
			if(sisfb_post_xgi_rwtest(ivideo, 23, 23, mapsize))
				goto bail_out;

			buswidth = 16;
			outSISIDXREG(SISSR, 0x13, 0xb1);
			outSISIDXREG(SISSR, 0x14, 0x41);
			sisfb_post_xgi_delay(ivideo, 1);
			sr14 = 0x01;
			if(sisfb_post_xgi_rwtest(ivideo, 22, 23, mapsize))
				goto bail_out;
			else
				outSISIDXREG(SISSR, 0x13, 0x31);
		} else {	/* Dual 16/8 */
			buswidth = 16;
			outSISIDXREG(SISSR, 0x13, 0xb1);
			outSISIDXREG(SISSR, 0x14, 0x41);
			sisfb_post_xgi_delay(ivideo, 1);
			sr14 = 0x01;
			if(sisfb_post_xgi_rwtest(ivideo, 22, 23, mapsize))
				goto bail_out;

			outSISIDXREG(SISSR, 0x13, 0x31);
			outSISIDXREG(SISSR, 0x14, 0x31);
			sisfb_post_xgi_delay(ivideo, 1);
			if(sisfb_post_xgi_rwtest(ivideo, 22, 22, mapsize))
				goto bail_out;

			buswidth = 8;
			outSISIDXREG(SISSR, 0x13, 0xb1);
			outSISIDXREG(SISSR, 0x14, 0x30);
			sisfb_post_xgi_delay(ivideo, 1);
			sr14 = 0x00;
			if(sisfb_post_xgi_rwtest(ivideo, 21, 22, mapsize))
				goto bail_out;
			else
				outSISIDXREG(SISSR, 0x13, 0x31);
		}

	} else {	/* XGI_40 */

		inSISIDXREG(SISCR, 0x97, reg);
		if(!(reg & 0x10)) {
			inSISIDXREG(SISSR, 0x39, reg);
			reg >>= 1;
		}

		if(reg & 0x01) {	/* DDRII */
			buswidth = 32;
			if(ivideo->revision_id == 2) {
				channelab = 2;
				outSISIDXREG(SISSR, 0x13, 0xa1);
				outSISIDXREG(SISSR, 0x14, 0x44);
				sr14 = 0x04;
				sisfb_post_xgi_delay(ivideo, 1);
				if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize))
					goto bail_out;

				outSISIDXREG(SISSR, 0x13, 0x21);
				outSISIDXREG(SISSR, 0x14, 0x34);
				if(sisfb_post_xgi_rwtest(ivideo, 22, 23, mapsize))
					goto bail_out;

				channelab = 1;
				outSISIDXREG(SISSR, 0x13, 0xa1);
				outSISIDXREG(SISSR, 0x14, 0x40);
				sr14 = 0x00;
				if(sisfb_post_xgi_rwtest(ivideo, 22, 23, mapsize))
					goto bail_out;

				outSISIDXREG(SISSR, 0x13, 0x21);
				outSISIDXREG(SISSR, 0x14, 0x30);
			} else {
				channelab = 3;
				outSISIDXREG(SISSR, 0x13, 0xa1);
				outSISIDXREG(SISSR, 0x14, 0x4c);
				sr14 = 0x0c;
				sisfb_post_xgi_delay(ivideo, 1);
				if(sisfb_post_xgi_rwtest(ivideo, 23, 25, mapsize))
					goto bail_out;

				channelab = 2;
				outSISIDXREG(SISSR, 0x14, 0x48);
				sisfb_post_xgi_delay(ivideo, 1);
				sr14 = 0x08;
				if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize))
					goto bail_out;

				outSISIDXREG(SISSR, 0x13, 0x21);
				outSISIDXREG(SISSR, 0x14, 0x3c);
				sr14 = 0x0c;

				if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize)) {
					channelab = 3;
				} else {
					channelab = 2;
					outSISIDXREG(SISSR, 0x14, 0x38);
					sr14 = 0x08;
				}
			}
			sisfb_post_xgi_delay(ivideo, 1);

		} else {	/* DDR */

			buswidth = 64;
			if(ivideo->revision_id == 2) {
				channelab = 1;
				outSISIDXREG(SISSR, 0x13, 0xa1);
				outSISIDXREG(SISSR, 0x14, 0x52);
				sisfb_post_xgi_delay(ivideo, 1);
				sr14 = 0x02;
				if(sisfb_post_xgi_rwtest(ivideo, 23, 24, mapsize))
					goto bail_out;

				outSISIDXREG(SISSR, 0x13, 0x21);
				outSISIDXREG(SISSR, 0x14, 0x42);
			} else {
				channelab = 2;
				outSISIDXREG(SISSR, 0x13, 0xa1);
				outSISIDXREG(SISSR, 0x14, 0x5a);
				sisfb_post_xgi_delay(ivideo, 1);
				sr14 = 0x0a;
				if(sisfb_post_xgi_rwtest(ivideo, 24, 25, mapsize))
					goto bail_out;

				outSISIDXREG(SISSR, 0x13, 0x21);
				outSISIDXREG(SISSR, 0x14, 0x4a);
			}
			sisfb_post_xgi_delay(ivideo, 1);
		}
	}

bail_out:
	setSISIDXREG(SISSR, 0x14, 0xf0, sr14);
	sisfb_post_xgi_delay(ivideo, 1);

	/* Walk the per-chip DRAM type table to find the rank size */
	j = (ivideo->chip == XGI_20) ? 5 : 9;
	k = (ivideo->chip == XGI_20) ? 12 : 4;

	for(i = 0; i < k; i++) {

		reg = (ivideo->chip == XGI_20) ?
				dramsr13[(i * 5) + 4] : dramsr13_4[(i * 5) + 4];
		setSISIDXREG(SISSR, 0x13, 0x80, reg);
		sisfb_post_xgi_delay(ivideo, 50);

		ranksize = (ivideo->chip == XGI_20) ?
				dramsr13[(i * 5) + 3] : dramsr13_4[(i * 5) + 3];

		inSISIDXREG(SISSR, 0x13, reg);
		if(reg & 0x80) ranksize <<= 1;

		if(ivideo->chip == XGI_20) {
			if(buswidth == 16)      ranksize <<= 1;
			else if(buswidth == 32) ranksize <<= 2;
		} else {
			if(buswidth == 64)      ranksize <<= 1;
		}

		reg = 0;
		l = channelab;
		if(l == 3) l = 4;
		if((ranksize * l) <= 256) {
			/* Encode the rank size as SR14 high-nibble value */
			while((ranksize >>= 1)) reg += 0x10;
		}

		if(!reg) continue;

		setSISIDXREG(SISSR, 0x14, 0x0f, (reg & 0xf0));
		sisfb_post_xgi_delay(ivideo, 1);

		if(sisfb_post_xgi_rwtest(ivideo, j, ((reg >> 4) + channelab - 2 + 20), mapsize))
			break;
	}

	iounmap(ivideo->video_vbase);
}

/* Program memory/engine clocks during XGI POST. The cs90/csb8 tables
 * hold {SR28, SR29, SR2A}-style PLL triplets per regb index (values
 * taken from the XGI BIOS).
 */
static void __devinit
sisfb_post_xgi_setclocks(struct sis_video_info *ivideo, u8 regb)
{
	u8 v1, v2, v3;
	int index;
	static const u8 cs90[8 * 3] = {
		0x16, 0x01, 0x01,
		0x3e, 0x03, 0x01,
		0x7c, 0x08, 0x01,
		0x79, 0x06, 0x01,
		0x29, 0x01, 0x81,
		0x5c, 0x23, 0x01,
		0x5c, 0x23, 0x01,
		0x5c, 0x23, 0x01
	};
	static const u8 csb8[8 * 3] = {
		0x5c, 0x23, 0x01,
		0x29, 0x01, 0x01,
		0x7c, 0x08, 0x01,
		0x79, 0x06, 0x01,
		0x29, 0x01, 0x81,
		0x5c, 0x23, 0x01,
		0x5c, 0x23, 0x01,
		0x5c, 0x23, 0x01
	};

	regb = 0;	/* !
*/ index = regb * 3; v1 = cs90[index]; v2 = cs90[index + 1]; v3 = cs90[index + 2]; if(ivideo->haveXGIROM) { v1 = ivideo->bios_abase[0x90 + index]; v2 = ivideo->bios_abase[0x90 + index + 1]; v3 = ivideo->bios_abase[0x90 + index + 2]; } outSISIDXREG(SISSR, 0x28, v1); outSISIDXREG(SISSR, 0x29, v2); outSISIDXREG(SISSR, 0x2a, v3); sisfb_post_xgi_delay(ivideo, 0x43); sisfb_post_xgi_delay(ivideo, 0x43); sisfb_post_xgi_delay(ivideo, 0x43); index = regb * 3; v1 = csb8[index]; v2 = csb8[index + 1]; v3 = csb8[index + 2]; if(ivideo->haveXGIROM) { v1 = ivideo->bios_abase[0xb8 + index]; v2 = ivideo->bios_abase[0xb8 + index + 1]; v3 = ivideo->bios_abase[0xb8 + index + 2]; } outSISIDXREG(SISSR, 0x2e, v1); outSISIDXREG(SISSR, 0x2f, v2); outSISIDXREG(SISSR, 0x30, v3); sisfb_post_xgi_delay(ivideo, 0x43); sisfb_post_xgi_delay(ivideo, 0x43); sisfb_post_xgi_delay(ivideo, 0x43); } static int __devinit sisfb_post_xgi(struct pci_dev *pdev) { struct sis_video_info *ivideo = pci_get_drvdata(pdev); unsigned char *bios = ivideo->bios_abase; struct pci_dev *mypdev = NULL; const u8 *ptr, *ptr2; u8 v1, v2, v3, v4, v5, reg, ramtype; u32 rega, regb, regd; int i, j, k, index; static const u8 cs78[3] = { 0xf6, 0x0d, 0x00 }; static const u8 cs76[2] = { 0xa3, 0xfb }; static const u8 cs7b[3] = { 0xc0, 0x11, 0x00 }; static const u8 cs158[8] = { 0x88, 0xaa, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs160[8] = { 0x44, 0x77, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs168[8] = { 0x48, 0x78, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs128[3 * 8] = { 0x90, 0x28, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs148[2 * 8] = { 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs31a[8 * 4] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs33a[8 * 4] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs45a[8 * 2] = { 0x00, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs170[7 * 8] = { 0x54, 0x32, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x43, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x05, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x34, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs1a8[3 * 8] = { 0xf0, 0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cs100[2 * 8] = { 0xc4, 0x04, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0x04, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* VGA enable */ reg = inSISREG(SISVGAENABLE) | 0x01; outSISREG(SISVGAENABLE, reg); /* Misc */ reg = inSISREG(SISMISCR) | 0x01; outSISREG(SISMISCW, reg); /* Unlock SR */ outSISIDXREG(SISSR, 0x05, 0x86); inSISIDXREG(SISSR, 0x05, reg); if(reg != 0xa1) return 0; /* Clear some regs */ for(i = 0; i < 0x22; i++) { if(0x06 + i == 0x20) continue; outSISIDXREG(SISSR, 0x06 + i, 0x00); } for(i = 0; i < 0x0b; i++) { outSISIDXREG(SISSR, 0x31 + i, 0x00); } for(i = 0; i < 0x10; i++) { outSISIDXREG(SISCR, 0x30 + i, 0x00); } ptr = cs78; if(ivideo->haveXGIROM) { ptr = (const u8 *)&bios[0x78]; } for(i = 0; i < 3; i++) { outSISIDXREG(SISSR, 0x23 + i, ptr[i]); } ptr = cs76; if(ivideo->haveXGIROM) { ptr = (const u8 *)&bios[0x76]; } for(i = 0; i < 2; i++) { outSISIDXREG(SISSR, 0x21 + i, ptr[i]); } v1 = 0x18; v2 = 0x00; if(ivideo->haveXGIROM) { v1 = bios[0x74]; v2 = bios[0x75]; } 
outSISIDXREG(SISSR, 0x07, v1); outSISIDXREG(SISSR, 0x11, 0x0f); outSISIDXREG(SISSR, 0x1f, v2); /* PCI linear mode, RelIO enabled, A0000 decoding disabled */ outSISIDXREG(SISSR, 0x20, 0x80 | 0x20 | 0x04); outSISIDXREG(SISSR, 0x27, 0x74); ptr = cs7b; if(ivideo->haveXGIROM) { ptr = (const u8 *)&bios[0x7b]; } for(i = 0; i < 3; i++) { outSISIDXREG(SISSR, 0x31 + i, ptr[i]); } if(ivideo->chip == XGI_40) { if(ivideo->revision_id == 2) { setSISIDXREG(SISSR, 0x3b, 0x3f, 0xc0); } outSISIDXREG(SISCR, 0x7d, 0xfe); outSISIDXREG(SISCR, 0x7e, 0x0f); } if(ivideo->revision_id == 0) { /* 40 *and* 20? */ andSISIDXREG(SISCR, 0x58, 0xd7); inSISIDXREG(SISCR, 0xcb, reg); if(reg & 0x20) { setSISIDXREG(SISCR, 0x58, 0xd7, (reg & 0x10) ? 0x08 : 0x20); /* =0x28 Z7 ? */ } } reg = (ivideo->chip == XGI_40) ? 0x20 : 0x00; setSISIDXREG(SISCR, 0x38, 0x1f, reg); if(ivideo->chip == XGI_20) { outSISIDXREG(SISSR, 0x36, 0x70); } else { outSISIDXREG(SISVID, 0x00, 0x86); outSISIDXREG(SISVID, 0x32, 0x00); outSISIDXREG(SISVID, 0x30, 0x00); outSISIDXREG(SISVID, 0x32, 0x01); outSISIDXREG(SISVID, 0x30, 0x00); andSISIDXREG(SISVID, 0x2f, 0xdf); andSISIDXREG(SISCAP, 0x00, 0x3f); outSISIDXREG(SISPART1, 0x2f, 0x01); outSISIDXREG(SISPART1, 0x00, 0x00); outSISIDXREG(SISPART1, 0x02, bios[0x7e]); outSISIDXREG(SISPART1, 0x2e, 0x08); andSISIDXREG(SISPART1, 0x35, 0x7f); andSISIDXREG(SISPART1, 0x50, 0xfe); inSISIDXREG(SISPART4, 0x00, reg); if(reg == 1 || reg == 2) { outSISIDXREG(SISPART2, 0x00, 0x1c); outSISIDXREG(SISPART4, 0x0d, bios[0x7f]); outSISIDXREG(SISPART4, 0x0e, bios[0x80]); outSISIDXREG(SISPART4, 0x10, bios[0x81]); andSISIDXREG(SISPART4, 0x0f, 0x3f); inSISIDXREG(SISPART4, 0x01, reg); if((reg & 0xf0) >= 0xb0) { inSISIDXREG(SISPART4, 0x23, reg); if(reg & 0x20) reg |= 0x40; outSISIDXREG(SISPART4, 0x23, reg); reg = (reg & 0x20) ? 
0x02 : 0x00; setSISIDXREG(SISPART1, 0x1e, 0xfd, reg); } } v1 = bios[0x77]; inSISIDXREG(SISSR, 0x3b, reg); if(reg & 0x02) { inSISIDXREG(SISSR, 0x3a, reg); v2 = (reg & 0x30) >> 3; if(!(v2 & 0x04)) v2 ^= 0x02; inSISIDXREG(SISSR, 0x39, reg); if(reg & 0x80) v2 |= 0x80; v2 |= 0x01; if((mypdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0730, NULL))) { pci_dev_put(mypdev); if(((v2 & 0x06) == 2) || ((v2 & 0x06) == 4)) v2 &= 0xf9; v2 |= 0x08; v1 &= 0xfe; } else { mypdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0735, NULL); if(!mypdev) mypdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0645, NULL); if(!mypdev) mypdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0650, NULL); if(mypdev) { pci_read_config_dword(mypdev, 0x94, &regd); regd &= 0xfffffeff; pci_write_config_dword(mypdev, 0x94, regd); v1 &= 0xfe; pci_dev_put(mypdev); } else if(sisfb_find_host_bridge(ivideo, pdev, PCI_VENDOR_ID_SI)) { v1 &= 0xfe; } else if(sisfb_find_host_bridge(ivideo, pdev, 0x1106) || sisfb_find_host_bridge(ivideo, pdev, 0x1022) || sisfb_find_host_bridge(ivideo, pdev, 0x700e) || sisfb_find_host_bridge(ivideo, pdev, 0x10de)) { if((v2 & 0x06) == 4) v2 ^= 0x06; v2 |= 0x08; } } setSISIDXREG(SISCR, 0x5f, 0xf0, v2); } outSISIDXREG(SISSR, 0x22, v1); if(ivideo->revision_id == 2) { inSISIDXREG(SISSR, 0x3b, v1); inSISIDXREG(SISSR, 0x3a, v2); regd = bios[0x90 + 3] | (bios[0x90 + 4] << 8); if( (!(v1 & 0x02)) && (v2 & 0x30) && (regd < 0xcf) ) setSISIDXREG(SISCR, 0x5f, 0xf1, 0x01); if((mypdev = pci_get_device(0x10de, 0x01e0, NULL))) { /* TODO: set CR5f &0xf1 | 0x01 for version 6570 * of nforce 2 ROM */ if(0) setSISIDXREG(SISCR, 0x5f, 0xf1, 0x01); pci_dev_put(mypdev); } } v1 = 0x30; inSISIDXREG(SISSR, 0x3b, reg); inSISIDXREG(SISCR, 0x5f, v2); if((!(reg & 0x02)) && (v2 & 0x0e)) v1 |= 0x08; outSISIDXREG(SISSR, 0x27, v1); if(bios[0x64] & 0x01) { setSISIDXREG(SISCR, 0x5f, 0xf0, bios[0x64]); } v1 = bios[0x4f7]; pci_read_config_dword(pdev, 0x50, &regd); regd = (regd >> 20) & 0x0f; if(regd == 1) { v1 &= 0xfc; orSISIDXREG(SISCR, 0x5f, 0x08); } 
outSISIDXREG(SISCR, 0x48, v1); setSISIDXREG(SISCR, 0x47, 0x04, bios[0x4f6] & 0xfb); setSISIDXREG(SISCR, 0x49, 0xf0, bios[0x4f8] & 0x0f); setSISIDXREG(SISCR, 0x4a, 0x60, bios[0x4f9] & 0x9f); setSISIDXREG(SISCR, 0x4b, 0x08, bios[0x4fa] & 0xf7); setSISIDXREG(SISCR, 0x4c, 0x80, bios[0x4fb] & 0x7f); outSISIDXREG(SISCR, 0x70, bios[0x4fc]); setSISIDXREG(SISCR, 0x71, 0xf0, bios[0x4fd] & 0x0f); outSISIDXREG(SISCR, 0x74, 0xd0); setSISIDXREG(SISCR, 0x74, 0xcf, bios[0x4fe] & 0x30); setSISIDXREG(SISCR, 0x75, 0xe0, bios[0x4ff] & 0x1f); setSISIDXREG(SISCR, 0x76, 0xe0, bios[0x500] & 0x1f); v1 = bios[0x501]; if((mypdev = pci_get_device(0x8086, 0x2530, NULL))) { v1 = 0xf0; pci_dev_put(mypdev); } outSISIDXREG(SISCR, 0x77, v1); } /* RAM type */ regb = 0; /* ! */ v1 = 0xff; if(ivideo->haveXGIROM) { v1 = bios[0x140 + regb]; } outSISIDXREG(SISCR, 0x6d, v1); ptr = cs128; if(ivideo->haveXGIROM) { ptr = (const u8 *)&bios[0x128]; } for(i = 0, j = 0; i < 3; i++, j += 8) { outSISIDXREG(SISCR, 0x68 + i, ptr[j + regb]); } ptr = cs31a; ptr2 = cs33a; if(ivideo->haveXGIROM) { index = (ivideo->chip == XGI_20) ? 0x31a : 0x3a6; ptr = (const u8 *)&bios[index]; ptr2 = (const u8 *)&bios[index + 0x20]; } for(i = 0; i < 2; i++) { if(i == 0) { regd = le32_to_cpu(((u32 *)ptr)[regb]); rega = 0x6b; } else { regd = le32_to_cpu(((u32 *)ptr2)[regb]); rega = 0x6e; } reg = 0x00; for(j = 0; j < 16; j++) { reg &= 0xf3; if(regd & 0x01) reg |= 0x04; if(regd & 0x02) reg |= 0x08; regd >>= 2; outSISIDXREG(SISCR, rega, reg); inSISIDXREG(SISCR, rega, reg); inSISIDXREG(SISCR, rega, reg); reg += 0x10; } } andSISIDXREG(SISCR, 0x6e, 0xfc); ptr = NULL; if(ivideo->haveXGIROM) { index = (ivideo->chip == XGI_20) ? 
0x35a : 0x3e6; ptr = (const u8 *)&bios[index]; } for(i = 0; i < 4; i++) { setSISIDXREG(SISCR, 0x6e, 0xfc, i); reg = 0x00; for(j = 0; j < 2; j++) { regd = 0; if(ptr) { regd = le32_to_cpu(((u32 *)ptr)[regb * 8]); ptr += 4; } /* reg = 0x00; */ for(k = 0; k < 16; k++) { reg &= 0xfc; if(regd & 0x01) reg |= 0x01; if(regd & 0x02) reg |= 0x02; regd >>= 2; outSISIDXREG(SISCR, 0x6f, reg); inSISIDXREG(SISCR, 0x6f, reg); inSISIDXREG(SISCR, 0x6f, reg); reg += 0x08; } } } ptr = cs148; if(ivideo->haveXGIROM) { ptr = (const u8 *)&bios[0x148]; } for(i = 0, j = 0; i < 2; i++, j += 8) { outSISIDXREG(SISCR, 0x80 + i, ptr[j + regb]); } andSISIDXREG(SISCR, 0x89, 0x8f); ptr = cs45a; if(ivideo->haveXGIROM) { index = (ivideo->chip == XGI_20) ? 0x45a : 0x4e6; ptr = (const u8 *)&bios[index]; } regd = le16_to_cpu(((const u16 *)ptr)[regb]); reg = 0x80; for(i = 0; i < 5; i++) { reg &= 0xfc; if(regd & 0x01) reg |= 0x01; if(regd & 0x02) reg |= 0x02; regd >>= 2; outSISIDXREG(SISCR, 0x89, reg); inSISIDXREG(SISCR, 0x89, reg); inSISIDXREG(SISCR, 0x89, reg); reg += 0x10; } v1 = 0xb5; v2 = 0x20; v3 = 0xf0; v4 = 0x13; if(ivideo->haveXGIROM) { v1 = bios[0x118 + regb]; v2 = bios[0xf8 + regb]; v3 = bios[0x120 + regb]; v4 = bios[0x1ca]; } outSISIDXREG(SISCR, 0x45, v1 & 0x0f); outSISIDXREG(SISCR, 0x99, (v1 >> 4) & 0x07); orSISIDXREG(SISCR, 0x40, v1 & 0x80); outSISIDXREG(SISCR, 0x41, v2); ptr = cs170; if(ivideo->haveXGIROM) { ptr = (const u8 *)&bios[0x170]; } for(i = 0, j = 0; i < 7; i++, j += 8) { outSISIDXREG(SISCR, 0x90 + i, ptr[j + regb]); } outSISIDXREG(SISCR, 0x59, v3); ptr = cs1a8; if(ivideo->haveXGIROM) { ptr = (const u8 *)&bios[0x1a8]; } for(i = 0, j = 0; i < 3; i++, j += 8) { outSISIDXREG(SISCR, 0xc3 + i, ptr[j + regb]); } ptr = cs100; if(ivideo->haveXGIROM) { ptr = (const u8 *)&bios[0x100]; } for(i = 0, j = 0; i < 2; i++, j += 8) { outSISIDXREG(SISCR, 0x8a + i, ptr[j + regb]); } outSISIDXREG(SISCR, 0xcf, v4); outSISIDXREG(SISCR, 0x83, 0x09); outSISIDXREG(SISCR, 0x87, 0x00); if(ivideo->chip == 
XGI_40) { if( (ivideo->revision_id == 1) || (ivideo->revision_id == 2) ) { outSISIDXREG(SISCR, 0x8c, 0x87); } } outSISIDXREG(SISSR, 0x17, 0x00); outSISIDXREG(SISSR, 0x1a, 0x87); if(ivideo->chip == XGI_20) { outSISIDXREG(SISSR, 0x15, 0x00); outSISIDXREG(SISSR, 0x1c, 0x00); } ramtype = 0x00; v1 = 0x10; if(ivideo->haveXGIROM) { ramtype = bios[0x62]; v1 = bios[0x1d2]; } if(!(ramtype & 0x80)) { if(ivideo->chip == XGI_20) { outSISIDXREG(SISCR, 0x97, v1); inSISIDXREG(SISCR, 0x97, reg); if(reg & 0x10) { ramtype = (reg & 0x01) << 1; } } else { inSISIDXREG(SISSR, 0x39, reg); ramtype = reg & 0x02; if(!(ramtype)) { inSISIDXREG(SISSR, 0x3a, reg); ramtype = (reg >> 1) & 0x01; } } } ramtype &= 0x07; regb = 0; /* ! */ switch(ramtype) { case 0: sisfb_post_xgi_setclocks(ivideo, regb); if((ivideo->chip == XGI_20) || (ivideo->revision_id == 1) || (ivideo->revision_id == 2)) { v1 = cs158[regb]; v2 = cs160[regb]; v3 = cs168[regb]; if(ivideo->haveXGIROM) { v1 = bios[regb + 0x158]; v2 = bios[regb + 0x160]; v3 = bios[regb + 0x168]; } outSISIDXREG(SISCR, 0x82, v1); outSISIDXREG(SISCR, 0x85, v2); outSISIDXREG(SISCR, 0x86, v3); } else { outSISIDXREG(SISCR, 0x82, 0x88); outSISIDXREG(SISCR, 0x86, 0x00); inSISIDXREG(SISCR, 0x86, reg); outSISIDXREG(SISCR, 0x86, 0x88); inSISIDXREG(SISCR, 0x86, reg); outSISIDXREG(SISCR, 0x86, bios[regb + 0x168]); outSISIDXREG(SISCR, 0x82, 0x77); outSISIDXREG(SISCR, 0x85, 0x00); inSISIDXREG(SISCR, 0x85, reg); outSISIDXREG(SISCR, 0x85, 0x88); inSISIDXREG(SISCR, 0x85, reg); outSISIDXREG(SISCR, 0x85, bios[regb + 0x160]); outSISIDXREG(SISCR, 0x82, bios[regb + 0x158]); } if(ivideo->chip == XGI_40) { outSISIDXREG(SISCR, 0x97, 0x00); } outSISIDXREG(SISCR, 0x98, 0x01); outSISIDXREG(SISCR, 0x9a, 0x02); outSISIDXREG(SISSR, 0x18, 0x01); if((ivideo->chip == XGI_20) || (ivideo->revision_id == 2)) { outSISIDXREG(SISSR, 0x19, 0x40); } else { outSISIDXREG(SISSR, 0x19, 0x20); } outSISIDXREG(SISSR, 0x16, 0x00); outSISIDXREG(SISSR, 0x16, 0x80); if((ivideo->chip == XGI_20) || 
(bios[0x1cb] != 0x0c)) { sisfb_post_xgi_delay(ivideo, 0x43); sisfb_post_xgi_delay(ivideo, 0x43); sisfb_post_xgi_delay(ivideo, 0x43); outSISIDXREG(SISSR, 0x18, 0x00); if((ivideo->chip == XGI_20) || (ivideo->revision_id == 2)) { outSISIDXREG(SISSR, 0x19, 0x40); } else { outSISIDXREG(SISSR, 0x19, 0x20); } } else if((ivideo->chip == XGI_40) && (bios[0x1cb] == 0x0c)) { /* outSISIDXREG(SISSR, 0x16, 0x0c); */ /* ? */ } outSISIDXREG(SISSR, 0x16, 0x00); outSISIDXREG(SISSR, 0x16, 0x80); sisfb_post_xgi_delay(ivideo, 4); v1 = 0x31; v2 = 0x03; v3 = 0x83; v4 = 0x03; v5 = 0x83; if(ivideo->haveXGIROM) { v1 = bios[0xf0]; index = (ivideo->chip == XGI_20) ? 0x4b2 : 0x53e; v2 = bios[index]; v3 = bios[index + 1]; v4 = bios[index + 2]; v5 = bios[index + 3]; } outSISIDXREG(SISSR, 0x18, v1); outSISIDXREG(SISSR, 0x19, ((ivideo->chip == XGI_20) ? 0x02 : 0x01)); outSISIDXREG(SISSR, 0x16, v2); outSISIDXREG(SISSR, 0x16, v3); sisfb_post_xgi_delay(ivideo, 0x43); outSISIDXREG(SISSR, 0x1b, 0x03); sisfb_post_xgi_delay(ivideo, 0x22); outSISIDXREG(SISSR, 0x18, v1); outSISIDXREG(SISSR, 0x19, 0x00); outSISIDXREG(SISSR, 0x16, v4); outSISIDXREG(SISSR, 0x16, v5); outSISIDXREG(SISSR, 0x1b, 0x00); break; case 1: outSISIDXREG(SISCR, 0x82, 0x77); outSISIDXREG(SISCR, 0x86, 0x00); inSISIDXREG(SISCR, 0x86, reg); outSISIDXREG(SISCR, 0x86, 0x88); inSISIDXREG(SISCR, 0x86, reg); v1 = cs168[regb]; v2 = cs160[regb]; v3 = cs158[regb]; if(ivideo->haveXGIROM) { v1 = bios[regb + 0x168]; v2 = bios[regb + 0x160]; v3 = bios[regb + 0x158]; } outSISIDXREG(SISCR, 0x86, v1); outSISIDXREG(SISCR, 0x82, 0x77); outSISIDXREG(SISCR, 0x85, 0x00); inSISIDXREG(SISCR, 0x85, reg); outSISIDXREG(SISCR, 0x85, 0x88); inSISIDXREG(SISCR, 0x85, reg); outSISIDXREG(SISCR, 0x85, v2); outSISIDXREG(SISCR, 0x82, v3); outSISIDXREG(SISCR, 0x98, 0x01); outSISIDXREG(SISCR, 0x9a, 0x02); outSISIDXREG(SISSR, 0x28, 0x64); outSISIDXREG(SISSR, 0x29, 0x63); sisfb_post_xgi_delay(ivideo, 15); outSISIDXREG(SISSR, 0x18, 0x00); outSISIDXREG(SISSR, 0x19, 0x20); 
outSISIDXREG(SISSR, 0x16, 0x00); outSISIDXREG(SISSR, 0x16, 0x80); outSISIDXREG(SISSR, 0x18, 0xc5); outSISIDXREG(SISSR, 0x19, 0x23); outSISIDXREG(SISSR, 0x16, 0x00); outSISIDXREG(SISSR, 0x16, 0x80); sisfb_post_xgi_delay(ivideo, 1); outSISIDXREG(SISCR, 0x97,0x11); sisfb_post_xgi_setclocks(ivideo, regb); sisfb_post_xgi_delay(ivideo, 0x46); outSISIDXREG(SISSR, 0x18, 0xc5); outSISIDXREG(SISSR, 0x19, 0x23); outSISIDXREG(SISSR, 0x16, 0x00); outSISIDXREG(SISSR, 0x16, 0x80); sisfb_post_xgi_delay(ivideo, 1); outSISIDXREG(SISSR, 0x1b, 0x04); sisfb_post_xgi_delay(ivideo, 1); outSISIDXREG(SISSR, 0x1b, 0x00); sisfb_post_xgi_delay(ivideo, 1); v1 = 0x31; if(ivideo->haveXGIROM) { v1 = bios[0xf0]; } outSISIDXREG(SISSR, 0x18, v1); outSISIDXREG(SISSR, 0x19, 0x06); outSISIDXREG(SISSR, 0x16, 0x04); outSISIDXREG(SISSR, 0x16, 0x84); sisfb_post_xgi_delay(ivideo, 1); break; default: sisfb_post_xgi_setclocks(ivideo, regb); if((ivideo->chip == XGI_40) && ((ivideo->revision_id == 1) || (ivideo->revision_id == 2))) { outSISIDXREG(SISCR, 0x82, bios[regb + 0x158]); outSISIDXREG(SISCR, 0x85, bios[regb + 0x160]); outSISIDXREG(SISCR, 0x86, bios[regb + 0x168]); } else { outSISIDXREG(SISCR, 0x82, 0x88); outSISIDXREG(SISCR, 0x86, 0x00); inSISIDXREG(SISCR, 0x86, reg); outSISIDXREG(SISCR, 0x86, 0x88); outSISIDXREG(SISCR, 0x82, 0x77); outSISIDXREG(SISCR, 0x85, 0x00); inSISIDXREG(SISCR, 0x85, reg); outSISIDXREG(SISCR, 0x85, 0x88); inSISIDXREG(SISCR, 0x85, reg); v1 = cs160[regb]; v2 = cs158[regb]; if(ivideo->haveXGIROM) { v1 = bios[regb + 0x160]; v2 = bios[regb + 0x158]; } outSISIDXREG(SISCR, 0x85, v1); outSISIDXREG(SISCR, 0x82, v2); } if(ivideo->chip == XGI_40) { outSISIDXREG(SISCR, 0x97, 0x11); } if((ivideo->chip == XGI_40) && (ivideo->revision_id == 2)) { outSISIDXREG(SISCR, 0x98, 0x01); } else { outSISIDXREG(SISCR, 0x98, 0x03); } outSISIDXREG(SISCR, 0x9a, 0x02); if(ivideo->chip == XGI_40) { outSISIDXREG(SISSR, 0x18, 0x01); } else { outSISIDXREG(SISSR, 0x18, 0x00); } outSISIDXREG(SISSR, 0x19, 0x40); 
outSISIDXREG(SISSR, 0x16, 0x00); outSISIDXREG(SISSR, 0x16, 0x80); if((ivideo->chip == XGI_40) && (bios[0x1cb] != 0x0c)) { sisfb_post_xgi_delay(ivideo, 0x43); sisfb_post_xgi_delay(ivideo, 0x43); sisfb_post_xgi_delay(ivideo, 0x43); outSISIDXREG(SISSR, 0x18, 0x00); outSISIDXREG(SISSR, 0x19, 0x40); outSISIDXREG(SISSR, 0x16, 0x00); outSISIDXREG(SISSR, 0x16, 0x80); } sisfb_post_xgi_delay(ivideo, 4); v1 = 0x31; if(ivideo->haveXGIROM) { v1 = bios[0xf0]; } outSISIDXREG(SISSR, 0x18, v1); outSISIDXREG(SISSR, 0x19, 0x01); if(ivideo->chip == XGI_40) { outSISIDXREG(SISSR, 0x16, bios[0x53e]); outSISIDXREG(SISSR, 0x16, bios[0x53f]); } else { outSISIDXREG(SISSR, 0x16, 0x05); outSISIDXREG(SISSR, 0x16, 0x85); } sisfb_post_xgi_delay(ivideo, 0x43); if(ivideo->chip == XGI_40) { outSISIDXREG(SISSR, 0x1b, 0x01); } else { outSISIDXREG(SISSR, 0x1b, 0x03); } sisfb_post_xgi_delay(ivideo, 0x22); outSISIDXREG(SISSR, 0x18, v1); outSISIDXREG(SISSR, 0x19, 0x00); if(ivideo->chip == XGI_40) { outSISIDXREG(SISSR, 0x16, bios[0x540]); outSISIDXREG(SISSR, 0x16, bios[0x541]); } else { outSISIDXREG(SISSR, 0x16, 0x05); outSISIDXREG(SISSR, 0x16, 0x85); } outSISIDXREG(SISSR, 0x1b, 0x00); } regb = 0; /* ! */ v1 = 0x03; if(ivideo->haveXGIROM) { v1 = bios[0x110 + regb]; } outSISIDXREG(SISSR, 0x1b, v1); /* RAM size */ v1 = 0x00; v2 = 0x00; if(ivideo->haveXGIROM) { v1 = bios[0x62]; v2 = bios[0x63]; } regb = 0; /* ! 
*/ regd = 1 << regb; if((v1 & 0x40) && (v2 & regd) && ivideo->haveXGIROM) { outSISIDXREG(SISSR, 0x13, bios[regb + 0xe0]); outSISIDXREG(SISSR, 0x14, bios[regb + 0xe0 + 8]); } else { /* Set default mode, don't clear screen */ ivideo->SiS_Pr.SiS_UseOEM = false; SiS_SetEnableDstn(&ivideo->SiS_Pr, false); SiS_SetEnableFstn(&ivideo->SiS_Pr, false); ivideo->curFSTN = ivideo->curDSTN = 0; ivideo->SiS_Pr.VideoMemorySize = 8 << 20; SiSSetMode(&ivideo->SiS_Pr, 0x2e | 0x80); outSISIDXREG(SISSR, 0x05, 0x86); /* Disable read-cache */ andSISIDXREG(SISSR, 0x21, 0xdf); sisfb_post_xgi_ramsize(ivideo); /* Enable read-cache */ orSISIDXREG(SISSR, 0x21, 0x20); } #if 0 printk(KERN_DEBUG "-----------------\n"); for(i = 0; i < 0xff; i++) { inSISIDXREG(SISCR, i, reg); printk(KERN_DEBUG "CR%02x(%x) = 0x%02x\n", i, SISCR, reg); } for(i = 0; i < 0x40; i++) { inSISIDXREG(SISSR, i, reg); printk(KERN_DEBUG "SR%02x(%x) = 0x%02x\n", i, SISSR, reg); } printk(KERN_DEBUG "-----------------\n"); #endif /* Sense CRT1 */ if(ivideo->chip == XGI_20) { orSISIDXREG(SISCR, 0x32, 0x20); } else { inSISIDXREG(SISPART4, 0x00, reg); if((reg == 1) || (reg == 2)) { sisfb_sense_crt1(ivideo); } else { orSISIDXREG(SISCR, 0x32, 0x20); } } /* Set default mode, don't clear screen */ ivideo->SiS_Pr.SiS_UseOEM = false; SiS_SetEnableDstn(&ivideo->SiS_Pr, false); SiS_SetEnableFstn(&ivideo->SiS_Pr, false); ivideo->curFSTN = ivideo->curDSTN = 0; SiSSetMode(&ivideo->SiS_Pr, 0x2e | 0x80); outSISIDXREG(SISSR, 0x05, 0x86); /* Display off */ orSISIDXREG(SISSR, 0x01, 0x20); /* Save mode number in CR34 */ outSISIDXREG(SISCR, 0x34, 0x2e); /* Let everyone know what the current mode is */ ivideo->modeprechange = 0x2e; if(ivideo->chip == XGI_40) { inSISIDXREG(SISCR, 0xca, reg); inSISIDXREG(SISCR, 0xcc, v1); if((reg & 0x10) && (!(v1 & 0x04))) { printk(KERN_ERR "sisfb: Please connect power to the card.\n"); return 0; } } return 1; } #endif static int __devinit sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct 
sisfb_chip_info *chipinfo = &sisfb_chip_info[ent->driver_data]; struct sis_video_info *ivideo = NULL; struct fb_info *sis_fb_info = NULL; u16 reg16; u8 reg; int i, ret; if(sisfb_off) return -ENXIO; sis_fb_info = framebuffer_alloc(sizeof(*ivideo), &pdev->dev); if(!sis_fb_info) return -ENOMEM; ivideo = (struct sis_video_info *)sis_fb_info->par; ivideo->memyselfandi = sis_fb_info; ivideo->sisfb_id = SISFB_ID; if(card_list == NULL) { ivideo->cardnumber = 0; } else { struct sis_video_info *countvideo = card_list; ivideo->cardnumber = 1; while((countvideo = countvideo->next) != NULL) ivideo->cardnumber++; } strncpy(ivideo->myid, chipinfo->chip_name, 30); ivideo->warncount = 0; ivideo->chip_id = pdev->device; ivideo->chip_vendor = pdev->vendor; ivideo->revision_id = pdev->revision; ivideo->SiS_Pr.ChipRevision = ivideo->revision_id; pci_read_config_word(pdev, PCI_COMMAND, &reg16); ivideo->sisvga_enabled = reg16 & 0x01; ivideo->pcibus = pdev->bus->number; ivideo->pcislot = PCI_SLOT(pdev->devfn); ivideo->pcifunc = PCI_FUNC(pdev->devfn); ivideo->subsysvendor = pdev->subsystem_vendor; ivideo->subsysdevice = pdev->subsystem_device; #ifndef MODULE if(sisfb_mode_idx == -1) { sisfb_get_vga_mode_from_kernel(); } #endif ivideo->chip = chipinfo->chip; ivideo->sisvga_engine = chipinfo->vgaengine; ivideo->hwcursor_size = chipinfo->hwcursor_size; ivideo->CRT2_write_enable = chipinfo->CRT2_write_enable; ivideo->mni = chipinfo->mni; ivideo->detectedpdc = 0xff; ivideo->detectedpdca = 0xff; ivideo->detectedlcda = 0xff; ivideo->sisfb_thismonitor.datavalid = false; ivideo->current_base = 0; ivideo->engineok = 0; ivideo->sisfb_was_boot_device = 0; if(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) { if(ivideo->sisvga_enabled) ivideo->sisfb_was_boot_device = 1; else { printk(KERN_DEBUG "sisfb: PCI device is disabled, " "but marked as boot video device ???\n"); printk(KERN_DEBUG "sisfb: I will not accept this " "as the primary VGA device\n"); } } ivideo->sisfb_parm_mem = 
sisfb_parm_mem; ivideo->sisfb_accel = sisfb_accel; ivideo->sisfb_ypan = sisfb_ypan; ivideo->sisfb_max = sisfb_max; ivideo->sisfb_userom = sisfb_userom; ivideo->sisfb_useoem = sisfb_useoem; ivideo->sisfb_mode_idx = sisfb_mode_idx; ivideo->sisfb_parm_rate = sisfb_parm_rate; ivideo->sisfb_crt1off = sisfb_crt1off; ivideo->sisfb_forcecrt1 = sisfb_forcecrt1; ivideo->sisfb_crt2type = sisfb_crt2type; ivideo->sisfb_crt2flags = sisfb_crt2flags; /* pdc(a), scalelcd, special timing, lvdshl handled below */ ivideo->sisfb_dstn = sisfb_dstn; ivideo->sisfb_fstn = sisfb_fstn; ivideo->sisfb_tvplug = sisfb_tvplug; ivideo->sisfb_tvstd = sisfb_tvstd; ivideo->tvxpos = sisfb_tvxposoffset; ivideo->tvypos = sisfb_tvyposoffset; ivideo->sisfb_nocrt2rate = sisfb_nocrt2rate; ivideo->refresh_rate = 0; if(ivideo->sisfb_parm_rate != -1) { ivideo->refresh_rate = ivideo->sisfb_parm_rate; } ivideo->SiS_Pr.UsePanelScaler = sisfb_scalelcd; ivideo->SiS_Pr.CenterScreen = -1; ivideo->SiS_Pr.SiS_CustomT = sisfb_specialtiming; ivideo->SiS_Pr.LVDSHL = sisfb_lvdshl; ivideo->SiS_Pr.SiS_Backup70xx = 0xff; ivideo->SiS_Pr.SiS_CHOverScan = -1; ivideo->SiS_Pr.SiS_ChSW = false; ivideo->SiS_Pr.SiS_UseLCDA = false; ivideo->SiS_Pr.HaveEMI = false; ivideo->SiS_Pr.HaveEMILCD = false; ivideo->SiS_Pr.OverruleEMI = false; ivideo->SiS_Pr.SiS_SensibleSR11 = false; ivideo->SiS_Pr.SiS_MyCR63 = 0x63; ivideo->SiS_Pr.PDC = -1; ivideo->SiS_Pr.PDCA = -1; ivideo->SiS_Pr.DDCPortMixup = false; #ifdef CONFIG_FB_SIS_315 if(ivideo->chip >= SIS_330) { ivideo->SiS_Pr.SiS_MyCR63 = 0x53; if(ivideo->chip >= SIS_661) { ivideo->SiS_Pr.SiS_SensibleSR11 = true; } } #endif memcpy(&ivideo->default_var, &my_default_var, sizeof(my_default_var)); pci_set_drvdata(pdev, ivideo); /* Patch special cases */ if((ivideo->nbridge = sisfb_get_northbridge(ivideo->chip))) { switch(ivideo->nbridge->device) { #ifdef CONFIG_FB_SIS_300 case PCI_DEVICE_ID_SI_730: ivideo->chip = SIS_730; strcpy(ivideo->myid, "SiS 730"); break; #endif #ifdef CONFIG_FB_SIS_315 case 
PCI_DEVICE_ID_SI_651: /* ivideo->chip is ok */ strcpy(ivideo->myid, "SiS 651"); break; case PCI_DEVICE_ID_SI_740: ivideo->chip = SIS_740; strcpy(ivideo->myid, "SiS 740"); break; case PCI_DEVICE_ID_SI_661: ivideo->chip = SIS_661; strcpy(ivideo->myid, "SiS 661"); break; case PCI_DEVICE_ID_SI_741: ivideo->chip = SIS_741; strcpy(ivideo->myid, "SiS 741"); break; case PCI_DEVICE_ID_SI_760: ivideo->chip = SIS_760; strcpy(ivideo->myid, "SiS 760"); break; case PCI_DEVICE_ID_SI_761: ivideo->chip = SIS_761; strcpy(ivideo->myid, "SiS 761"); break; #endif default: break; } } ivideo->SiS_Pr.ChipType = ivideo->chip; ivideo->SiS_Pr.ivideo = (void *)ivideo; #ifdef CONFIG_FB_SIS_315 if((ivideo->SiS_Pr.ChipType == SIS_315PRO) || (ivideo->SiS_Pr.ChipType == SIS_315)) { ivideo->SiS_Pr.ChipType = SIS_315H; } #endif if(!ivideo->sisvga_enabled) { if(pci_enable_device(pdev)) { if(ivideo->nbridge) pci_dev_put(ivideo->nbridge); pci_set_drvdata(pdev, NULL); framebuffer_release(sis_fb_info); return -EIO; } } ivideo->video_base = pci_resource_start(pdev, 0); ivideo->mmio_base = pci_resource_start(pdev, 1); ivideo->mmio_size = pci_resource_len(pdev, 1); ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30; ivideo->SiS_Pr.IOAddress = ivideo->vga_base = ivideo->SiS_Pr.RelIO; SiSRegInit(&ivideo->SiS_Pr, ivideo->SiS_Pr.IOAddress); #ifdef CONFIG_FB_SIS_300 /* Find PCI systems for Chrontel/GPIO communication setup */ if(ivideo->chip == SIS_630) { i = 0; do { if(mychswtable[i].subsysVendor == ivideo->subsysvendor && mychswtable[i].subsysCard == ivideo->subsysdevice) { ivideo->SiS_Pr.SiS_ChSW = true; printk(KERN_DEBUG "sisfb: Identified [%s %s] " "requiring Chrontel/GPIO setup\n", mychswtable[i].vendorName, mychswtable[i].cardName); ivideo->lpcdev = pci_get_device(PCI_VENDOR_ID_SI, 0x0008, NULL); break; } i++; } while(mychswtable[i].subsysVendor != 0); } #endif #ifdef CONFIG_FB_SIS_315 if((ivideo->chip == SIS_760) && (ivideo->nbridge)) { ivideo->lpcdev = pci_get_slot(ivideo->nbridge->bus, (2 << 
3)); } #endif outSISIDXREG(SISSR, 0x05, 0x86); if( (!ivideo->sisvga_enabled) #if !defined(__i386__) && !defined(__x86_64__) || (sisfb_resetcard) #endif ) { for(i = 0x30; i <= 0x3f; i++) { outSISIDXREG(SISCR, i, 0x00); } } /* Find out about current video mode */ ivideo->modeprechange = 0x03; inSISIDXREG(SISCR, 0x34, reg); if(reg & 0x7f) { ivideo->modeprechange = reg & 0x7f; } else if(ivideo->sisvga_enabled) { #if defined(__i386__) || defined(__x86_64__) unsigned char __iomem *tt = ioremap(0x400, 0x100); if(tt) { ivideo->modeprechange = readb(tt + 0x49); iounmap(tt); } #endif } /* Search and copy ROM image */ ivideo->bios_abase = NULL; ivideo->SiS_Pr.VirtualRomBase = NULL; ivideo->SiS_Pr.UseROM = false; ivideo->haveXGIROM = ivideo->SiS_Pr.SiS_XGIROM = false; if(ivideo->sisfb_userom) { ivideo->SiS_Pr.VirtualRomBase = sisfb_find_rom(pdev); ivideo->bios_abase = ivideo->SiS_Pr.VirtualRomBase; ivideo->SiS_Pr.UseROM = (bool)(ivideo->SiS_Pr.VirtualRomBase); printk(KERN_INFO "sisfb: Video ROM %sfound\n", ivideo->SiS_Pr.UseROM ? 
"" : "not "); if((ivideo->SiS_Pr.UseROM) && (ivideo->chip >= XGI_20)) { ivideo->SiS_Pr.UseROM = false; ivideo->haveXGIROM = ivideo->SiS_Pr.SiS_XGIROM = true; if( (ivideo->revision_id == 2) && (!(ivideo->bios_abase[0x1d1] & 0x01)) ) { ivideo->SiS_Pr.DDCPortMixup = true; } } } else { printk(KERN_INFO "sisfb: Video ROM usage disabled\n"); } /* Find systems for special custom timing */ if(ivideo->SiS_Pr.SiS_CustomT == CUT_NONE) { sisfb_detect_custom_timing(ivideo); } /* POST card in case this has not been done by the BIOS */ if( (!ivideo->sisvga_enabled) #if !defined(__i386__) && !defined(__x86_64__) || (sisfb_resetcard) #endif ) { #ifdef CONFIG_FB_SIS_300 if(ivideo->sisvga_engine == SIS_300_VGA) { if(ivideo->chip == SIS_300) { sisfb_post_sis300(pdev); ivideo->sisfb_can_post = 1; } } #endif #ifdef CONFIG_FB_SIS_315 if(ivideo->sisvga_engine == SIS_315_VGA) { int result = 1; /* if((ivideo->chip == SIS_315H) || (ivideo->chip == SIS_315) || (ivideo->chip == SIS_315PRO) || (ivideo->chip == SIS_330)) { sisfb_post_sis315330(pdev); } else */ if(ivideo->chip == XGI_20) { result = sisfb_post_xgi(pdev); ivideo->sisfb_can_post = 1; } else if((ivideo->chip == XGI_40) && ivideo->haveXGIROM) { result = sisfb_post_xgi(pdev); ivideo->sisfb_can_post = 1; } else { printk(KERN_INFO "sisfb: Card is not " "POSTed and sisfb can't do this either.\n"); } if(!result) { printk(KERN_ERR "sisfb: Failed to POST card\n"); ret = -ENODEV; goto error_3; } } #endif } ivideo->sisfb_card_posted = 1; /* Find out about RAM size */ if(sisfb_get_dram_size(ivideo)) { printk(KERN_INFO "sisfb: Fatal error: Unable to determine VRAM size.\n"); ret = -ENODEV; goto error_3; } /* Enable PCI addressing and MMIO */ if((ivideo->sisfb_mode_idx < 0) || ((sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]) != 0xFF)) { /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */ orSISIDXREG(SISSR, IND_SIS_PCI_ADDRESS_SET, (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE)); /* Enable 2D accelerator engine */ orSISIDXREG(SISSR, 
IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D); } if(sisfb_pdc != 0xff) { if(ivideo->sisvga_engine == SIS_300_VGA) sisfb_pdc &= 0x3c; else sisfb_pdc &= 0x1f; ivideo->SiS_Pr.PDC = sisfb_pdc; } #ifdef CONFIG_FB_SIS_315 if(ivideo->sisvga_engine == SIS_315_VGA) { if(sisfb_pdca != 0xff) ivideo->SiS_Pr.PDCA = sisfb_pdca & 0x1f; } #endif if(!request_mem_region(ivideo->video_base, ivideo->video_size, "sisfb FB")) { printk(KERN_ERR "sisfb: Fatal error: Unable to reserve %dMB framebuffer memory\n", (int)(ivideo->video_size >> 20)); printk(KERN_ERR "sisfb: Is there another framebuffer driver active?\n"); ret = -ENODEV; goto error_3; } if(!request_mem_region(ivideo->mmio_base, ivideo->mmio_size, "sisfb MMIO")) { printk(KERN_ERR "sisfb: Fatal error: Unable to reserve MMIO region\n"); ret = -ENODEV; goto error_2; } ivideo->video_vbase = ioremap(ivideo->video_base, ivideo->video_size); ivideo->SiS_Pr.VideoMemoryAddress = ivideo->video_vbase; if(!ivideo->video_vbase) { printk(KERN_ERR "sisfb: Fatal error: Unable to map framebuffer memory\n"); ret = -ENODEV; goto error_1; } ivideo->mmio_vbase = ioremap(ivideo->mmio_base, ivideo->mmio_size); if(!ivideo->mmio_vbase) { printk(KERN_ERR "sisfb: Fatal error: Unable to map MMIO region\n"); ret = -ENODEV; error_0: iounmap(ivideo->video_vbase); error_1: release_mem_region(ivideo->video_base, ivideo->video_size); error_2: release_mem_region(ivideo->mmio_base, ivideo->mmio_size); error_3: vfree(ivideo->bios_abase); if(ivideo->lpcdev) pci_dev_put(ivideo->lpcdev); if(ivideo->nbridge) pci_dev_put(ivideo->nbridge); pci_set_drvdata(pdev, NULL); if(!ivideo->sisvga_enabled) pci_disable_device(pdev); framebuffer_release(sis_fb_info); return ret; } printk(KERN_INFO "sisfb: Video RAM at 0x%lx, mapped to 0x%lx, size %ldk\n", ivideo->video_base, (unsigned long)ivideo->video_vbase, ivideo->video_size / 1024); if(ivideo->video_offset) { printk(KERN_INFO "sisfb: Viewport offset %ldk\n", ivideo->video_offset / 1024); } printk(KERN_INFO "sisfb: MMIO at 0x%lx, mapped 
to 0x%lx, size %ldk\n", ivideo->mmio_base, (unsigned long)ivideo->mmio_vbase, ivideo->mmio_size / 1024); /* Determine the size of the command queue */ if(ivideo->sisvga_engine == SIS_300_VGA) { ivideo->cmdQueueSize = TURBO_QUEUE_AREA_SIZE; } else { if(ivideo->chip == XGI_20) { ivideo->cmdQueueSize = COMMAND_QUEUE_AREA_SIZE_Z7; } else { ivideo->cmdQueueSize = COMMAND_QUEUE_AREA_SIZE; } } /* Engines are no longer initialized here; this is * now done after the first mode-switch (if the * submitted var has its acceleration flags set). */ /* Calculate the base of the (unused) hw cursor */ ivideo->hwcursor_vbase = ivideo->video_vbase + ivideo->video_size - ivideo->cmdQueueSize - ivideo->hwcursor_size; ivideo->caps |= HW_CURSOR_CAP; /* Initialize offscreen memory manager */ if((ivideo->havenoheap = sisfb_heap_init(ivideo))) { printk(KERN_WARNING "sisfb: Failed to initialize offscreen memory heap\n"); } /* Used for clearing the screen only, therefore respect our mem limit */ ivideo->SiS_Pr.VideoMemoryAddress += ivideo->video_offset; ivideo->SiS_Pr.VideoMemorySize = ivideo->sisfb_mem; ivideo->mtrr = -1; ivideo->vbflags = 0; ivideo->lcddefmodeidx = DEFAULT_LCDMODE; ivideo->tvdefmodeidx = DEFAULT_TVMODE; ivideo->defmodeidx = DEFAULT_MODE; ivideo->newrom = 0; if(ivideo->chip < XGI_20) { if(ivideo->bios_abase) { ivideo->newrom = SiSDetermineROMLayout661(&ivideo->SiS_Pr); } } if((ivideo->sisfb_mode_idx < 0) || ((sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]) != 0xFF)) { sisfb_sense_crt1(ivideo); sisfb_get_VB_type(ivideo); if(ivideo->vbflags2 & VB2_VIDEOBRIDGE) { sisfb_detect_VB_connect(ivideo); } ivideo->currentvbflags = ivideo->vbflags & (VB_VIDEOBRIDGE | TV_STANDARD); /* Decide on which CRT2 device to use */ if(ivideo->vbflags2 & VB2_VIDEOBRIDGE) { if(ivideo->sisfb_crt2type != -1) { if((ivideo->sisfb_crt2type == CRT2_LCD) && (ivideo->vbflags & CRT2_LCD)) { ivideo->currentvbflags |= CRT2_LCD; } else if(ivideo->sisfb_crt2type != CRT2_LCD) { ivideo->currentvbflags |= 
ivideo->sisfb_crt2type; } } else { /* Chrontel 700x TV detection often unreliable, therefore * use a different default order on such machines */ if((ivideo->sisvga_engine == SIS_300_VGA) && (ivideo->vbflags2 & VB2_CHRONTEL)) { if(ivideo->vbflags & CRT2_LCD) ivideo->currentvbflags |= CRT2_LCD; else if(ivideo->vbflags & CRT2_TV) ivideo->currentvbflags |= CRT2_TV; else if(ivideo->vbflags & CRT2_VGA) ivideo->currentvbflags |= CRT2_VGA; } else { if(ivideo->vbflags & CRT2_TV) ivideo->currentvbflags |= CRT2_TV; else if(ivideo->vbflags & CRT2_LCD) ivideo->currentvbflags |= CRT2_LCD; else if(ivideo->vbflags & CRT2_VGA) ivideo->currentvbflags |= CRT2_VGA; } } } if(ivideo->vbflags & CRT2_LCD) { sisfb_detect_lcd_type(ivideo); } sisfb_save_pdc_emi(ivideo); if(!ivideo->sisfb_crt1off) { sisfb_handle_ddc(ivideo, &ivideo->sisfb_thismonitor, 0); } else { if((ivideo->vbflags2 & VB2_SISTMDSBRIDGE) && (ivideo->vbflags & (CRT2_VGA | CRT2_LCD))) { sisfb_handle_ddc(ivideo, &ivideo->sisfb_thismonitor, 1); } } if(ivideo->sisfb_mode_idx >= 0) { int bu = ivideo->sisfb_mode_idx; ivideo->sisfb_mode_idx = sisfb_validate_mode(ivideo, ivideo->sisfb_mode_idx, ivideo->currentvbflags); if(bu != ivideo->sisfb_mode_idx) { printk(KERN_ERR "Mode %dx%dx%d failed validation\n", sisbios_mode[bu].xres, sisbios_mode[bu].yres, sisbios_mode[bu].bpp); } } if(ivideo->sisfb_mode_idx < 0) { switch(ivideo->currentvbflags & VB_DISPTYPE_DISP2) { case CRT2_LCD: ivideo->sisfb_mode_idx = ivideo->lcddefmodeidx; break; case CRT2_TV: ivideo->sisfb_mode_idx = ivideo->tvdefmodeidx; break; default: ivideo->sisfb_mode_idx = ivideo->defmodeidx; break; } } ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]; if(ivideo->refresh_rate != 0) { sisfb_search_refresh_rate(ivideo, ivideo->refresh_rate, ivideo->sisfb_mode_idx); } if(ivideo->rate_idx == 0) { ivideo->rate_idx = sisbios_mode[ivideo->sisfb_mode_idx].rate_idx; ivideo->refresh_rate = 60; } if(ivideo->sisfb_thismonitor.datavalid) { 
if(!sisfb_verify_rate(ivideo, &ivideo->sisfb_thismonitor, ivideo->sisfb_mode_idx, ivideo->rate_idx, ivideo->refresh_rate)) { printk(KERN_INFO "sisfb: WARNING: Refresh rate " "exceeds monitor specs!\n"); } } ivideo->video_bpp = sisbios_mode[ivideo->sisfb_mode_idx].bpp; ivideo->video_width = sisbios_mode[ivideo->sisfb_mode_idx].xres; ivideo->video_height = sisbios_mode[ivideo->sisfb_mode_idx].yres; sisfb_set_vparms(ivideo); printk(KERN_INFO "sisfb: Default mode is %dx%dx%d (%dHz)\n", ivideo->video_width, ivideo->video_height, ivideo->video_bpp, ivideo->refresh_rate); /* Set up the default var according to chosen default display mode */ ivideo->default_var.xres = ivideo->default_var.xres_virtual = ivideo->video_width; ivideo->default_var.yres = ivideo->default_var.yres_virtual = ivideo->video_height; ivideo->default_var.bits_per_pixel = ivideo->video_bpp; sisfb_bpp_to_var(ivideo, &ivideo->default_var); ivideo->default_var.pixclock = (u32) (1000000000 / sisfb_mode_rate_to_dclock(&ivideo->SiS_Pr, ivideo->mode_no, ivideo->rate_idx)); if(sisfb_mode_rate_to_ddata(&ivideo->SiS_Pr, ivideo->mode_no, ivideo->rate_idx, &ivideo->default_var)) { if((ivideo->default_var.vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) { ivideo->default_var.pixclock <<= 1; } } if(ivideo->sisfb_ypan) { /* Maximize regardless of sisfb_max at startup */ ivideo->default_var.yres_virtual = sisfb_calc_maxyres(ivideo, &ivideo->default_var); if(ivideo->default_var.yres_virtual < ivideo->default_var.yres) { ivideo->default_var.yres_virtual = ivideo->default_var.yres; } } sisfb_calc_pitch(ivideo, &ivideo->default_var); ivideo->accel = 0; if(ivideo->sisfb_accel) { ivideo->accel = -1; #ifdef STUPID_ACCELF_TEXT_SHIT ivideo->default_var.accel_flags |= FB_ACCELF_TEXT; #endif } sisfb_initaccel(ivideo); #if defined(FBINFO_HWACCEL_DISABLED) && defined(FBINFO_HWACCEL_XPAN) sis_fb_info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | ((ivideo->accel) ? 
0 : FBINFO_HWACCEL_DISABLED); #else sis_fb_info->flags = FBINFO_FLAG_DEFAULT; #endif sis_fb_info->var = ivideo->default_var; sis_fb_info->fix = ivideo->sisfb_fix; sis_fb_info->screen_base = ivideo->video_vbase + ivideo->video_offset; sis_fb_info->fbops = &sisfb_ops; sis_fb_info->pseudo_palette = ivideo->pseudo_palette; fb_alloc_cmap(&sis_fb_info->cmap, 256 , 0); printk(KERN_DEBUG "sisfb: Initial vbflags 0x%x\n", (int)ivideo->vbflags); #ifdef CONFIG_MTRR ivideo->mtrr = mtrr_add(ivideo->video_base, ivideo->video_size, MTRR_TYPE_WRCOMB, 1); if(ivideo->mtrr < 0) { printk(KERN_DEBUG "sisfb: Failed to add MTRRs\n"); } #endif if(register_framebuffer(sis_fb_info) < 0) { printk(KERN_ERR "sisfb: Fatal error: Failed to register framebuffer\n"); ret = -EINVAL; iounmap(ivideo->mmio_vbase); goto error_0; } ivideo->registered = 1; /* Enlist us */ ivideo->next = card_list; card_list = ivideo; printk(KERN_INFO "sisfb: 2D acceleration is %s, y-panning %s\n", ivideo->sisfb_accel ? "enabled" : "disabled", ivideo->sisfb_ypan ? (ivideo->sisfb_max ? 
"enabled (auto-max)" : "enabled (no auto-max)") : "disabled"); printk(KERN_INFO "fb%d: %s frame buffer device version %d.%d.%d\n", sis_fb_info->node, ivideo->myid, VER_MAJOR, VER_MINOR, VER_LEVEL); printk(KERN_INFO "sisfb: Copyright (C) 2001-2005 Thomas Winischhofer\n"); } /* if mode = "none" */ return 0; } /*****************************************************/ /* PCI DEVICE HANDLING */ /*****************************************************/ static void __devexit sisfb_remove(struct pci_dev *pdev) { struct sis_video_info *ivideo = pci_get_drvdata(pdev); struct fb_info *sis_fb_info = ivideo->memyselfandi; int registered = ivideo->registered; int modechanged = ivideo->modechanged; /* Unmap */ iounmap(ivideo->mmio_vbase); iounmap(ivideo->video_vbase); /* Release mem regions */ release_mem_region(ivideo->video_base, ivideo->video_size); release_mem_region(ivideo->mmio_base, ivideo->mmio_size); vfree(ivideo->bios_abase); if(ivideo->lpcdev) pci_dev_put(ivideo->lpcdev); if(ivideo->nbridge) pci_dev_put(ivideo->nbridge); #ifdef CONFIG_MTRR /* Release MTRR region */ if(ivideo->mtrr >= 0) mtrr_del(ivideo->mtrr, ivideo->video_base, ivideo->video_size); #endif pci_set_drvdata(pdev, NULL); /* If device was disabled when starting, disable * it when quitting. */ if(!ivideo->sisvga_enabled) pci_disable_device(pdev); /* Unregister the framebuffer */ if(ivideo->registered) { unregister_framebuffer(sis_fb_info); framebuffer_release(sis_fb_info); } /* OK, our ivideo is gone for good from here. */ /* TODO: Restore the initial mode * This sounds easy but is as good as impossible * on many machines with SiS chip and video bridge * since text modes are always set up differently * from machine to machine. Depends on the type * of integration between chipset and bridge. 
*/ if(registered && modechanged) printk(KERN_INFO "sisfb: Restoring of text mode not supported yet\n"); }; static struct pci_driver sisfb_driver = { .name = "sisfb", .id_table = sisfb_pci_table, .probe = sisfb_probe, .remove = __devexit_p(sisfb_remove) }; static int __init sisfb_init(void) { #ifndef MODULE char *options = NULL; if(fb_get_options("sisfb", &options)) return -ENODEV; sisfb_setup(options); #endif return pci_register_driver(&sisfb_driver); } #ifndef MODULE module_init(sisfb_init); #endif /*****************************************************/ /* MODULE */ /*****************************************************/ #ifdef MODULE static char *mode = NULL; static int vesa = -1; static unsigned int rate = 0; static unsigned int crt1off = 1; static unsigned int mem = 0; static char *forcecrt2type = NULL; static int forcecrt1 = -1; static int pdc = -1; static int pdc1 = -1; static int noaccel = -1; static int noypan = -1; static int nomax = -1; static int userom = -1; static int useoem = -1; static char *tvstandard = NULL; static int nocrt2rate = 0; static int scalelcd = -1; static char *specialtiming = NULL; static int lvdshl = -1; static int tvxposoffset = 0, tvyposoffset = 0; #if !defined(__i386__) && !defined(__x86_64__) static int resetcard = 0; static int videoram = 0; #endif static int __init sisfb_init_module(void) { sisfb_setdefaultparms(); if(rate) sisfb_parm_rate = rate; if((scalelcd == 0) || (scalelcd == 1)) sisfb_scalelcd = scalelcd ^ 1; /* Need to check crt2 type first for fstn/dstn */ if(forcecrt2type) sisfb_search_crt2type(forcecrt2type); if(tvstandard) sisfb_search_tvstd(tvstandard); if(mode) sisfb_search_mode(mode, false); else if(vesa != -1) sisfb_search_vesamode(vesa, false); sisfb_crt1off = (crt1off == 0) ? 
1 : 0; sisfb_forcecrt1 = forcecrt1; if(forcecrt1 == 1) sisfb_crt1off = 0; else if(forcecrt1 == 0) sisfb_crt1off = 1; if(noaccel == 1) sisfb_accel = 0; else if(noaccel == 0) sisfb_accel = 1; if(noypan == 1) sisfb_ypan = 0; else if(noypan == 0) sisfb_ypan = 1; if(nomax == 1) sisfb_max = 0; else if(nomax == 0) sisfb_max = 1; if(mem) sisfb_parm_mem = mem; if(userom != -1) sisfb_userom = userom; if(useoem != -1) sisfb_useoem = useoem; if(pdc != -1) sisfb_pdc = (pdc & 0x7f); if(pdc1 != -1) sisfb_pdca = (pdc1 & 0x1f); sisfb_nocrt2rate = nocrt2rate; if(specialtiming) sisfb_search_specialtiming(specialtiming); if((lvdshl >= 0) && (lvdshl <= 3)) sisfb_lvdshl = lvdshl; sisfb_tvxposoffset = tvxposoffset; sisfb_tvyposoffset = tvyposoffset; #if !defined(__i386__) && !defined(__x86_64__) sisfb_resetcard = (resetcard) ? 1 : 0; if(videoram) sisfb_videoram = videoram; #endif return sisfb_init(); } static void __exit sisfb_remove_module(void) { pci_unregister_driver(&sisfb_driver); printk(KERN_DEBUG "sisfb: Module unloaded\n"); } module_init(sisfb_init_module); module_exit(sisfb_remove_module); MODULE_DESCRIPTION("SiS 300/540/630/730/315/55x/65x/661/74x/330/76x/34x, XGI V3XT/V5/V8/Z7 framebuffer device driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Thomas Winischhofer <thomas@winischhofer.net>, Others"); module_param(mem, int, 0); module_param(noaccel, int, 0); module_param(noypan, int, 0); module_param(nomax, int, 0); module_param(userom, int, 0); module_param(useoem, int, 0); module_param(mode, charp, 0); module_param(vesa, int, 0); module_param(rate, int, 0); module_param(forcecrt1, int, 0); module_param(forcecrt2type, charp, 0); module_param(scalelcd, int, 0); module_param(pdc, int, 0); module_param(pdc1, int, 0); module_param(specialtiming, charp, 0); module_param(lvdshl, int, 0); module_param(tvstandard, charp, 0); module_param(tvxposoffset, int, 0); module_param(tvyposoffset, int, 0); module_param(nocrt2rate, int, 0); #if !defined(__i386__) && !defined(__x86_64__) 
module_param(resetcard, int, 0); module_param(videoram, int, 0); #endif MODULE_PARM_DESC(mem, "\nDetermines the beginning of the video memory heap in KB. This heap is used\n" "for video RAM management for eg. DRM/DRI. On 300 series, the default depends\n" "on the amount of video RAM available. If 8MB of video RAM or less is available,\n" "the heap starts at 4096KB, if between 8 and 16MB are available at 8192KB,\n" "otherwise at 12288KB. On 315/330/340 series, the heap size is 32KB by default.\n" "The value is to be specified without 'KB'.\n"); MODULE_PARM_DESC(noaccel, "\nIf set to anything other than 0, 2D acceleration will be disabled.\n" "(default: 0)\n"); MODULE_PARM_DESC(noypan, "\nIf set to anything other than 0, y-panning will be disabled and scrolling\n" "will be performed by redrawing the screen. (default: 0)\n"); MODULE_PARM_DESC(nomax, "\nIf y-panning is enabled, sisfb will by default use the entire available video\n" "memory for the virtual screen in order to optimize scrolling performance. If\n" "this is set to anything other than 0, sisfb will not do this and thereby \n" "enable the user to positively specify a virtual Y size of the screen using\n" "fbset. (default: 0)\n"); MODULE_PARM_DESC(mode, "\nSelects the desired default display mode in the format XxYxDepth,\n" "eg. 1024x768x16. Other formats supported include XxY-Depth and\n" "XxY-Depth@Rate. If the parameter is only one (decimal or hexadecimal)\n" "number, it will be interpreted as a VESA mode number. (default: 800x600x8)\n"); MODULE_PARM_DESC(vesa, "\nSelects the desired default display mode by VESA defined mode number, eg.\n" "0x117 (default: 0x0103)\n"); MODULE_PARM_DESC(rate, "\nSelects the desired vertical refresh rate for CRT1 (external VGA) in Hz.\n" "If the mode is specified in the format XxY-Depth@Rate, this parameter\n" "will be ignored (default: 60)\n"); MODULE_PARM_DESC(forcecrt1, "\nNormally, the driver autodetects whether or not CRT1 (external VGA) is \n" "connected. 
With this option, the detection can be overridden (1=CRT1 ON,\n" "0=CRT1 OFF) (default: [autodetected])\n"); MODULE_PARM_DESC(forcecrt2type, "\nIf this option is omitted, the driver autodetects CRT2 output devices, such as\n" "LCD, TV or secondary VGA. With this option, this autodetection can be\n" "overridden. Possible parameters are LCD, TV, VGA or NONE. NONE disables CRT2.\n" "On systems with a SiS video bridge, parameters SVIDEO, COMPOSITE or SCART can\n" "be used instead of TV to override the TV detection. Furthermore, on systems\n" "with a SiS video bridge, SVIDEO+COMPOSITE, HIVISION, YPBPR480I, YPBPR480P,\n" "YPBPR720P and YPBPR1080I are understood. However, whether or not these work\n" "depends on the very hardware in use. (default: [autodetected])\n"); MODULE_PARM_DESC(scalelcd, "\nSetting this to 1 will force the driver to scale the LCD image to the panel's\n" "native resolution. Setting it to 0 will disable scaling; LVDS panels will\n" "show black bars around the image, TMDS panels will probably do the scaling\n" "themselves. Default: 1 on LVDS panels, 0 on TMDS panels\n"); MODULE_PARM_DESC(pdc, "\nThis is for manually selecting the LCD panel delay compensation. The driver\n" "should detect this correctly in most cases; however, sometimes this is not\n" "possible. If you see 'small waves' on the LCD, try setting this to 4, 32 or 24\n" "on a 300 series chipset; 6 on other chipsets. If the problem persists, try\n" "other values (on 300 series: between 4 and 60 in steps of 4; otherwise: any\n" "value from 0 to 31). (default: autodetected, if LCD is active during start)\n"); #ifdef CONFIG_FB_SIS_315 MODULE_PARM_DESC(pdc1, "\nThis is same as pdc, but for LCD-via CRT1. Hence, this is for the 315/330/340\n" "series only. 
(default: autodetected if LCD is in LCD-via-CRT1 mode during\n" "startup) - Note: currently, this has no effect because LCD-via-CRT1 is not\n" "implemented yet.\n"); #endif MODULE_PARM_DESC(specialtiming, "\nPlease refer to documentation for more information on this option.\n"); MODULE_PARM_DESC(lvdshl, "\nPlease refer to documentation for more information on this option.\n"); MODULE_PARM_DESC(tvstandard, "\nThis allows overriding the BIOS default for the TV standard. Valid choices are\n" "pal, ntsc, palm and paln. (default: [auto; pal or ntsc only])\n"); MODULE_PARM_DESC(tvxposoffset, "\nRelocate TV output horizontally. Possible parameters: -32 through 32.\n" "Default: 0\n"); MODULE_PARM_DESC(tvyposoffset, "\nRelocate TV output vertically. Possible parameters: -32 through 32.\n" "Default: 0\n"); MODULE_PARM_DESC(nocrt2rate, "\nSetting this to 1 will force the driver to use the default refresh rate for\n" "CRT2 if CRT2 type is VGA. (default: 0, use same rate as CRT1)\n"); #if !defined(__i386__) && !defined(__x86_64__) #ifdef CONFIG_FB_SIS_300 MODULE_PARM_DESC(resetcard, "\nSet this to 1 in order to reset (POST) the card on non-x86 machines where\n" "the BIOS did not POST the card (only supported for SiS 300/305 and XGI cards\n" "currently). Default: 0\n"); MODULE_PARM_DESC(videoram, "\nSet this to the amount of video RAM (in kilobyte) the card has. Required on\n" "some non-x86 architectures where the memory auto detection fails. Only\n" "relevant if resetcard is set, too. SiS300/305 only. Default: [auto-detect]\n"); #endif #endif #endif /* /MODULE */ /* _GPL only for new symbols. */ EXPORT_SYMBOL(sis_malloc); EXPORT_SYMBOL(sis_free); EXPORT_SYMBOL_GPL(sis_malloc_new); EXPORT_SYMBOL_GPL(sis_free_new);
gpl-2.0
romracer/sgs2sr-kernel
drivers/staging/usbip/stub_dev.c
803
12166
/* * Copyright (C) 2003-2008 Takahiro Hirofuchi * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #include <linux/slab.h> #include "usbip_common.h" #include "stub.h" static int stub_probe(struct usb_interface *interface, const struct usb_device_id *id); static void stub_disconnect(struct usb_interface *interface); /* * Define device IDs here if you want to explicitly limit exportable devices. * In the most cases, wild card matching will be ok because driver binding can * be changed dynamically by a userland program. 
*/ static struct usb_device_id stub_table[] = { #if 0 /* just an example */ { USB_DEVICE(0x05ac, 0x0301) }, /* Mac 1 button mouse */ { USB_DEVICE(0x0430, 0x0009) }, /* Plat Home Keyboard */ { USB_DEVICE(0x059b, 0x0001) }, /* Iomega USB Zip 100 */ { USB_DEVICE(0x04b3, 0x4427) }, /* IBM USB CD-ROM */ { USB_DEVICE(0x05a9, 0xa511) }, /* LifeView USB cam */ { USB_DEVICE(0x55aa, 0x0201) }, /* Imation card reader */ { USB_DEVICE(0x046d, 0x0870) }, /* Qcam Express(QV-30) */ { USB_DEVICE(0x04bb, 0x0101) }, /* IO-DATA HD 120GB */ { USB_DEVICE(0x04bb, 0x0904) }, /* IO-DATA USB-ET/TX */ { USB_DEVICE(0x04bb, 0x0201) }, /* IO-DATA USB-ET/TX */ { USB_DEVICE(0x08bb, 0x2702) }, /* ONKYO USB Speaker */ { USB_DEVICE(0x046d, 0x08b2) }, /* Logicool Qcam 4000 Pro */ #endif /* magic for wild card */ { .driver_info = 1 }, { 0, } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, stub_table); struct usb_driver stub_driver = { .name = "usbip", .probe = stub_probe, .disconnect = stub_disconnect, .id_table = stub_table, }; /*-------------------------------------------------------------------------*/ /* Define sysfs entries for a usbip-bound device */ /* * usbip_status shows status of usbip as long as this driver is bound to the * target device. */ static ssize_t show_status(struct device *dev, struct device_attribute *attr, char *buf) { struct stub_device *sdev = dev_get_drvdata(dev); int status; if (!sdev) { dev_err(dev, "sdev is null\n"); return -ENODEV; } spin_lock(&sdev->ud.lock); status = sdev->ud.status; spin_unlock(&sdev->ud.lock); return snprintf(buf, PAGE_SIZE, "%d\n", status); } static DEVICE_ATTR(usbip_status, S_IRUGO, show_status, NULL); /* * usbip_sockfd gets a socket descriptor of an established TCP connection that * is used to transfer usbip requests by kernel threads. -1 is a magic number * by which usbip connection is finished. 
*/ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct stub_device *sdev = dev_get_drvdata(dev); int sockfd = 0; struct socket *socket; if (!sdev) { dev_err(dev, "sdev is null\n"); return -ENODEV; } sscanf(buf, "%d", &sockfd); if (sockfd != -1) { dev_info(dev, "stub up\n"); spin_lock(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_AVAILABLE) { dev_err(dev, "not ready\n"); spin_unlock(&sdev->ud.lock); return -EINVAL; } socket = sockfd_to_socket(sockfd); if (!socket) { spin_unlock(&sdev->ud.lock); return -EINVAL; } #if 0 setnodelay(socket); setkeepalive(socket); setreuse(socket); #endif sdev->ud.tcp_socket = socket; spin_unlock(&sdev->ud.lock); usbip_start_threads(&sdev->ud); spin_lock(&sdev->ud.lock); sdev->ud.status = SDEV_ST_USED; spin_unlock(&sdev->ud.lock); } else { dev_info(dev, "stub down\n"); spin_lock(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_USED) { spin_unlock(&sdev->ud.lock); return -EINVAL; } spin_unlock(&sdev->ud.lock); usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN); } return count; } static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd); static int stub_add_files(struct device *dev) { int err = 0; err = device_create_file(dev, &dev_attr_usbip_status); if (err) goto err_status; err = device_create_file(dev, &dev_attr_usbip_sockfd); if (err) goto err_sockfd; err = device_create_file(dev, &dev_attr_usbip_debug); if (err) goto err_debug; return 0; err_debug: device_remove_file(dev, &dev_attr_usbip_sockfd); err_sockfd: device_remove_file(dev, &dev_attr_usbip_status); err_status: return err; } static void stub_remove_files(struct device *dev) { device_remove_file(dev, &dev_attr_usbip_status); device_remove_file(dev, &dev_attr_usbip_sockfd); device_remove_file(dev, &dev_attr_usbip_debug); } /*-------------------------------------------------------------------------*/ /* Event handler functions called by an event handler thread */ static void stub_shutdown_connection(struct 
usbip_device *ud) { struct stub_device *sdev = container_of(ud, struct stub_device, ud); /* * When removing an exported device, kernel panic sometimes occurred * and then EIP was sk_wait_data of stub_rx thread. Is this because * sk_wait_data returned though stub_rx thread was already finished by * step 1? */ if (ud->tcp_socket) { usbip_udbg("shutdown tcp_socket %p\n", ud->tcp_socket); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } /* 1. stop threads */ usbip_stop_threads(ud); /* 2. close the socket */ /* * tcp_socket is freed after threads are killed. * So usbip_xmit do not touch NULL socket. */ if (ud->tcp_socket) { sock_release(ud->tcp_socket); ud->tcp_socket = NULL; } /* 3. free used data */ stub_device_cleanup_urbs(sdev); /* 4. free stub_unlink */ { unsigned long flags; struct stub_unlink *unlink, *tmp; spin_lock_irqsave(&sdev->priv_lock, flags); list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) { list_del(&unlink->list); kfree(unlink); } list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) { list_del(&unlink->list); kfree(unlink); } spin_unlock_irqrestore(&sdev->priv_lock, flags); } } static void stub_device_reset(struct usbip_device *ud) { struct stub_device *sdev = container_of(ud, struct stub_device, ud); struct usb_device *udev = interface_to_usbdev(sdev->interface); int ret; usbip_udbg("device reset"); ret = usb_lock_device_for_reset(udev, sdev->interface); if (ret < 0) { dev_err(&udev->dev, "lock for reset\n"); spin_lock(&ud->lock); ud->status = SDEV_ST_ERROR; spin_unlock(&ud->lock); return; } /* try to reset the device */ ret = usb_reset_device(udev); usb_unlock_device(udev); spin_lock(&ud->lock); if (ret) { dev_err(&udev->dev, "device reset\n"); ud->status = SDEV_ST_ERROR; } else { dev_info(&udev->dev, "device reset\n"); ud->status = SDEV_ST_AVAILABLE; } spin_unlock(&ud->lock); return; } static void stub_device_unusable(struct usbip_device *ud) { spin_lock(&ud->lock); ud->status = SDEV_ST_ERROR; spin_unlock(&ud->lock); } 
/*-------------------------------------------------------------------------*/ /** * stub_device_alloc - allocate a new stub_device struct * @interface: usb_interface of a new device * * Allocates and initializes a new stub_device struct. */ static struct stub_device *stub_device_alloc(struct usb_interface *interface) { struct stub_device *sdev; int busnum = interface_to_busnum(interface); int devnum = interface_to_devnum(interface); dev_dbg(&interface->dev, "allocating stub device"); /* yes, it's a new device */ sdev = kzalloc(sizeof(struct stub_device), GFP_KERNEL); if (!sdev) { dev_err(&interface->dev, "no memory for stub_device\n"); return NULL; } sdev->interface = interface; /* * devid is defined with devnum when this driver is first allocated. * devnum may change later if a device is reset. However, devid never * changes during a usbip connection. */ sdev->devid = (busnum << 16) | devnum; usbip_task_init(&sdev->ud.tcp_rx, "stub_rx", stub_rx_loop); usbip_task_init(&sdev->ud.tcp_tx, "stub_tx", stub_tx_loop); sdev->ud.side = USBIP_STUB; sdev->ud.status = SDEV_ST_AVAILABLE; /* sdev->ud.lock = SPIN_LOCK_UNLOCKED; */ spin_lock_init(&sdev->ud.lock); sdev->ud.tcp_socket = NULL; INIT_LIST_HEAD(&sdev->priv_init); INIT_LIST_HEAD(&sdev->priv_tx); INIT_LIST_HEAD(&sdev->priv_free); INIT_LIST_HEAD(&sdev->unlink_free); INIT_LIST_HEAD(&sdev->unlink_tx); /* sdev->priv_lock = SPIN_LOCK_UNLOCKED; */ spin_lock_init(&sdev->priv_lock); init_waitqueue_head(&sdev->tx_waitq); sdev->ud.eh_ops.shutdown = stub_shutdown_connection; sdev->ud.eh_ops.reset = stub_device_reset; sdev->ud.eh_ops.unusable = stub_device_unusable; usbip_start_eh(&sdev->ud); usbip_udbg("register new interface\n"); return sdev; } static int stub_device_free(struct stub_device *sdev) { if (!sdev) return -EINVAL; kfree(sdev); usbip_udbg("kfree udev ok\n"); return 0; } /*-------------------------------------------------------------------------*/ /* * If a usb device has multiple active interfaces, this driver is bound 
to all * the active interfaces. However, usbip exports *a* usb device (i.e., not *an* * active interface). Currently, a userland program must ensure that it * looks at the usbip's sysfs entries of only the first active interface. * * TODO: use "struct usb_device_driver" to bind a usb device. * However, it seems it is not fully supported in mainline kernel yet * (2.6.19.2). */ static int stub_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct stub_device *sdev = NULL; const char *udev_busid = dev_name(interface->dev.parent); int err = 0; dev_dbg(&interface->dev, "Enter\n"); /* check we should claim or not by busid_table */ if (match_busid(udev_busid)) { dev_info(&interface->dev, "this device %s is not in match_busid table. skip!\n", udev_busid); /* * Return value should be ENODEV or ENOXIO to continue trying * other matched drivers by the driver core. * See driver_probe_device() in driver/base/dd.c */ return -ENODEV; } if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { usbip_udbg("this device %s is a usb hub device. skip!\n", udev_busid); return -ENODEV; } if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { usbip_udbg("this device %s is attached on vhci_hcd. skip!\n", udev_busid); return -ENODEV; } /* ok. this is my device. 
*/ sdev = stub_device_alloc(interface); if (!sdev) return -ENOMEM; dev_info(&interface->dev, "USB/IP Stub: register a new interface " "(bus %u dev %u ifn %u)\n", udev->bus->busnum, udev->devnum, interface->cur_altsetting->desc.bInterfaceNumber); /* set private data to usb_interface */ usb_set_intfdata(interface, sdev); err = stub_add_files(&interface->dev); if (err) { dev_err(&interface->dev, "create sysfs files for %s\n", udev_busid); return err; } return 0; } /* * called in usb_disconnect() or usb_deregister() * but only if actconfig(active configuration) exists */ static void stub_disconnect(struct usb_interface *interface) { struct stub_device *sdev = usb_get_intfdata(interface); usbip_udbg("Enter\n"); /* get stub_device */ if (!sdev) { err(" could not get device from inteface data"); /* BUG(); */ return; } usb_set_intfdata(interface, NULL); /* * NOTE: * rx/tx threads are invoked for each usb_device. */ stub_remove_files(&interface->dev); /* 1. shutdown the current connection */ usbip_event_add(&sdev->ud, SDEV_EVENT_REMOVED); /* 2. wait for the stop of the event handler */ usbip_stop_eh(&sdev->ud); /* 3. free sdev */ stub_device_free(sdev); usbip_udbg("bye\n"); }
gpl-2.0
AnesHadzi/linux-socfpga
drivers/ide/pmac.c
803
46441
/* * Support for IDE interfaces on PowerMacs. * * These IDE interfaces are memory-mapped and have a DBDMA channel * for doing DMA. * * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Some code taken from drivers/ide/ide-dma.c: * * Copyright (c) 1995-1998 Mark Lord * * TODO: - Use pre-calculated (kauai) timing tables all the time and * get rid of the "rounded" tables used previously, so we have the * same table format for all controllers and can then just have one * big table * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ide.h> #include <linux/notifier.h> #include <linux/module.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <asm/prom.h> #include <asm/io.h> #include <asm/dbdma.h> #include <asm/ide.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/sections.h> #include <asm/irq.h> #include <asm/mediabay.h> #define DRV_NAME "ide-pmac" #undef IDE_PMAC_DEBUG #define DMA_WAIT_TIMEOUT 50 typedef struct pmac_ide_hwif { unsigned long regbase; int irq; int kind; int aapl_bus_id; unsigned broken_dma : 1; unsigned broken_dma_warn : 1; struct device_node* node; struct macio_dev *mdev; u32 timings[4]; volatile u32 __iomem * *kauai_fcr; ide_hwif_t *hwif; /* Those fields are duplicating what is in hwif. We currently * can't use the hwif ones because of some assumptions that are * beeing done by the generic code about the kind of dma controller * and format of the dma table. This will have to be fixed though. 
*/ volatile struct dbdma_regs __iomem * dma_regs; struct dbdma_cmd* dma_table_cpu; } pmac_ide_hwif_t; enum { controller_ohare, /* OHare based */ controller_heathrow, /* Heathrow/Paddington */ controller_kl_ata3, /* KeyLargo ATA-3 */ controller_kl_ata4, /* KeyLargo ATA-4 */ controller_un_ata6, /* UniNorth2 ATA-6 */ controller_k2_ata6, /* K2 ATA-6 */ controller_sh_ata6, /* Shasta ATA-6 */ }; static const char* model_name[] = { "OHare ATA", /* OHare based */ "Heathrow ATA", /* Heathrow/Paddington */ "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */ "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */ "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */ "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */ "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */ }; /* * Extra registers, both 32-bit little-endian */ #define IDE_TIMING_CONFIG 0x200 #define IDE_INTERRUPT 0x300 /* Kauai (U2) ATA has different register setup */ #define IDE_KAUAI_PIO_CONFIG 0x200 #define IDE_KAUAI_ULTRA_CONFIG 0x210 #define IDE_KAUAI_POLL_CONFIG 0x220 /* * Timing configuration register definitions */ /* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */ #define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS) #define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS) #define IDE_SYSCLK_NS 30 /* 33Mhz cell */ #define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */ /* 133Mhz cell, found in shasta. * See comments about 100 Mhz Uninorth 2... * Note that PIO_MASK and MDMA_MASK seem to overlap */ #define TR_133_PIOREG_PIO_MASK 0xff000fff #define TR_133_PIOREG_MDMA_MASK 0x00fff800 #define TR_133_UDMAREG_UDMA_MASK 0x0003ffff #define TR_133_UDMAREG_UDMA_EN 0x00000001 /* 100Mhz cell, found in Uninorth 2. I don't have much infos about * this one yet, it appears as a pci device (106b/0033) on uninorth * internal PCI bus and it's clock is controlled like gem or fw. It * appears to be an evolution of keylargo ATA4 with a timing register * extended to 2 32bits registers and a similar DBDMA channel. 
Other * registers seem to exist but I can't tell much about them. * * So far, I'm using pre-calculated tables for this extracted from * the values used by the MacOS X driver. * * The "PIO" register controls PIO and MDMA timings, the "ULTRA" * register controls the UDMA timings. At least, it seems bit 0 * of this one enables UDMA vs. MDMA, and bits 4..7 are the * cycle time in units of 10ns. Bits 8..15 are used by I don't * know their meaning yet */ #define TR_100_PIOREG_PIO_MASK 0xff000fff #define TR_100_PIOREG_MDMA_MASK 0x00fff000 #define TR_100_UDMAREG_UDMA_MASK 0x0000ffff #define TR_100_UDMAREG_UDMA_EN 0x00000001 /* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on * 40 connector cable and to 4 on 80 connector one. * Clock unit is 15ns (66Mhz) * * 3 Values can be programmed: * - Write data setup, which appears to match the cycle time. They * also call it DIOW setup. * - Ready to pause time (from spec) * - Address setup. That one is weird. I don't see where exactly * it fits in UDMA cycles, I got it's name from an obscure piece * of commented out code in Darwin. They leave it to 0, we do as * well, despite a comment that would lead to think it has a * min value of 45ns. * Apple also add 60ns to the write data setup (or cycle time ?) on * reads. 
*/ #define TR_66_UDMA_MASK 0xfff00000 #define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */ #define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */ #define TR_66_UDMA_ADDRSETUP_SHIFT 29 #define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */ #define TR_66_UDMA_RDY2PAUS_SHIFT 25 #define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */ #define TR_66_UDMA_WRDATASETUP_SHIFT 21 #define TR_66_MDMA_MASK 0x000ffc00 #define TR_66_MDMA_RECOVERY_MASK 0x000f8000 #define TR_66_MDMA_RECOVERY_SHIFT 15 #define TR_66_MDMA_ACCESS_MASK 0x00007c00 #define TR_66_MDMA_ACCESS_SHIFT 10 #define TR_66_PIO_MASK 0x000003ff #define TR_66_PIO_RECOVERY_MASK 0x000003e0 #define TR_66_PIO_RECOVERY_SHIFT 5 #define TR_66_PIO_ACCESS_MASK 0x0000001f #define TR_66_PIO_ACCESS_SHIFT 0 /* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo * Can do pio & mdma modes, clock unit is 30ns (33Mhz) * * The access time and recovery time can be programmed. Some older * Darwin code base limit OHare to 150ns cycle time. I decided to do * the same here fore safety against broken old hardware ;) * The HalfTick bit, when set, adds half a clock (15ns) to the access * time and removes one from recovery. It's not supported on KeyLargo * implementation afaik. The E bit appears to be set for PIO mode 0 and * is used to reach long timings used in this mode. */ #define TR_33_MDMA_MASK 0x003ff800 #define TR_33_MDMA_RECOVERY_MASK 0x001f0000 #define TR_33_MDMA_RECOVERY_SHIFT 16 #define TR_33_MDMA_ACCESS_MASK 0x0000f800 #define TR_33_MDMA_ACCESS_SHIFT 11 #define TR_33_MDMA_HALFTICK 0x00200000 #define TR_33_PIO_MASK 0x000007ff #define TR_33_PIO_E 0x00000400 #define TR_33_PIO_RECOVERY_MASK 0x000003e0 #define TR_33_PIO_RECOVERY_SHIFT 5 #define TR_33_PIO_ACCESS_MASK 0x0000001f #define TR_33_PIO_ACCESS_SHIFT 0 /* * Interrupt register definitions */ #define IDE_INTR_DMA 0x80000000 #define IDE_INTR_DEVICE 0x40000000 /* * FCR Register on Kauai. Not sure what bit 0x4 is ... 
*/ #define KAUAI_FCR_UATA_MAGIC 0x00000004 #define KAUAI_FCR_UATA_RESET_N 0x00000002 #define KAUAI_FCR_UATA_ENABLE 0x00000001 /* Rounded Multiword DMA timings * * I gave up finding a generic formula for all controller * types and instead, built tables based on timing values * used by Apple in Darwin's implementation. */ struct mdma_timings_t { int accessTime; int recoveryTime; int cycleTime; }; struct mdma_timings_t mdma_timings_33[] = { { 240, 240, 480 }, { 180, 180, 360 }, { 135, 135, 270 }, { 120, 120, 240 }, { 105, 105, 210 }, { 90, 90, 180 }, { 75, 75, 150 }, { 75, 45, 120 }, { 0, 0, 0 } }; struct mdma_timings_t mdma_timings_33k[] = { { 240, 240, 480 }, { 180, 180, 360 }, { 150, 150, 300 }, { 120, 120, 240 }, { 90, 120, 210 }, { 90, 90, 180 }, { 90, 60, 150 }, { 90, 30, 120 }, { 0, 0, 0 } }; struct mdma_timings_t mdma_timings_66[] = { { 240, 240, 480 }, { 180, 180, 360 }, { 135, 135, 270 }, { 120, 120, 240 }, { 105, 105, 210 }, { 90, 90, 180 }, { 90, 75, 165 }, { 75, 45, 120 }, { 0, 0, 0 } }; /* KeyLargo ATA-4 Ultra DMA timings (rounded) */ struct { int addrSetup; /* ??? 
*/ int rdy2pause; int wrDataSetup; } kl66_udma_timings[] = { { 0, 180, 120 }, /* Mode 0 */ { 0, 150, 90 }, /* 1 */ { 0, 120, 60 }, /* 2 */ { 0, 90, 45 }, /* 3 */ { 0, 90, 30 } /* 4 */ }; /* UniNorth 2 ATA/100 timings */ struct kauai_timing { int cycle_time; u32 timing_reg; }; static struct kauai_timing kauai_pio_timings[] = { { 930 , 0x08000fff }, { 600 , 0x08000a92 }, { 383 , 0x0800060f }, { 360 , 0x08000492 }, { 330 , 0x0800048f }, { 300 , 0x080003cf }, { 270 , 0x080003cc }, { 240 , 0x0800038b }, { 239 , 0x0800030c }, { 180 , 0x05000249 }, { 120 , 0x04000148 }, { 0 , 0 }, }; static struct kauai_timing kauai_mdma_timings[] = { { 1260 , 0x00fff000 }, { 480 , 0x00618000 }, { 360 , 0x00492000 }, { 270 , 0x0038e000 }, { 240 , 0x0030c000 }, { 210 , 0x002cb000 }, { 180 , 0x00249000 }, { 150 , 0x00209000 }, { 120 , 0x00148000 }, { 0 , 0 }, }; static struct kauai_timing kauai_udma_timings[] = { { 120 , 0x000070c0 }, { 90 , 0x00005d80 }, { 60 , 0x00004a60 }, { 45 , 0x00003a50 }, { 30 , 0x00002a30 }, { 20 , 0x00002921 }, { 0 , 0 }, }; static struct kauai_timing shasta_pio_timings[] = { { 930 , 0x08000fff }, { 600 , 0x0A000c97 }, { 383 , 0x07000712 }, { 360 , 0x040003cd }, { 330 , 0x040003cd }, { 300 , 0x040003cd }, { 270 , 0x040003cd }, { 240 , 0x040003cd }, { 239 , 0x040003cd }, { 180 , 0x0400028b }, { 120 , 0x0400010a }, { 0 , 0 }, }; static struct kauai_timing shasta_mdma_timings[] = { { 1260 , 0x00fff000 }, { 480 , 0x00820800 }, { 360 , 0x00820800 }, { 270 , 0x00820800 }, { 240 , 0x00820800 }, { 210 , 0x00820800 }, { 180 , 0x00820800 }, { 150 , 0x0028b000 }, { 120 , 0x001ca000 }, { 0 , 0 }, }; static struct kauai_timing shasta_udma133_timings[] = { { 120 , 0x00035901, }, { 90 , 0x000348b1, }, { 60 , 0x00033881, }, { 45 , 0x00033861, }, { 30 , 0x00033841, }, { 20 , 0x00033031, }, { 15 , 0x00033021, }, { 0 , 0 }, }; static inline u32 kauai_lookup_timing(struct kauai_timing* table, int cycle_time) { int i; for (i=0; table[i].cycle_time; i++) if (cycle_time > 
table[i+1].cycle_time) return table[i].timing_reg; BUG(); return 0; } /* allow up to 256 DBDMA commands per xfer */ #define MAX_DCMDS 256 /* * Wait 1s for disk to answer on IDE bus after a hard reset * of the device (via GPIO/FCR). * * Some devices seem to "pollute" the bus even after dropping * the BSY bit (typically some combo drives slave on the UDMA * bus) after a hard reset. Since we hard reset all drives on * KeyLargo ATA66, we have to keep that delay around. I may end * up not hard resetting anymore on these and keep the delay only * for older interfaces instead (we have to reset when coming * from MacOS...) --BenH. */ #define IDE_WAKEUP_DELAY (1*HZ) static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *); #define PMAC_IDE_REG(x) \ ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x))) /* * Apply the timings of the proper unit (master/slave) to the shared * timing register when selecting that unit. This version is for * ASICs with a single timing register */ static void pmac_ide_apply_timings(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); if (drive->dn & 1) writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG)); else writel(pmif->timings[0], PMAC_IDE_REG(IDE_TIMING_CONFIG)); (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG)); } /* * Apply the timings of the proper unit (master/slave) to the shared * timing register when selecting that unit. 
This version is for * ASICs with a dual timing register (Kauai) */ static void pmac_ide_kauai_apply_timings(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); if (drive->dn & 1) { writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG)); writel(pmif->timings[3], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG)); } else { writel(pmif->timings[0], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG)); writel(pmif->timings[2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG)); } (void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG)); } /* * Force an update of controller timing values for a given drive */ static void pmac_ide_do_update_timings(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); if (pmif->kind == controller_sh_ata6 || pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6) pmac_ide_kauai_apply_timings(drive); else pmac_ide_apply_timings(drive); } static void pmac_dev_select(ide_drive_t *drive) { pmac_ide_apply_timings(drive); writeb(drive->select | ATA_DEVICE_OBS, (void __iomem *)drive->hwif->io_ports.device_addr); } static void pmac_kauai_dev_select(ide_drive_t *drive) { pmac_ide_kauai_apply_timings(drive); writeb(drive->select | ATA_DEVICE_OBS, (void __iomem *)drive->hwif->io_ports.device_addr); } static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd) { writeb(cmd, (void __iomem *)hwif->io_ports.command_addr); (void)readl((void __iomem *)(hwif->io_ports.data_addr + IDE_TIMING_CONFIG)); } static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl) { writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr); (void)readl((void __iomem *)(hwif->io_ports.data_addr + IDE_TIMING_CONFIG)); } /* * Old tuning functions (called on hdparm -p), sets up drive PIO timings */ static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); const u8 pio = drive->pio_mode - XFER_PIO_0; struct ide_timing *tim = 
ide_timing_find_mode(XFER_PIO_0 + pio); u32 *timings, t; unsigned accessTicks, recTicks; unsigned accessTime, recTime; unsigned int cycle_time; /* which drive is it ? */ timings = &pmif->timings[drive->dn & 1]; t = *timings; cycle_time = ide_pio_cycle_time(drive, pio); switch (pmif->kind) { case controller_sh_ata6: { /* 133Mhz cell */ u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time); t = (t & ~TR_133_PIOREG_PIO_MASK) | tr; break; } case controller_un_ata6: case controller_k2_ata6: { /* 100Mhz cell */ u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time); t = (t & ~TR_100_PIOREG_PIO_MASK) | tr; break; } case controller_kl_ata4: /* 66Mhz cell */ recTime = cycle_time - tim->active - tim->setup; recTime = max(recTime, 150U); accessTime = tim->active; accessTime = max(accessTime, 150U); accessTicks = SYSCLK_TICKS_66(accessTime); accessTicks = min(accessTicks, 0x1fU); recTicks = SYSCLK_TICKS_66(recTime); recTicks = min(recTicks, 0x1fU); t = (t & ~TR_66_PIO_MASK) | (accessTicks << TR_66_PIO_ACCESS_SHIFT) | (recTicks << TR_66_PIO_RECOVERY_SHIFT); break; default: { /* 33Mhz cell */ int ebit = 0; recTime = cycle_time - tim->active - tim->setup; recTime = max(recTime, 150U); accessTime = tim->active; accessTime = max(accessTime, 150U); accessTicks = SYSCLK_TICKS(accessTime); accessTicks = min(accessTicks, 0x1fU); accessTicks = max(accessTicks, 4U); recTicks = SYSCLK_TICKS(recTime); recTicks = min(recTicks, 0x1fU); recTicks = max(recTicks, 5U) - 4; if (recTicks > 9) { recTicks--; /* guess, but it's only for PIO0, so... 
*/ ebit = 1; } t = (t & ~TR_33_PIO_MASK) | (accessTicks << TR_33_PIO_ACCESS_SHIFT) | (recTicks << TR_33_PIO_RECOVERY_SHIFT); if (ebit) t |= TR_33_PIO_E; break; } } #ifdef IDE_PMAC_DEBUG printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n", drive->name, pio, *timings); #endif *timings = t; pmac_ide_do_update_timings(drive); } /* * Calculate KeyLargo ATA/66 UDMA timings */ static int set_timings_udma_ata4(u32 *timings, u8 speed) { unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks; if (speed > XFER_UDMA_4) return 1; rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause); wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup); addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup); *timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) | (wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) | (rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) | (addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) | TR_66_UDMA_EN; #ifdef IDE_PMAC_DEBUG printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n", speed & 0xf, *timings); #endif return 0; } /* * Calculate Kauai ATA/100 UDMA timings */ static int set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed) { struct ide_timing *t = ide_timing_find_mode(speed); u32 tr; if (speed > XFER_UDMA_5 || t == NULL) return 1; tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma); *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr; *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN; return 0; } /* * Calculate Shasta ATA/133 UDMA timings */ static int set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed) { struct ide_timing *t = ide_timing_find_mode(speed); u32 tr; if (speed > XFER_UDMA_6 || t == NULL) return 1; tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma); *ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK) | tr; *ultra_timings = (*ultra_timings) | TR_133_UDMAREG_UDMA_EN; 
return 0; } /* * Calculate MDMA timings for all cells */ static void set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2, u8 speed) { u16 *id = drive->id; int cycleTime, accessTime = 0, recTime = 0; unsigned accessTicks, recTicks; struct mdma_timings_t* tm = NULL; int i; /* Get default cycle time for mode */ switch(speed & 0xf) { case 0: cycleTime = 480; break; case 1: cycleTime = 150; break; case 2: cycleTime = 120; break; default: BUG(); break; } /* Check if drive provides explicit DMA cycle time */ if ((id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_DMA_TIME]) cycleTime = max_t(int, id[ATA_ID_EIDE_DMA_TIME], cycleTime); /* OHare limits according to some old Apple sources */ if ((intf_type == controller_ohare) && (cycleTime < 150)) cycleTime = 150; /* Get the proper timing array for this controller */ switch(intf_type) { case controller_sh_ata6: case controller_un_ata6: case controller_k2_ata6: break; case controller_kl_ata4: tm = mdma_timings_66; break; case controller_kl_ata3: tm = mdma_timings_33k; break; default: tm = mdma_timings_33; break; } if (tm != NULL) { /* Lookup matching access & recovery times */ i = -1; for (;;) { if (tm[i+1].cycleTime < cycleTime) break; i++; } cycleTime = tm[i].cycleTime; accessTime = tm[i].accessTime; recTime = tm[i].recoveryTime; #ifdef IDE_PMAC_DEBUG printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n", drive->name, cycleTime, accessTime, recTime); #endif } switch(intf_type) { case controller_sh_ata6: { /* 133Mhz cell */ u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime); *timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr; *timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN; } case controller_un_ata6: case controller_k2_ata6: { /* 100Mhz cell */ u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime); *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr; *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN; } break; case controller_kl_ata4: /* 66Mhz cell */ 
accessTicks = SYSCLK_TICKS_66(accessTime); accessTicks = min(accessTicks, 0x1fU); accessTicks = max(accessTicks, 0x1U); recTicks = SYSCLK_TICKS_66(recTime); recTicks = min(recTicks, 0x1fU); recTicks = max(recTicks, 0x3U); /* Clear out mdma bits and disable udma */ *timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) | (accessTicks << TR_66_MDMA_ACCESS_SHIFT) | (recTicks << TR_66_MDMA_RECOVERY_SHIFT); break; case controller_kl_ata3: /* 33Mhz cell on KeyLargo */ accessTicks = SYSCLK_TICKS(accessTime); accessTicks = max(accessTicks, 1U); accessTicks = min(accessTicks, 0x1fU); accessTime = accessTicks * IDE_SYSCLK_NS; recTicks = SYSCLK_TICKS(recTime); recTicks = max(recTicks, 1U); recTicks = min(recTicks, 0x1fU); *timings = ((*timings) & ~TR_33_MDMA_MASK) | (accessTicks << TR_33_MDMA_ACCESS_SHIFT) | (recTicks << TR_33_MDMA_RECOVERY_SHIFT); break; default: { /* 33Mhz cell on others */ int halfTick = 0; int origAccessTime = accessTime; int origRecTime = recTime; accessTicks = SYSCLK_TICKS(accessTime); accessTicks = max(accessTicks, 1U); accessTicks = min(accessTicks, 0x1fU); accessTime = accessTicks * IDE_SYSCLK_NS; recTicks = SYSCLK_TICKS(recTime); recTicks = max(recTicks, 2U) - 1; recTicks = min(recTicks, 0x1fU); recTime = (recTicks + 1) * IDE_SYSCLK_NS; if ((accessTicks > 1) && ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) && ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) { halfTick = 1; accessTicks--; } *timings = ((*timings) & ~TR_33_MDMA_MASK) | (accessTicks << TR_33_MDMA_ACCESS_SHIFT) | (recTicks << TR_33_MDMA_RECOVERY_SHIFT); if (halfTick) *timings |= TR_33_MDMA_HALFTICK; } } #ifdef IDE_PMAC_DEBUG printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n", drive->name, speed & 0xf, *timings); #endif } static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); int ret = 0; u32 *timings, *timings2, tl[2]; u8 unit = drive->dn & 1; const u8 speed = drive->dma_mode; 
timings = &pmif->timings[unit]; timings2 = &pmif->timings[unit+2]; /* Copy timings to local image */ tl[0] = *timings; tl[1] = *timings2; if (speed >= XFER_UDMA_0) { if (pmif->kind == controller_kl_ata4) ret = set_timings_udma_ata4(&tl[0], speed); else if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6) ret = set_timings_udma_ata6(&tl[0], &tl[1], speed); else if (pmif->kind == controller_sh_ata6) ret = set_timings_udma_shasta(&tl[0], &tl[1], speed); else ret = -1; } else set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed); if (ret) return; /* Apply timings to controller */ *timings = tl[0]; *timings2 = tl[1]; pmac_ide_do_update_timings(drive); } /* * Blast some well known "safe" values to the timing registers at init or * wakeup from sleep time, before we do real calculation */ static void sanitize_timings(pmac_ide_hwif_t *pmif) { unsigned int value, value2 = 0; switch(pmif->kind) { case controller_sh_ata6: value = 0x0a820c97; value2 = 0x00033031; break; case controller_un_ata6: case controller_k2_ata6: value = 0x08618a92; value2 = 0x00002921; break; case controller_kl_ata4: value = 0x0008438c; break; case controller_kl_ata3: value = 0x00084526; break; case controller_heathrow: case controller_ohare: default: value = 0x00074526; break; } pmif->timings[0] = pmif->timings[1] = value; pmif->timings[2] = pmif->timings[3] = value2; } static int on_media_bay(pmac_ide_hwif_t *pmif) { return pmif->mdev && pmif->mdev->media_bay != NULL; } /* Suspend call back, should be called after the child devices * have actually been suspended */ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif) { /* We clear the timings */ pmif->timings[0] = 0; pmif->timings[1] = 0; disable_irq(pmif->irq); /* The media bay will handle itself just fine */ if (on_media_bay(pmif)) return 0; /* Kauai has bus control FCRs directly here */ if (pmif->kauai_fcr) { u32 fcr = readl(pmif->kauai_fcr); fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE); writel(fcr, 
pmif->kauai_fcr); } /* Disable the bus on older machines and the cell on kauai */ ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 0); return 0; } /* Resume call back, should be called before the child devices * are resumed */ static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif) { /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ if (!on_media_bay(pmif)) { ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1); msleep(10); ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0); /* Kauai has it different */ if (pmif->kauai_fcr) { u32 fcr = readl(pmif->kauai_fcr); fcr |= KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE; writel(fcr, pmif->kauai_fcr); } msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY)); } /* Sanitize drive timings */ sanitize_timings(pmif); enable_irq(pmif->irq); return 0; } static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) { pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); struct device_node *np = pmif->node; const char *cable = of_get_property(np, "cable-type", NULL); struct device_node *root = of_find_node_by_path("/"); const char *model = of_get_property(root, "model", NULL); /* Get cable type from device-tree. */ if (cable && !strncmp(cable, "80-", 3)) { /* Some drives fail to detect 80c cable in PowerBook */ /* These machine use proprietary short IDE cable anyway */ if (!strncmp(model, "PowerBook", 9)) return ATA_CBL_PATA40_SHORT; else return ATA_CBL_PATA80; } /* * G5's seem to have incorrect cable type in device-tree. * Let's assume they have a 80 conductor cable, this seem * to be always the case unless the user mucked around. 
*/ if (of_device_is_compatible(np, "K2-UATA") || of_device_is_compatible(np, "shasta-ata")) return ATA_CBL_PATA80; return ATA_CBL_PATA40; } static void pmac_ide_init_dev(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); if (on_media_bay(pmif)) { if (check_media_bay(pmif->mdev->media_bay) == MB_CD) { drive->dev_flags &= ~IDE_DFLAG_NOPROBE; return; } drive->dev_flags |= IDE_DFLAG_NOPROBE; } } static const struct ide_tp_ops pmac_tp_ops = { .exec_command = pmac_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = pmac_write_devctl, .dev_select = pmac_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static const struct ide_tp_ops pmac_ata6_tp_ops = { .exec_command = pmac_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = pmac_write_devctl, .dev_select = pmac_kauai_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static const struct ide_port_ops pmac_ide_ata4_port_ops = { .init_dev = pmac_ide_init_dev, .set_pio_mode = pmac_ide_set_pio_mode, .set_dma_mode = pmac_ide_set_dma_mode, .cable_detect = pmac_ide_cable_detect, }; static const struct ide_port_ops pmac_ide_port_ops = { .init_dev = pmac_ide_init_dev, .set_pio_mode = pmac_ide_set_pio_mode, .set_dma_mode = pmac_ide_set_dma_mode, }; static const struct ide_dma_ops pmac_dma_ops; static const struct ide_port_info pmac_port_info = { .name = DRV_NAME, .init_dma = pmac_ide_init_dma, .chipset = ide_pmac, .tp_ops = &pmac_tp_ops, .port_ops = &pmac_ide_port_ops, .dma_ops = &pmac_dma_ops, .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | IDE_HFLAG_POST_SET_MODE | IDE_HFLAG_MMIO | IDE_HFLAG_UNMASK_IRQS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, }; /* * Setup, register & probe an IDE channel driven by this driver, this 
is * called by one of the 2 probe functions (macio or PCI). */ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw) { struct device_node *np = pmif->node; const int *bidp; struct ide_host *host; ide_hwif_t *hwif; struct ide_hw *hws[] = { hw }; struct ide_port_info d = pmac_port_info; int rc; pmif->broken_dma = pmif->broken_dma_warn = 0; if (of_device_is_compatible(np, "shasta-ata")) { pmif->kind = controller_sh_ata6; d.tp_ops = &pmac_ata6_tp_ops; d.port_ops = &pmac_ide_ata4_port_ops; d.udma_mask = ATA_UDMA6; } else if (of_device_is_compatible(np, "kauai-ata")) { pmif->kind = controller_un_ata6; d.tp_ops = &pmac_ata6_tp_ops; d.port_ops = &pmac_ide_ata4_port_ops; d.udma_mask = ATA_UDMA5; } else if (of_device_is_compatible(np, "K2-UATA")) { pmif->kind = controller_k2_ata6; d.tp_ops = &pmac_ata6_tp_ops; d.port_ops = &pmac_ide_ata4_port_ops; d.udma_mask = ATA_UDMA5; } else if (of_device_is_compatible(np, "keylargo-ata")) { if (strcmp(np->name, "ata-4") == 0) { pmif->kind = controller_kl_ata4; d.port_ops = &pmac_ide_ata4_port_ops; d.udma_mask = ATA_UDMA4; } else pmif->kind = controller_kl_ata3; } else if (of_device_is_compatible(np, "heathrow-ata")) { pmif->kind = controller_heathrow; } else { pmif->kind = controller_ohare; pmif->broken_dma = 1; } bidp = of_get_property(np, "AAPL,bus-id", NULL); pmif->aapl_bus_id = bidp ? 
*bidp : 0; /* On Kauai-type controllers, we make sure the FCR is correct */ if (pmif->kauai_fcr) writel(KAUAI_FCR_UATA_MAGIC | KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr); /* Make sure we have sane timings */ sanitize_timings(pmif); /* If we are on a media bay, wait for it to settle and lock it */ if (pmif->mdev) lock_media_bay(pmif->mdev->media_bay); host = ide_host_alloc(&d, hws, 1); if (host == NULL) { rc = -ENOMEM; goto bail; } hwif = pmif->hwif = host->ports[0]; if (on_media_bay(pmif)) { /* Fixup bus ID for media bay */ if (!bidp) pmif->aapl_bus_id = 1; } else if (pmif->kind == controller_ohare) { /* The code below is having trouble on some ohare machines * (timing related ?). Until I can put my hand on one of these * units, I keep the old way */ ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1); } else { /* This is necessary to enable IDE when net-booting */ ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1); ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1); msleep(10); ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0); msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY)); } printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), " "bus ID %d%s, irq %d\n", model_name[pmif->kind], pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, on_media_bay(pmif) ? 
" (mediabay)" : "", hw->irq); rc = ide_host_register(host, &d, hws); if (rc) pmif->hwif = NULL; if (pmif->mdev) unlock_media_bay(pmif->mdev->media_bay); bail: if (rc && host) ide_host_free(host); return rc; } static void pmac_ide_init_ports(struct ide_hw *hw, unsigned long base) { int i; for (i = 0; i < 8; ++i) hw->io_ports_array[i] = base + i * 0x10; hw->io_ports.ctl_addr = base + 0x160; } /* * Attach to a macio probed interface */ static int pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) { void __iomem *base; unsigned long regbase; pmac_ide_hwif_t *pmif; int irq, rc; struct ide_hw hw; pmif = kzalloc(sizeof(*pmif), GFP_KERNEL); if (pmif == NULL) return -ENOMEM; if (macio_resource_count(mdev) == 0) { printk(KERN_WARNING "ide-pmac: no address for %s\n", mdev->ofdev.dev.of_node->full_name); rc = -ENXIO; goto out_free_pmif; } /* Request memory resource for IO ports */ if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) { printk(KERN_ERR "ide-pmac: can't request MMIO resource for " "%s!\n", mdev->ofdev.dev.of_node->full_name); rc = -EBUSY; goto out_free_pmif; } /* XXX This is bogus. Should be fixed in the registry by checking * the kind of host interrupt controller, a bit like gatwick * fixes in irq.c. That works well enough for the single case * where that happens though... 
*/ if (macio_irq_count(mdev) == 0) { printk(KERN_WARNING "ide-pmac: no intrs for device %s, using " "13\n", mdev->ofdev.dev.of_node->full_name); irq = irq_create_mapping(NULL, 13); } else irq = macio_irq(mdev, 0); base = ioremap(macio_resource_start(mdev, 0), 0x400); regbase = (unsigned long) base; pmif->mdev = mdev; pmif->node = mdev->ofdev.dev.of_node; pmif->regbase = regbase; pmif->irq = irq; pmif->kauai_fcr = NULL; if (macio_resource_count(mdev) >= 2) { if (macio_request_resource(mdev, 1, "ide-pmac (dma)")) printk(KERN_WARNING "ide-pmac: can't request DMA " "resource for %s!\n", mdev->ofdev.dev.of_node->full_name); else pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000); } else pmif->dma_regs = NULL; dev_set_drvdata(&mdev->ofdev.dev, pmif); memset(&hw, 0, sizeof(hw)); pmac_ide_init_ports(&hw, pmif->regbase); hw.irq = irq; hw.dev = &mdev->bus->pdev->dev; hw.parent = &mdev->ofdev.dev; rc = pmac_ide_setup_device(pmif, &hw); if (rc != 0) { /* The inteface is released to the common IDE layer */ dev_set_drvdata(&mdev->ofdev.dev, NULL); iounmap(base); if (pmif->dma_regs) { iounmap(pmif->dma_regs); macio_release_resource(mdev, 1); } macio_release_resource(mdev, 0); kfree(pmif); } return rc; out_free_pmif: kfree(pmif); return rc; } static int pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) { pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev); int rc = 0; if (mesg.event != mdev->ofdev.dev.power.power_state.event && (mesg.event & PM_EVENT_SLEEP)) { rc = pmac_ide_do_suspend(pmif); if (rc == 0) mdev->ofdev.dev.power.power_state = mesg; } return rc; } static int pmac_ide_macio_resume(struct macio_dev *mdev) { pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev); int rc = 0; if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) { rc = pmac_ide_do_resume(pmif); if (rc == 0) mdev->ofdev.dev.power.power_state = PMSG_ON; } return rc; } /* * Attach to a PCI probed interface */ static int pmac_ide_pci_attach(struct pci_dev *pdev, 
const struct pci_device_id *id) { struct device_node *np; pmac_ide_hwif_t *pmif; void __iomem *base; unsigned long rbase, rlen; int rc; struct ide_hw hw; np = pci_device_to_OF_node(pdev); if (np == NULL) { printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n"); return -ENODEV; } pmif = kzalloc(sizeof(*pmif), GFP_KERNEL); if (pmif == NULL) return -ENOMEM; if (pci_enable_device(pdev)) { printk(KERN_WARNING "ide-pmac: Can't enable PCI device for " "%s\n", np->full_name); rc = -ENXIO; goto out_free_pmif; } pci_set_master(pdev); if (pci_request_regions(pdev, "Kauai ATA")) { printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for " "%s\n", np->full_name); rc = -ENXIO; goto out_free_pmif; } pmif->mdev = NULL; pmif->node = np; rbase = pci_resource_start(pdev, 0); rlen = pci_resource_len(pdev, 0); base = ioremap(rbase, rlen); pmif->regbase = (unsigned long) base + 0x2000; pmif->dma_regs = base + 0x1000; pmif->kauai_fcr = base; pmif->irq = pdev->irq; pci_set_drvdata(pdev, pmif); memset(&hw, 0, sizeof(hw)); pmac_ide_init_ports(&hw, pmif->regbase); hw.irq = pdev->irq; hw.dev = &pdev->dev; rc = pmac_ide_setup_device(pmif, &hw); if (rc != 0) { /* The inteface is released to the common IDE layer */ iounmap(base); pci_release_regions(pdev); kfree(pmif); } return rc; out_free_pmif: kfree(pmif); return rc; } static int pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) { pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev); int rc = 0; if (mesg.event != pdev->dev.power.power_state.event && (mesg.event & PM_EVENT_SLEEP)) { rc = pmac_ide_do_suspend(pmif); if (rc == 0) pdev->dev.power.power_state = mesg; } return rc; } static int pmac_ide_pci_resume(struct pci_dev *pdev) { pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev); int rc = 0; if (pdev->dev.power.power_state.event != PM_EVENT_ON) { rc = pmac_ide_do_resume(pmif); if (rc == 0) pdev->dev.power.power_state = PMSG_ON; } return rc; } #ifdef CONFIG_PMAC_MEDIABAY static void pmac_ide_macio_mb_event(struct 
macio_dev* mdev, int mb_state) { pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev); switch(mb_state) { case MB_CD: if (!pmif->hwif->present) ide_port_scan(pmif->hwif); break; default: if (pmif->hwif->present) ide_port_unregister_devices(pmif->hwif); } } #endif /* CONFIG_PMAC_MEDIABAY */ static struct of_device_id pmac_ide_macio_match[] = { { .name = "IDE", }, { .name = "ATA", }, { .type = "ide", }, { .type = "ata", }, {}, }; static struct macio_driver pmac_ide_macio_driver = { .driver = { .name = "ide-pmac", .owner = THIS_MODULE, .of_match_table = pmac_ide_macio_match, }, .probe = pmac_ide_macio_attach, .suspend = pmac_ide_macio_suspend, .resume = pmac_ide_macio_resume, #ifdef CONFIG_PMAC_MEDIABAY .mediabay_event = pmac_ide_macio_mb_event, #endif }; static const struct pci_device_id pmac_ide_pci_match[] = { { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 }, { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 }, { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 }, { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 }, { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 }, {}, }; static struct pci_driver pmac_ide_pci_driver = { .name = "ide-pmac", .id_table = pmac_ide_pci_match, .probe = pmac_ide_pci_attach, .suspend = pmac_ide_pci_suspend, .resume = pmac_ide_pci_resume, }; MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match); int __init pmac_ide_probe(void) { int error; if (!machine_is(powermac)) return -ENODEV; #ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST error = pci_register_driver(&pmac_ide_pci_driver); if (error) goto out; error = macio_register_driver(&pmac_ide_macio_driver); if (error) { pci_unregister_driver(&pmac_ide_pci_driver); goto out; } #else error = macio_register_driver(&pmac_ide_macio_driver); if (error) goto out; error = pci_register_driver(&pmac_ide_pci_driver); if (error) { macio_unregister_driver(&pmac_ide_macio_driver); goto out; } #endif out: return error; } /* * pmac_ide_build_dmatable builds the DBDMA command list * for a 
transfer and sets the DBDMA channel to point to it. */ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); struct dbdma_cmd *table; volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; struct scatterlist *sg; int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE); int i = cmd->sg_nents, count = 0; /* DMA table is already aligned */ table = (struct dbdma_cmd *) pmif->dma_table_cpu; /* Make sure DMA controller is stopped (necessary ?) */ writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control); while (readl(&dma->status) & RUN) udelay(1); /* Build DBDMA commands list */ sg = hwif->sg_table; while (i && sg_dma_len(sg)) { u32 cur_addr; u32 cur_len; cur_addr = sg_dma_address(sg); cur_len = sg_dma_len(sg); if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) { if (pmif->broken_dma_warn == 0) { printk(KERN_WARNING "%s: DMA on non aligned address, " "switching to PIO on Ohare chipset\n", drive->name); pmif->broken_dma_warn = 1; } return 0; } while (cur_len) { unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00; if (count++ >= MAX_DCMDS) { printk(KERN_WARNING "%s: DMA table too small\n", drive->name); return 0; } table->command = cpu_to_le16(wr? OUTPUT_MORE: INPUT_MORE); table->req_count = cpu_to_le16(tc); table->phy_addr = cpu_to_le32(cur_addr); table->cmd_dep = 0; table->xfer_status = 0; table->res_count = 0; cur_addr += tc; cur_len -= tc; ++table; } sg = sg_next(sg); i--; } /* convert the last command to an input/output last command */ if (count) { table[-1].command = cpu_to_le16(wr? OUTPUT_LAST: INPUT_LAST); /* add the stop command to the end of the list */ memset(table, 0, sizeof(struct dbdma_cmd)); table->command = cpu_to_le16(DBDMA_STOP); mb(); writel(hwif->dmatable_dma, &dma->cmdptr); return 1; } printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name); return 0; /* revert to PIO for this request */ } /* * Prepare a DMA transfer. 
We build the DMA table, adjust the timings for * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion */ static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4); u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE); if (pmac_ide_build_dmatable(drive, cmd) == 0) return 1; /* Apple adds 60ns to wrDataSetup on reads */ if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) { writel(pmif->timings[unit] + (write ? 0 : 0x00800000UL), PMAC_IDE_REG(IDE_TIMING_CONFIG)); (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG)); } return 0; } /* * Kick the DMA controller into life after the DMA command has been issued * to the drive. */ static void pmac_ide_dma_start(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); volatile struct dbdma_regs __iomem *dma; dma = pmif->dma_regs; writel((RUN << 16) | RUN, &dma->control); /* Make sure it gets to the controller right now */ (void)readl(&dma->control); } /* * After a DMA transfer, make sure the controller is stopped */ static int pmac_ide_dma_end (ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; u32 dstat; dstat = readl(&dma->status); writel(((RUN|WAKE|DEAD) << 16), &dma->control); /* verify good dma status. we don't check for ACTIVE beeing 0. We should... * in theory, but with ATAPI decices doing buffer underruns, that would * cause us to disable DMA, which isn't what we want */ return (dstat & (RUN|DEAD)) != RUN; } /* * Check out that the interrupt we got was for us. 
We can't always know this * for sure with those Apple interfaces (well, we could on the recent ones but * that's not implemented yet), on the other hand, we don't have shared interrupts * so it's not really a problem */ static int pmac_ide_dma_test_irq (ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; unsigned long status, timeout; /* We have to things to deal with here: * * - The dbdma won't stop if the command was started * but completed with an error without transferring all * datas. This happens when bad blocks are met during * a multi-block transfer. * * - The dbdma fifo hasn't yet finished flushing to * to system memory when the disk interrupt occurs. * */ /* If ACTIVE is cleared, the STOP command have passed and * transfer is complete. */ status = readl(&dma->status); if (!(status & ACTIVE)) return 1; /* If dbdma didn't execute the STOP command yet, the * active bit is still set. 
We consider that we aren't * sharing interrupts (which is hopefully the case with * those controllers) and so we just try to flush the * channel for pending data in the fifo */ udelay(1); writel((FLUSH << 16) | FLUSH, &dma->control); timeout = 0; for (;;) { udelay(1); status = readl(&dma->status); if ((status & FLUSH) == 0) break; if (++timeout > 100) { printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n", hwif->index); break; } } return 1; } static void pmac_ide_dma_host_set(ide_drive_t *drive, int on) { } static void pmac_ide_dma_lost_irq (ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; unsigned long status = readl(&dma->status); printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status); } static const struct ide_dma_ops pmac_dma_ops = { .dma_host_set = pmac_ide_dma_host_set, .dma_setup = pmac_ide_dma_setup, .dma_start = pmac_ide_dma_start, .dma_end = pmac_ide_dma_end, .dma_test_irq = pmac_ide_dma_test_irq, .dma_lost_irq = pmac_ide_dma_lost_irq, }; /* * Allocate the data structures needed for using DMA with an interface * and fill the proper list of functions pointers */ static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d) { pmac_ide_hwif_t *pmif = dev_get_drvdata(hwif->gendev.parent); struct pci_dev *dev = to_pci_dev(hwif->dev); /* We won't need pci_dev if we switch to generic consistent * DMA routines ... */ if (dev == NULL || pmif->dma_regs == 0) return -ENODEV; /* * Allocate space for the DBDMA commands. * The +2 is +1 for the stop command and +1 to allow for * aligning the start address to a multiple of 16 bytes. 
*/ pmif->dma_table_cpu = dma_alloc_coherent(&dev->dev, (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd), &hwif->dmatable_dma, GFP_KERNEL); if (pmif->dma_table_cpu == NULL) { printk(KERN_ERR "%s: unable to allocate DMA command list\n", hwif->name); return -ENOMEM; } hwif->sg_max_nents = MAX_DCMDS; return 0; } module_init(pmac_ide_probe); MODULE_LICENSE("GPL");
gpl-2.0
ajtomato/linux-2.6.34
drivers/infiniband/hw/nes/nes.c
803
33692
/* * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/in.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/if_arp.h> #include <linux/highmem.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> #include <rdma/ib_smi.h> #include <rdma/ib_verbs.h> #include <rdma/ib_pack.h> #include <rdma/iw_cm.h> #include "nes.h" #include <net/netevent.h> #include <net/neighbour.h> #include <linux/route.h> #include <net/ip_fib.h> MODULE_AUTHOR("NetEffect"); MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); int max_mtu = 9000; int interrupt_mod_interval = 0; /* Interoperability */ int mpa_version = 1; module_param(mpa_version, int, 0644); MODULE_PARM_DESC(mpa_version, "MPA version to be used int MPA Req/Resp (0 or 1)"); /* Interoperability */ int disable_mpa_crc = 0; module_param(disable_mpa_crc, int, 0644); MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC"); unsigned int send_first = 0; module_param(send_first, int, 0644); MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection"); unsigned int nes_drv_opt = 0; module_param(nes_drv_opt, int, 0644); MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters"); unsigned int nes_debug_level = 0; module_param_named(debug_level, nes_debug_level, uint, 0644); MODULE_PARM_DESC(debug_level, "Enable debug output level"); unsigned int wqm_quanta = 0x10000; module_param(wqm_quanta, int, 0644); MODULE_PARM_DESC(wqm_quanta, "WQM quanta"); static unsigned int limit_maxrdreqsz; module_param(limit_maxrdreqsz, bool, 0644); MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes"); LIST_HEAD(nes_adapter_list); static LIST_HEAD(nes_dev_list); atomic_t qps_destroyed; static unsigned int 
ee_flsh_adapter; static unsigned int sysfs_nonidx_addr; static unsigned int sysfs_idx_addr; static struct pci_device_id nes_pci_table[] = { {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID}, {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID}, {0} }; MODULE_DEVICE_TABLE(pci, nes_pci_table); static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *); static int nes_net_event(struct notifier_block *, unsigned long, void *); static int nes_notifiers_registered; static struct notifier_block nes_inetaddr_notifier = { .notifier_call = nes_inetaddr_event }; static struct notifier_block nes_net_notifier = { .notifier_call = nes_net_event }; /** * nes_inetaddr_event */ static int nes_inetaddr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; struct net_device *event_netdev = ifa->ifa_dev->dev; struct nes_device *nesdev; struct net_device *netdev; struct nes_vnic *nesvnic; nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n", &ifa->ifa_address, &ifa->ifa_mask); list_for_each_entry(nesdev, &nes_dev_list, list) { nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. 
(%s)\n", nesdev, nesdev->netdev[0]->name); netdev = nesdev->netdev[0]; nesvnic = netdev_priv(netdev); if (netdev == event_netdev) { if (nesvnic->rdma_enabled == 0) { nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since" " RDMA is not enabled.\n", netdev->name); return NOTIFY_OK; } /* we have ifa->ifa_address/mask here if we need it */ switch (event) { case NETDEV_DOWN: nes_debug(NES_DBG_NETDEV, "event:DOWN\n"); nes_write_indexed(nesdev, NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 0); nes_manage_arp_cache(netdev, netdev->dev_addr, ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE); nesvnic->local_ipaddr = 0; return NOTIFY_OK; break; case NETDEV_UP: nes_debug(NES_DBG_NETDEV, "event:UP\n"); if (nesvnic->local_ipaddr != 0) { nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n"); return NOTIFY_OK; } /* Add the address to the IP table */ nesvnic->local_ipaddr = ifa->ifa_address; nes_write_indexed(nesdev, NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), ntohl(ifa->ifa_address)); nes_manage_arp_cache(netdev, netdev->dev_addr, ntohl(nesvnic->local_ipaddr), NES_ARP_ADD); return NOTIFY_OK; break; default: break; } } } return NOTIFY_DONE; } /** * nes_net_event */ static int nes_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct neighbour *neigh = ptr; struct nes_device *nesdev; struct net_device *netdev; struct nes_vnic *nesvnic; switch (event) { case NETEVENT_NEIGH_UPDATE: list_for_each_entry(nesdev, &nes_dev_list, list) { /* nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p.\n", nesdev); */ netdev = nesdev->netdev[0]; nesvnic = netdev_priv(netdev); if (netdev == neigh->dev) { if (nesvnic->rdma_enabled == 0) { nes_debug(NES_DBG_NETDEV, "Skipping device %s since no RDMA\n", netdev->name); } else { if (neigh->nud_state & NUD_VALID) { nes_manage_arp_cache(neigh->dev, neigh->ha, ntohl(*(__be32 *)neigh->primary_key), NES_ARP_ADD); } else { nes_manage_arp_cache(neigh->dev, neigh->ha, 
ntohl(*(__be32 *)neigh->primary_key), NES_ARP_DELETE); } } return NOTIFY_OK; } } break; default: nes_debug(NES_DBG_NETDEV, "NETEVENT_ %lu undefined\n", event); break; } return NOTIFY_DONE; } /** * nes_add_ref */ void nes_add_ref(struct ib_qp *ibqp) { struct nes_qp *nesqp; nesqp = to_nesqp(ibqp); nes_debug(NES_DBG_QP, "Bumping refcount for QP%u. Pre-inc value = %u\n", ibqp->qp_num, atomic_read(&nesqp->refcount)); atomic_inc(&nesqp->refcount); } static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) { unsigned long flags; struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; struct nes_adapter *nesadapter = nesdev->nesadapter; u32 qp_id; atomic_inc(&qps_destroyed); /* Free the control structures */ qp_id = nesqp->hwqp.qp_id; if (nesqp->pbl_vbase) { pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase); spin_lock_irqsave(&nesadapter->pbl_lock, flags); nesadapter->free_256pbl++; spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase); nesqp->pbl_vbase = NULL; } else { pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase); } nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id); nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL; kfree(nesqp->allocated_buffer); } /** * nes_rem_ref */ void nes_rem_ref(struct ib_qp *ibqp) { u64 u64temp; struct nes_qp *nesqp; struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); struct nes_device *nesdev = nesvnic->nesdev; struct nes_hw_cqp_wqe *cqp_wqe; struct nes_cqp_request *cqp_request; u32 opcode; nesqp = to_nesqp(ibqp); if (atomic_read(&nesqp->refcount) == 0) { printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n", __func__, ibqp->qp_num, nesqp->last_aeq); BUG(); } if (atomic_dec_and_test(&nesqp->refcount)) { /* Destroy the QP */ cqp_request = 
nes_get_cqp_request(nesdev); if (cqp_request == NULL) { nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n"); return; } cqp_request->waiting = 0; cqp_request->callback = 1; cqp_request->cqp_callback = nes_cqp_rem_ref_callback; cqp_request->cqp_callback_pointer = nesqp; cqp_wqe = &cqp_request->cqp_wqe; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); opcode = NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_IWARP; if (nesqp->hte_added) { opcode |= NES_CQP_QP_DEL_HTE; nesqp->hte_added = 0; } set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); u64temp = (u64)nesqp->nesqp_context_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); nes_post_cqp_request(nesdev, cqp_request); } } /** * nes_get_qp */ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn) { struct nes_vnic *nesvnic = to_nesvnic(device); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp))) return NULL; return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp; } /** * nes_print_macaddr */ static void nes_print_macaddr(struct net_device *netdev) { nes_debug(NES_DBG_INIT, "%s: %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq); } /** * nes_interrupt - handle interrupts */ static irqreturn_t nes_interrupt(int irq, void *dev_id) { struct nes_device *nesdev = (struct nes_device *)dev_id; int handled = 0; u32 int_mask; u32 int_req; u32 int_stat; u32 intf_int_stat; u32 timer_stat; if (nesdev->msi_enabled) { /* No need to read the interrupt pending register if msi is enabled */ handled = 1; } else { if (unlikely(nesdev->nesadapter->hw_rev == NE020_REV)) { /* Master interrupt enable provides synchronization for kicking off bottom half when interrupt sharing is going on */ int_mask = nes_read32(nesdev->regs + NES_INT_MASK); if (int_mask & 0x80000000) { /* Check 
interrupt status to see if this might be ours */ int_stat = nes_read32(nesdev->regs + NES_INT_STAT); int_req = nesdev->int_req; if (int_stat&int_req) { /* if interesting CEQ or AEQ is pending, claim the interrupt */ if ((int_stat&int_req) & (~(NES_INT_TIMER|NES_INT_INTF))) { handled = 1; } else { if (((int_stat & int_req) & NES_INT_TIMER) == NES_INT_TIMER) { /* Timer might be running but might be for another function */ timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT); if ((timer_stat & nesdev->timer_int_req) != 0) { handled = 1; } } if ((((int_stat & int_req) & NES_INT_INTF) == NES_INT_INTF) && (handled == 0)) { intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT); if ((intf_int_stat & nesdev->intf_int_req) != 0) { handled = 1; } } } if (handled) { nes_write32(nesdev->regs+NES_INT_MASK, int_mask & (~0x80000000)); int_mask = nes_read32(nesdev->regs+NES_INT_MASK); /* Save off the status to save an additional read */ nesdev->int_stat = int_stat; nesdev->napi_isr_ran = 1; } } } } else { handled = nes_read32(nesdev->regs+NES_INT_PENDING); } } if (handled) { if (nes_napi_isr(nesdev) == 0) { tasklet_schedule(&nesdev->dpc_tasklet); } return IRQ_HANDLED; } else { return IRQ_NONE; } } /** * nes_probe - Device initialization */ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) { struct net_device *netdev = NULL; struct nes_device *nesdev = NULL; int ret = 0; struct nes_vnic *nesvnic = NULL; void __iomem *mmio_regs = NULL; u8 hw_rev; assert(pcidev != NULL); assert(ent != NULL); printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n", DRV_VERSION, pci_name(pcidev)); ret = pci_enable_device(pcidev); if (ret) { printk(KERN_ERR PFX "Unable to enable PCI device. 
(%s)\n", pci_name(pcidev)); goto bail0; } nes_debug(NES_DBG_INIT, "BAR0 (@0x%08lX) size = 0x%lX bytes\n", (long unsigned int)pci_resource_start(pcidev, BAR_0), (long unsigned int)pci_resource_len(pcidev, BAR_0)); nes_debug(NES_DBG_INIT, "BAR1 (@0x%08lX) size = 0x%lX bytes\n", (long unsigned int)pci_resource_start(pcidev, BAR_1), (long unsigned int)pci_resource_len(pcidev, BAR_1)); /* Make sure PCI base addr are MMIO */ if (!(pci_resource_flags(pcidev, BAR_0) & IORESOURCE_MEM) || !(pci_resource_flags(pcidev, BAR_1) & IORESOURCE_MEM)) { printk(KERN_ERR PFX "PCI regions not an MMIO resource\n"); ret = -ENODEV; goto bail1; } /* Reserve PCI I/O and memory resources */ ret = pci_request_regions(pcidev, DRV_NAME); if (ret) { printk(KERN_ERR PFX "Unable to request regions. (%s)\n", pci_name(pcidev)); goto bail1; } if ((sizeof(dma_addr_t) > 4)) { ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret < 0) { printk(KERN_ERR PFX "64b DMA mask configuration failed\n"); goto bail2; } ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret) { printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n"); goto bail2; } } else { ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); if (ret < 0) { printk(KERN_ERR PFX "32b DMA mask configuration failed\n"); goto bail2; } ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); if (ret) { printk(KERN_ERR PFX "32b DMA consistent mask configuration failed\n"); goto bail2; } } pci_set_master(pcidev); /* Allocate hardware structure */ nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL); if (!nesdev) { printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev)); ret = -ENOMEM; goto bail2; } nes_debug(NES_DBG_INIT, "Allocated nes device at %p\n", nesdev); nesdev->pcidev = pcidev; pci_set_drvdata(pcidev, nesdev); pci_read_config_byte(pcidev, 0x0008, &hw_rev); nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev); spin_lock_init(&nesdev->indexed_regs_lock); /* Remap the PCI registers in adapter BAR0 to 
kernel VA space */ mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), pci_resource_len(pcidev, BAR_0)); if (mmio_regs == NULL) { printk(KERN_ERR PFX "Unable to remap BAR0\n"); ret = -EIO; goto bail3; } nesdev->regs = mmio_regs; nesdev->index_reg = 0x50 + (PCI_FUNC(pcidev->devfn)*8) + mmio_regs; /* Ensure interrupts are disabled */ nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff); if (nes_drv_opt & NES_DRV_OPT_ENABLE_MSI) { if (!pci_enable_msi(nesdev->pcidev)) { nesdev->msi_enabled = 1; nes_debug(NES_DBG_INIT, "MSI is enabled for device %s\n", pci_name(pcidev)); } else { nes_debug(NES_DBG_INIT, "MSI is disabled by linux for device %s\n", pci_name(pcidev)); } } else { nes_debug(NES_DBG_INIT, "MSI not requested due to driver options for device %s\n", pci_name(pcidev)); } nesdev->csr_start = pci_resource_start(nesdev->pcidev, BAR_0); nesdev->doorbell_region = pci_resource_start(nesdev->pcidev, BAR_1); /* Init the adapter */ nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); if (!nesdev->nesadapter) { printk(KERN_ERR PFX "Unable to initialize adapter.\n"); ret = -ENOMEM; goto bail5; } nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; nesdev->nesadapter->wqm_quanta = wqm_quanta; /* nesdev->base_doorbell_index = nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ nesdev->base_doorbell_index = 1; nesdev->doorbell_start = nesdev->nesadapter->doorbell_start; if (nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { switch (PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count) { case 1: nesdev->mac_index = 2; break; case 2: nesdev->mac_index = 1; break; case 3: nesdev->mac_index = 3; break; case 0: default: nesdev->mac_index = 0; } } else { nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count; } if ((limit_maxrdreqsz || ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) && (hw_rev == NE020_REV1))) && (pcie_get_readrq(pcidev) > 256)) { if (pcie_set_readrq(pcidev, 
256)) printk(KERN_ERR PFX "Unable to set max read request" " to 256 bytes\n"); else nes_debug(NES_DBG_INIT, "Max read request size set" " to 256 bytes\n"); } tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev); /* bring up the Control QP */ if (nes_init_cqp(nesdev)) { ret = -ENODEV; goto bail6; } /* Arm the CCQ */ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | PCI_FUNC(nesdev->pcidev->devfn)); nes_read32(nesdev->regs+NES_CQE_ALLOC); /* Enable the interrupts */ nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) | (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16)); if (PCI_FUNC(nesdev->pcidev->devfn) < 4) { nesdev->int_req |= (1 << (PCI_FUNC(nesdev->mac_index)+24)); } /* TODO: This really should be the first driver to load, not function 0 */ if (PCI_FUNC(nesdev->pcidev->devfn) == 0) { /* pick up PCI and critical errors if the first driver to load */ nesdev->intf_int_req = NES_INTF_INT_PCIERR | NES_INTF_INT_CRITERR; nesdev->int_req |= NES_INT_INTF; } else { nesdev->intf_int_req = 0; } nesdev->intf_int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16)); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, 0); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 0); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS2, 0x00001265); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS4, 0x18021804); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS3, 0x17801790); /* deal with both periodic and one_shot */ nesdev->timer_int_req = 0x101 << PCI_FUNC(nesdev->pcidev->devfn); nesdev->nesadapter->timer_int_req |= nesdev->timer_int_req; nes_debug(NES_DBG_INIT, "setting int_req for function %u, nesdev = 0x%04X, adapter = 0x%04X\n", PCI_FUNC(nesdev->pcidev->devfn), nesdev->timer_int_req, nesdev->nesadapter->timer_int_req); nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); list_add_tail(&nesdev->list, &nes_dev_list); /* Request an interrupt line for the driver */ ret = request_irq(pcidev->irq, nes_interrupt, IRQF_SHARED, 
DRV_NAME, nesdev); if (ret) { printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n", pci_name(pcidev), pcidev->irq); goto bail65; } nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); if (nes_notifiers_registered == 0) { register_inetaddr_notifier(&nes_inetaddr_notifier); register_netevent_notifier(&nes_net_notifier); } nes_notifiers_registered++; /* Initialize network devices */ if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) { goto bail7; } /* Register network device */ ret = register_netdev(netdev); if (ret) { printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret); nes_netdev_destroy(netdev); goto bail7; } nes_print_macaddr(netdev); /* create a CM core for this netdev */ nesvnic = netdev_priv(netdev); nesdev->netdev_count++; nesdev->nesadapter->netdev_count++; printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n", pci_name(pcidev)); return 0; bail7: printk(KERN_ERR PFX "bail7\n"); while (nesdev->netdev_count > 0) { nesdev->netdev_count--; nesdev->nesadapter->netdev_count--; unregister_netdev(nesdev->netdev[nesdev->netdev_count]); nes_netdev_destroy(nesdev->netdev[nesdev->netdev_count]); } nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n", nesdev->netdev_count, nesdev->nesadapter->netdev_count); nes_notifiers_registered--; if (nes_notifiers_registered == 0) { unregister_netevent_notifier(&nes_net_notifier); unregister_inetaddr_notifier(&nes_inetaddr_notifier); } list_del(&nesdev->list); nes_destroy_cqp(nesdev); bail65: printk(KERN_ERR PFX "bail65\n"); free_irq(pcidev->irq, nesdev); if (nesdev->msi_enabled) { pci_disable_msi(pcidev); } bail6: printk(KERN_ERR PFX "bail6\n"); tasklet_kill(&nesdev->dpc_tasklet); /* Deallocate the Adapter Structure */ nes_destroy_adapter(nesdev->nesadapter); bail5: printk(KERN_ERR PFX "bail5\n"); iounmap(nesdev->regs); bail3: printk(KERN_ERR PFX "bail3\n"); kfree(nesdev); bail2: pci_release_regions(pcidev); bail1: pci_disable_device(pcidev); bail0: return ret; } /** * 
nes_remove - unload from kernel */ static void __devexit nes_remove(struct pci_dev *pcidev) { struct nes_device *nesdev = pci_get_drvdata(pcidev); struct net_device *netdev; int netdev_index = 0; if (nesdev->netdev_count) { netdev = nesdev->netdev[netdev_index]; if (netdev) { netif_stop_queue(netdev); unregister_netdev(netdev); nes_netdev_destroy(netdev); nesdev->netdev[netdev_index] = NULL; nesdev->netdev_count--; nesdev->nesadapter->netdev_count--; } } nes_notifiers_registered--; if (nes_notifiers_registered == 0) { unregister_netevent_notifier(&nes_net_notifier); unregister_inetaddr_notifier(&nes_inetaddr_notifier); } list_del(&nesdev->list); nes_destroy_cqp(nesdev); free_irq(pcidev->irq, nesdev); tasklet_kill(&nesdev->dpc_tasklet); /* Deallocate the Adapter Structure */ nes_destroy_adapter(nesdev->nesadapter); if (nesdev->msi_enabled) { pci_disable_msi(pcidev); } iounmap(nesdev->regs); kfree(nesdev); /* nes_debug(NES_DBG_SHUTDOWN, "calling pci_release_regions.\n"); */ pci_release_regions(pcidev); pci_disable_device(pcidev); pci_set_drvdata(pcidev, NULL); } static struct pci_driver nes_pci_driver = { .name = DRV_NAME, .id_table = nes_pci_table, .probe = nes_probe, .remove = __devexit_p(nes_remove), }; static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf) { unsigned int devfn = 0xffffffff; unsigned char bus_number = 0xff; unsigned int i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { devfn = nesdev->pcidev->devfn; bus_number = nesdev->pcidev->bus->number; break; } i++; } return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn); } static ssize_t nes_store_adapter(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; ee_flsh_adapter = simple_strtoul(p, &p, 10); return strnlen(buf, count); } static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf) { u32 eeprom_cmd = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, 
&nes_dev_list, list) { if (i == ee_flsh_adapter) { eeprom_cmd = nes_read32(nesdev->regs + NES_EEPROM_COMMAND); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd); } static ssize_t nes_store_ee_cmd(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + NES_EEPROM_COMMAND, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf) { u32 eeprom_data = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { eeprom_data = nes_read32(nesdev->regs + NES_EEPROM_DATA); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data); } static ssize_t nes_store_ee_data(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + NES_EEPROM_DATA, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf) { u32 flash_cmd = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { flash_cmd = nes_read32(nesdev->regs + NES_FLASH_COMMAND); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd); } static ssize_t nes_store_flash_cmd(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = 
simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + NES_FLASH_COMMAND, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf) { u32 flash_data = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { flash_data = nes_read32(nesdev->regs + NES_FLASH_DATA); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data); } static ssize_t nes_store_flash_data(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + NES_FLASH_DATA, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr); } static ssize_t nes_store_nonidx_addr(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') sysfs_nonidx_addr = simple_strtoul(p, &p, 16); return strnlen(buf, count); } static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf) { u32 nonidx_data = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nonidx_data = nes_read32(nesdev->regs + sysfs_nonidx_addr); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data); } static ssize_t nes_store_nonidx_data(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, 
&p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + sysfs_nonidx_addr, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr); } static ssize_t nes_store_idx_addr(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') sysfs_idx_addr = simple_strtoul(p, &p, 16); return strnlen(buf, count); } static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf) { u32 idx_data = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { idx_data = nes_read_indexed(nesdev, sysfs_idx_addr); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data); } static ssize_t nes_store_idx_data(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write_indexed(nesdev, sysfs_idx_addr, val); break; } i++; } } return strnlen(buf, count); } /** * nes_show_wqm_quanta */ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf) { u32 wqm_quanta_value = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { wqm_quanta_value = nesdev->nesadapter->wqm_quanta; break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta); } /** * nes_store_wqm_quanta */ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp, const char *buf, size_t count) { unsigned long wqm_quanta_value; u32 wqm_config1; u32 i = 0; struct nes_device *nesdev; strict_strtoul(buf, 0, &wqm_quanta_value); 
	/* Tail of nes_store_wqm_quanta(): apply the parsed quanta to the
	 * adapter selected by ee_flsh_adapter. */
	list_for_each_entry(nesdev, &nes_dev_list, list) {
		if (i == ee_flsh_adapter) {
			nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
			wqm_config1 = nes_read_indexed(nesdev, NES_IDX_WQM_CONFIG1);
			/* Quanta occupies bits 31:1 of NES_IDX_WQM_CONFIG1;
			 * bit 0 of the current register value is preserved. */
			nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1,
					((wqm_quanta_value << 1) |
					(wqm_config1 & 0x00000001)));
			break;
		}
		i++;
	}
	return strnlen(buf, count);
}

/* Root-only (S_IRUSR | S_IWUSR) driver-level debug attributes exposing the
 * EEPROM, flash, and indexed/non-indexed register access helpers above. */
static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR, nes_show_adapter, nes_store_adapter);
static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR, nes_show_ee_cmd, nes_store_ee_cmd);
static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR, nes_show_ee_data, nes_store_ee_data);
static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR, nes_show_flash_cmd, nes_store_flash_cmd);
static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR, nes_show_flash_data, nes_store_flash_data);
static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR, nes_show_nonidx_addr, nes_store_nonidx_addr);
static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR, nes_show_nonidx_data, nes_store_nonidx_data);
static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR, nes_show_idx_addr, nes_store_idx_addr);
static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR, nes_show_idx_data, nes_store_idx_data);
static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR, nes_show_wqm_quanta, nes_store_wqm_quanta);

/*
 * nes_create_driver_sysfs - create all driver-level sysfs attribute files.
 *
 * NOTE(review): OR-ing the error codes together mangles the individual
 * -E values; the aggregate is only meaningful as zero/non-zero, which is
 * all the caller checks.
 */
static int nes_create_driver_sysfs(struct pci_driver *drv)
{
	int error;
	error  = driver_create_file(&drv->driver, &driver_attr_adapter);
	error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd);
	error |= driver_create_file(&drv->driver, &driver_attr_eeprom_data);
	error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd);
	error |= driver_create_file(&drv->driver, &driver_attr_flash_data);
	error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr);
	error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
	error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
	error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
	error |= driver_create_file(&drv->driver, &driver_attr_wqm_quanta);
return error; } static void nes_remove_driver_sysfs(struct pci_driver *drv) { driver_remove_file(&drv->driver, &driver_attr_adapter); driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd); driver_remove_file(&drv->driver, &driver_attr_eeprom_data); driver_remove_file(&drv->driver, &driver_attr_flash_cmd); driver_remove_file(&drv->driver, &driver_attr_flash_data); driver_remove_file(&drv->driver, &driver_attr_nonidx_addr); driver_remove_file(&drv->driver, &driver_attr_nonidx_data); driver_remove_file(&drv->driver, &driver_attr_idx_addr); driver_remove_file(&drv->driver, &driver_attr_idx_data); driver_remove_file(&drv->driver, &driver_attr_wqm_quanta); } /** * nes_init_module - module initialization entry point */ static int __init nes_init_module(void) { int retval; int retval1; retval = nes_cm_start(); if (retval) { printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n"); return retval; } retval = pci_register_driver(&nes_pci_driver); if (retval >= 0) { retval1 = nes_create_driver_sysfs(&nes_pci_driver); if (retval1 < 0) printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n"); } return retval; } /** * nes_exit_module - module unload entry point */ static void __exit nes_exit_module(void) { nes_cm_stop(); nes_remove_driver_sysfs(&nes_pci_driver); pci_unregister_driver(&nes_pci_driver); } module_init(nes_init_module); module_exit(nes_exit_module);
gpl-2.0
embeddedarm/linux-2.6.35-ts4800
fs/nfs/dns_resolve.c
803
7544
/* * linux/fs/nfs/dns_resolve.c * * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com> * * Resolves DNS hostnames into valid ip addresses */ #include <linux/hash.h> #include <linux/string.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/socket.h> #include <linux/seq_file.h> #include <linux/inet.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/cache.h> #include <linux/sunrpc/svcauth.h> #include "dns_resolve.h" #include "cache_lib.h" #define NFS_DNS_HASHBITS 4 #define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS) static struct cache_head *nfs_dns_table[NFS_DNS_HASHTBL_SIZE]; struct nfs_dns_ent { struct cache_head h; char *hostname; size_t namelen; struct sockaddr_storage addr; size_t addrlen; }; static void nfs_dns_ent_update(struct cache_head *cnew, struct cache_head *ckey) { struct nfs_dns_ent *new; struct nfs_dns_ent *key; new = container_of(cnew, struct nfs_dns_ent, h); key = container_of(ckey, struct nfs_dns_ent, h); memcpy(&new->addr, &key->addr, key->addrlen); new->addrlen = key->addrlen; } static void nfs_dns_ent_init(struct cache_head *cnew, struct cache_head *ckey) { struct nfs_dns_ent *new; struct nfs_dns_ent *key; new = container_of(cnew, struct nfs_dns_ent, h); key = container_of(ckey, struct nfs_dns_ent, h); kfree(new->hostname); new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL); if (new->hostname) { new->namelen = key->namelen; nfs_dns_ent_update(cnew, ckey); } else { new->namelen = 0; new->addrlen = 0; } } static void nfs_dns_ent_put(struct kref *ref) { struct nfs_dns_ent *item; item = container_of(ref, struct nfs_dns_ent, h.ref); kfree(item->hostname); kfree(item); } static struct cache_head *nfs_dns_ent_alloc(void) { struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL); if (item != NULL) { item->hostname = NULL; item->namelen = 0; item->addrlen = 0; return &item->h; } return NULL; }; static unsigned int nfs_dns_hash(const struct nfs_dns_ent *key) { return 
hash_str(key->hostname, NFS_DNS_HASHBITS); } static void nfs_dns_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) { struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); qword_add(bpp, blen, key->hostname); (*bpp)[-1] = '\n'; } static int nfs_dns_upcall(struct cache_detail *cd, struct cache_head *ch) { struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); int ret; ret = nfs_cache_upcall(cd, key->hostname); if (ret) ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request); return ret; } static int nfs_dns_match(struct cache_head *ca, struct cache_head *cb) { struct nfs_dns_ent *a; struct nfs_dns_ent *b; a = container_of(ca, struct nfs_dns_ent, h); b = container_of(cb, struct nfs_dns_ent, h); if (a->namelen == 0 || a->namelen != b->namelen) return 0; return memcmp(a->hostname, b->hostname, a->namelen) == 0; } static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct nfs_dns_ent *item; long ttl; if (h == NULL) { seq_puts(m, "# ip address hostname ttl\n"); return 0; } item = container_of(h, struct nfs_dns_ent, h); ttl = (long)item->h.expiry_time - (long)get_seconds(); if (ttl < 0) ttl = 0; if (!test_bit(CACHE_NEGATIVE, &h->flags)) { char buf[INET6_ADDRSTRLEN+IPV6_SCOPE_ID_LEN+1]; rpc_ntop((struct sockaddr *)&item->addr, buf, sizeof(buf)); seq_printf(m, "%15s ", buf); } else seq_puts(m, "<none> "); seq_printf(m, "%15s %ld\n", item->hostname, ttl); return 0; } static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd, struct nfs_dns_ent *key) { struct cache_head *ch; ch = sunrpc_cache_lookup(cd, &key->h, nfs_dns_hash(key)); if (!ch) return NULL; return container_of(ch, struct nfs_dns_ent, h); } static struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd, struct nfs_dns_ent *new, struct nfs_dns_ent *key) { struct cache_head *ch; ch = sunrpc_cache_update(cd, &new->h, &key->h, nfs_dns_hash(key)); if (!ch) return NULL; return container_of(ch, struct nfs_dns_ent, h); 
} static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen) { char buf1[NFS_DNS_HOSTNAME_MAXLEN+1]; struct nfs_dns_ent key, *item; unsigned long ttl; ssize_t len; int ret = -EINVAL; if (buf[buflen-1] != '\n') goto out; buf[buflen-1] = '\0'; len = qword_get(&buf, buf1, sizeof(buf1)); if (len <= 0) goto out; key.addrlen = rpc_pton(buf1, len, (struct sockaddr *)&key.addr, sizeof(key.addr)); len = qword_get(&buf, buf1, sizeof(buf1)); if (len <= 0) goto out; key.hostname = buf1; key.namelen = len; memset(&key.h, 0, sizeof(key.h)); ttl = get_expiry(&buf); if (ttl == 0) goto out; key.h.expiry_time = ttl + get_seconds(); ret = -ENOMEM; item = nfs_dns_lookup(cd, &key); if (item == NULL) goto out; if (key.addrlen == 0) set_bit(CACHE_NEGATIVE, &key.h.flags); item = nfs_dns_update(cd, &key, item); if (item == NULL) goto out; ret = 0; cache_put(&item->h, cd); out: return ret; } static struct cache_detail nfs_dns_resolve = { .owner = THIS_MODULE, .hash_size = NFS_DNS_HASHTBL_SIZE, .hash_table = nfs_dns_table, .name = "dns_resolve", .cache_put = nfs_dns_ent_put, .cache_upcall = nfs_dns_upcall, .cache_parse = nfs_dns_parse, .cache_show = nfs_dns_show, .match = nfs_dns_match, .init = nfs_dns_ent_init, .update = nfs_dns_ent_update, .alloc = nfs_dns_ent_alloc, }; static int do_cache_lookup(struct cache_detail *cd, struct nfs_dns_ent *key, struct nfs_dns_ent **item, struct nfs_cache_defer_req *dreq) { int ret = -ENOMEM; *item = nfs_dns_lookup(cd, key); if (*item) { ret = cache_check(cd, &(*item)->h, &dreq->req); if (ret) *item = NULL; } return ret; } static int do_cache_lookup_nowait(struct cache_detail *cd, struct nfs_dns_ent *key, struct nfs_dns_ent **item) { int ret = -ENOMEM; *item = nfs_dns_lookup(cd, key); if (!*item) goto out_err; ret = -ETIMEDOUT; if (!test_bit(CACHE_VALID, &(*item)->h.flags) || (*item)->h.expiry_time < get_seconds() || cd->flush_time > (*item)->h.last_refresh) goto out_put; ret = -ENOENT; if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags)) goto 
out_put;
	return 0;

out_put:
	cache_put(&(*item)->h, cd);
out_err:
	*item = NULL;
	return ret;
}

/*
 * do_cache_lookup_wait - blocking cache lookup.
 *
 * Performs a deferred lookup; if the cache answers -EAGAIN (upcall in
 * flight), waits for the upcall to complete and then retries with the
 * non-blocking path. On success *item holds a referenced entry that the
 * caller must release with cache_put().
 */
static int do_cache_lookup_wait(struct cache_detail *cd,
		struct nfs_dns_ent *key,
		struct nfs_dns_ent **item)
{
	struct nfs_cache_defer_req *dreq;
	int ret = -ENOMEM;

	dreq = nfs_cache_defer_req_alloc();
	if (!dreq)
		goto out;
	ret = do_cache_lookup(cd, key, item, dreq);
	if (ret == -EAGAIN) {
		ret = nfs_cache_wait_for_upcall(dreq);
		if (!ret)
			ret = do_cache_lookup_nowait(cd, key, item);
	}
	nfs_cache_defer_req_put(dreq);
out:
	return ret;
}

/*
 * nfs_dns_resolve_name - resolve a hostname to a socket address via the
 * dns_resolve upcall cache.
 *
 * Returns the address length copied into @sa on success, -EOVERFLOW if
 * @salen is too small for the cached address, -ESRCH if the name did not
 * resolve (cache negative entry), or another negative errno.
 */
ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
		struct sockaddr *sa, size_t salen)
{
	struct nfs_dns_ent key = {
		.hostname = name,
		.namelen = namelen,
	};
	struct nfs_dns_ent *item = NULL;
	ssize_t ret;

	ret = do_cache_lookup_wait(&nfs_dns_resolve, &key, &item);
	if (ret == 0) {
		if (salen >= item->addrlen) {
			memcpy(sa, &item->addr, item->addrlen);
			ret = item->addrlen;
		} else
			ret = -EOVERFLOW;
		/* Drop the reference taken by the lookup. */
		cache_put(&item->h, &nfs_dns_resolve);
	} else if (ret == -ENOENT)
		/* Map "negative cache entry" to the error callers expect. */
		ret = -ESRCH;
	return ret;
}

/* Register/unregister the dns_resolve cache with the sunrpc cache layer. */
int nfs_dns_resolver_init(void)
{
	return nfs_cache_register(&nfs_dns_resolve);
}

void nfs_dns_resolver_destroy(void)
{
	nfs_cache_unregister(&nfs_dns_resolve);
}
gpl-2.0
brycecr/linux
drivers/net/ethernet/netx-eth.c
1059
13737
/* * drivers/net/ethernet/netx-eth.c * * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/mii.h> #include <asm/io.h> #include <mach/hardware.h> #include <mach/netx-regs.h> #include <mach/pfifo.h> #include <mach/xc.h> #include <linux/platform_data/eth-netx.h> /* XC Fifo Offsets */ #define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */ #define IND_FIFO_PORT_HI(xcno) (1 + ((xcno) << 3)) /* Index of the FIFO where received */ /* Data packages are indicated by XC */ #define IND_FIFO_PORT_LO(xcno) (2 + ((xcno) << 3)) /* Index of the FIFO where received */ /* Data packages are indicated by XC */ #define REQ_FIFO_PORT_HI(xcno) (3 + ((xcno) << 3)) /* Index of the FIFO where Data packages */ /* have to be indicated by ARM which */ /* shall be sent */ #define REQ_FIFO_PORT_LO(xcno) (4 + ((xcno) << 3)) /* Index of the FIFO where Data packages */ /* have to be indicated by ARM which shall */ /* be sent */ #define CON_FIFO_PORT_HI(xcno) (5 + ((xcno) << 3)) /* Index of the FIFO where sent Data packages */ /* are confirmed */ #define CON_FIFO_PORT_LO(xcno) (6 + ((xcno) << 3)) /* Index of the FIFO where 
sent Data */ /* packages are confirmed */ #define PFIFO_MASK(xcno) (0x7f << (xcno*8)) #define FIFO_PTR_FRAMELEN_SHIFT 0 #define FIFO_PTR_FRAMELEN_MASK (0x7ff << 0) #define FIFO_PTR_FRAMELEN(len) (((len) << 0) & FIFO_PTR_FRAMELEN_MASK) #define FIFO_PTR_TIMETRIG (1<<11) #define FIFO_PTR_MULTI_REQ #define FIFO_PTR_ORIGIN (1<<14) #define FIFO_PTR_VLAN (1<<15) #define FIFO_PTR_FRAMENO_SHIFT 16 #define FIFO_PTR_FRAMENO_MASK (0x3f << 16) #define FIFO_PTR_FRAMENO(no) (((no) << 16) & FIFO_PTR_FRAMENO_MASK) #define FIFO_PTR_SEGMENT_SHIFT 22 #define FIFO_PTR_SEGMENT_MASK (0xf << 22) #define FIFO_PTR_SEGMENT(seg) (((seg) & 0xf) << 22) #define FIFO_PTR_ERROR_SHIFT 28 #define FIFO_PTR_ERROR_MASK (0xf << 28) #define ISR_LINK_STATUS_CHANGE (1<<4) #define ISR_IND_LO (1<<3) #define ISR_CON_LO (1<<2) #define ISR_IND_HI (1<<1) #define ISR_CON_HI (1<<0) #define ETH_MAC_LOCAL_CONFIG 0x1560 #define ETH_MAC_4321 0x1564 #define ETH_MAC_65 0x1568 #define MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT 16 #define MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK (0xf<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT) #define MAC_TRAFFIC_CLASS_ARRANGEMENT(x) (((x)<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT) & MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK) #define LOCAL_CONFIG_LINK_STATUS_IRQ_EN (1<<24) #define LOCAL_CONFIG_CON_LO_IRQ_EN (1<<23) #define LOCAL_CONFIG_CON_HI_IRQ_EN (1<<22) #define LOCAL_CONFIG_IND_LO_IRQ_EN (1<<21) #define LOCAL_CONFIG_IND_HI_IRQ_EN (1<<20) #define CARDNAME "netx-eth" /* LSB must be zero */ #define INTERNAL_PHY_ADR 0x1c struct netx_eth_priv { void __iomem *sram_base, *xpec_base, *xmac_base; int id; struct mii_if_info mii; u32 msg_enable; struct xc *xc; spinlock_t lock; }; static void netx_eth_set_multicast_list(struct net_device *ndev) { /* implement me */ } static int netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); unsigned char *buf = skb->data; unsigned int len = skb->len; spin_lock_irq(&priv->lock); memcpy_toio(priv->sram_base + 1560, (void 
*)buf, len); if (len < 60) { memset_io(priv->sram_base + 1560 + len, 0, 60 - len); len = 60; } pfifo_push(REQ_FIFO_PORT_LO(priv->id), FIFO_PTR_SEGMENT(priv->id) | FIFO_PTR_FRAMENO(1) | FIFO_PTR_FRAMELEN(len)); ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; netif_stop_queue(ndev); spin_unlock_irq(&priv->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static void netx_eth_receive(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); unsigned int val, frameno, seg, len; unsigned char *data; struct sk_buff *skb; val = pfifo_pop(IND_FIFO_PORT_LO(priv->id)); frameno = (val & FIFO_PTR_FRAMENO_MASK) >> FIFO_PTR_FRAMENO_SHIFT; seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT; len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT; skb = netdev_alloc_skb(ndev, len); if (unlikely(skb == NULL)) { ndev->stats.rx_dropped++; return; } data = skb_put(skb, len); memcpy_fromio(data, priv->sram_base + frameno * 1560, len); pfifo_push(EMPTY_PTR_FIFO(priv->id), FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno)); skb->protocol = eth_type_trans(skb, ndev); netif_rx(skb); ndev->stats.rx_packets++; ndev->stats.rx_bytes += len; } static irqreturn_t netx_eth_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct netx_eth_priv *priv = netdev_priv(ndev); int status; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); status = readl(NETX_PFIFO_XPEC_ISR(priv->id)); while (status) { int fill_level; writel(status, NETX_PFIFO_XPEC_ISR(priv->id)); if ((status & ISR_CON_HI) || (status & ISR_IND_HI)) printk("%s: unexpected status: 0x%08x\n", __func__, status); fill_level = readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id))); while (fill_level--) netx_eth_receive(ndev); if (status & ISR_CON_LO) netif_wake_queue(ndev); if (status & ISR_LINK_STATUS_CHANGE) mii_check_media(&priv->mii, netif_msg_link(priv), 1); status = readl(NETX_PFIFO_XPEC_ISR(priv->id)); } spin_unlock_irqrestore(&priv->lock, flags); return 
IRQ_HANDLED; } static int netx_eth_open(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); if (request_irq (ndev->irq, netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev)) return -EAGAIN; writel(ndev->dev_addr[0] | ndev->dev_addr[1]<<8 | ndev->dev_addr[2]<<16 | ndev->dev_addr[3]<<24, priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_4321); writel(ndev->dev_addr[4] | ndev->dev_addr[5]<<8, priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_65); writel(LOCAL_CONFIG_LINK_STATUS_IRQ_EN | LOCAL_CONFIG_CON_LO_IRQ_EN | LOCAL_CONFIG_CON_HI_IRQ_EN | LOCAL_CONFIG_IND_LO_IRQ_EN | LOCAL_CONFIG_IND_HI_IRQ_EN, priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_LOCAL_CONFIG); mii_check_media(&priv->mii, netif_msg_link(priv), 1); netif_start_queue(ndev); return 0; } static int netx_eth_close(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); writel(0, priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_LOCAL_CONFIG); free_irq(ndev->irq, ndev); return 0; } static void netx_eth_timeout(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); int i; printk(KERN_ERR "%s: transmit timed out, resetting\n", ndev->name); spin_lock_irq(&priv->lock); xc_reset(priv->xc); xc_start(priv->xc); for (i=2; i<=18; i++) pfifo_push(EMPTY_PTR_FIFO(priv->id), FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id)); spin_unlock_irq(&priv->lock); netif_wake_queue(ndev); } static int netx_eth_phy_read(struct net_device *ndev, int phy_id, int reg) { unsigned int val; val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) | MIIMU_REGADDR(reg) | MIIMU_PHY_NRES; writel(val, NETX_MIIMU); while (readl(NETX_MIIMU) & MIIMU_SNRDY); return readl(NETX_MIIMU) >> 16; } static void netx_eth_phy_write(struct net_device *ndev, int phy_id, int reg, int value) { unsigned int val; val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) | MIIMU_REGADDR(reg) | MIIMU_PHY_NRES | MIIMU_OPMODE_WRITE | MIIMU_DATA(value); writel(val, 
NETX_MIIMU); while (readl(NETX_MIIMU) & MIIMU_SNRDY); } static const struct net_device_ops netx_eth_netdev_ops = { .ndo_open = netx_eth_open, .ndo_stop = netx_eth_close, .ndo_start_xmit = netx_eth_hard_start_xmit, .ndo_tx_timeout = netx_eth_timeout, .ndo_set_rx_mode = netx_eth_set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; static int netx_eth_enable(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); unsigned int mac4321, mac65; int running, i; ndev->netdev_ops = &netx_eth_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(5000); priv->msg_enable = NETIF_MSG_LINK; priv->mii.phy_id_mask = 0x1f; priv->mii.reg_num_mask = 0x1f; priv->mii.force_media = 0; priv->mii.full_duplex = 0; priv->mii.dev = ndev; priv->mii.mdio_read = netx_eth_phy_read; priv->mii.mdio_write = netx_eth_phy_write; priv->mii.phy_id = INTERNAL_PHY_ADR + priv->id; running = xc_running(priv->xc); xc_stop(priv->xc); /* if the xc engine is already running, assume the bootloader has * loaded the firmware for us */ if (running) { /* get Node Address from hardware */ mac4321 = readl(priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_4321); mac65 = readl(priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_65); ndev->dev_addr[0] = mac4321 & 0xff; ndev->dev_addr[1] = (mac4321 >> 8) & 0xff; ndev->dev_addr[2] = (mac4321 >> 16) & 0xff; ndev->dev_addr[3] = (mac4321 >> 24) & 0xff; ndev->dev_addr[4] = mac65 & 0xff; ndev->dev_addr[5] = (mac65 >> 8) & 0xff; } else { if (xc_request_firmware(priv->xc)) { printk(CARDNAME ": requesting firmware failed\n"); return -ENODEV; } } xc_reset(priv->xc); xc_start(priv->xc); if (!is_valid_ether_addr(ndev->dev_addr)) printk("%s: Invalid ethernet MAC address. 
Please " "set using ifconfig\n", ndev->name); for (i=2; i<=18; i++) pfifo_push(EMPTY_PTR_FIFO(priv->id), FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id)); return register_netdev(ndev); } static int netx_eth_drv_probe(struct platform_device *pdev) { struct netx_eth_priv *priv; struct net_device *ndev; struct netxeth_platform_data *pdata; int ret; ndev = alloc_etherdev(sizeof (struct netx_eth_priv)); if (!ndev) { ret = -ENOMEM; goto exit; } SET_NETDEV_DEV(ndev, &pdev->dev); platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); pdata = dev_get_platdata(&pdev->dev); priv->xc = request_xc(pdata->xcno, &pdev->dev); if (!priv->xc) { dev_err(&pdev->dev, "unable to request xc engine\n"); ret = -ENODEV; goto exit_free_netdev; } ndev->irq = priv->xc->irq; priv->id = pdev->id; priv->xpec_base = priv->xc->xpec_base; priv->xmac_base = priv->xc->xmac_base; priv->sram_base = priv->xc->sram_base; spin_lock_init(&priv->lock); ret = pfifo_request(PFIFO_MASK(priv->id)); if (ret) { printk("unable to request PFIFO\n"); goto exit_free_xc; } ret = netx_eth_enable(ndev); if (ret) goto exit_free_pfifo; return 0; exit_free_pfifo: pfifo_free(PFIFO_MASK(priv->id)); exit_free_xc: free_xc(priv->xc); exit_free_netdev: free_netdev(ndev); exit: return ret; } static int netx_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netx_eth_priv *priv = netdev_priv(ndev); unregister_netdev(ndev); xc_stop(priv->xc); free_xc(priv->xc); free_netdev(ndev); pfifo_free(PFIFO_MASK(priv->id)); return 0; } static int netx_eth_drv_suspend(struct platform_device *pdev, pm_message_t state) { dev_err(&pdev->dev, "suspend not implemented\n"); return 0; } static int netx_eth_drv_resume(struct platform_device *pdev) { dev_err(&pdev->dev, "resume not implemented\n"); return 0; } static struct platform_driver netx_eth_driver = { .probe = netx_eth_drv_probe, .remove = netx_eth_drv_remove, .suspend = netx_eth_drv_suspend, .resume = netx_eth_drv_resume, .driver = 
{
		.name = CARDNAME,
	},
};

/*
 * netx_eth_init - module init: configure and reset the internal PHYs,
 * then register the platform driver.
 */
static int __init netx_eth_init(void)
{
	unsigned int phy_control, val;

	printk("NetX Ethernet driver\n");

	phy_control = PHY_CONTROL_PHY_ADDRESS(INTERNAL_PHY_ADR>>1) |
		      PHY_CONTROL_PHY1_MODE(PHY_MODE_ALL) |
		      PHY_CONTROL_PHY1_AUTOMDIX |
		      PHY_CONTROL_PHY1_EN |
		      PHY_CONTROL_PHY0_MODE(PHY_MODE_ALL) |
		      PHY_CONTROL_PHY0_AUTOMDIX |
		      PHY_CONTROL_PHY0_EN |
		      PHY_CONTROL_CLK_XLATIN;

	/* Read-back-and-write of the IOC access key register before each
	 * PHY_CONTROL write — presumably this unlocks the IOC register
	 * block for one write; confirm against the netX hardware docs. */
	val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
	writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);

	/* Assert PHY reset with the desired configuration, hold 100us,
	 * then release reset with the same configuration. */
	writel(phy_control | PHY_CONTROL_RESET, NETX_SYSTEM_PHY_CONTROL);
	udelay(100);

	val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
	writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);

	writel(phy_control, NETX_SYSTEM_PHY_CONTROL);

	return platform_driver_register(&netx_eth_driver);
}

/* netx_eth_cleanup - module exit: unregister the platform driver. */
static void __exit netx_eth_cleanup(void)
{
	platform_driver_unregister(&netx_eth_driver);
}

module_init(netx_eth_init);
module_exit(netx_eth_cleanup);

MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" CARDNAME);
MODULE_FIRMWARE("xc0.bin");
MODULE_FIRMWARE("xc1.bin");
MODULE_FIRMWARE("xc2.bin");
gpl-2.0
davidmueller13/android_kernel_lge_msm8974-old
arch/arm/mach-msm/platsmp-8625.c
1827
8023
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/hardware/gic.h>
#include <asm/smp_scu.h>
#include <asm/unified.h>

#include <mach/msm_iomap.h>

#include "pm.h"
#include "platsmp.h"

/* Physical base of the per-core reset control registers. */
#define CORE_RESET_BASE		0xA8600590
/* Status bits in MPA5_STATUS_REG that indicate a core is out of GDFS. */
#define MSM_CORE_STATUS_MSK	0x02800000

/* Set once a core has been released from reset for the first time. */
static DEFINE_PER_CPU(bool, cold_boot_done);

/*
 * Per-core bring-up parameters:
 * @reset_off:       offset from CORE_RESET_BASE of this core's reset reg
 * @offset:          offset from MSM_CFG_CTL_BASE of this core's status reg
 * @ipc_irq:         SPI used to wake the core out of GDFS after PC
 * @reset_core_base: ioremapped reset register (kept mapped for hotplug/PM)
 */
struct per_cpu_data {
	unsigned int reset_off;
	unsigned int offset;
	unsigned int ipc_irq;
	void __iomem *reset_core_base;
};

static uint32_t *msm8625_boot_vector;

static struct per_cpu_data cpu_data[CONFIG_NR_CPUS];

static void __iomem *scu_base_addr(void)
{
	return MSM_SCU_BASE;
}

static DEFINE_SPINLOCK(boot_lock);

/*
 * MP_CORE_IPC will be used to generate interrupt and can be used by either
 * of core.
 * To bring secondary cores out of GDFS we need to raise the SPI using the
 * MP_CORE_IPC.
 */
static void raise_clear_spi(unsigned int cpu, bool set)
{
	int value;

	/* Read-modify-write the per-cpu bit in the MP_CORE_IPC register. */
	value = __raw_readl(MSM_CSR_BASE + 0x54);
	if (set)
		__raw_writel(value | BIT(cpu), MSM_CSR_BASE + 0x54);
	else
		__raw_writel(value & ~BIT(cpu), MSM_CSR_BASE + 0x54);
	mb();
}

/* Mask @irq and clear any pending instance of it in the GIC. */
static void clear_pending_spi(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct irq_chip *c = irq_data_get_irq_chip(d);

	c->irq_mask(d);
	local_irq_disable();
	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(irq);
	local_irq_enable();
}

/*
 * Runs on the secondary CPU right after it enters the kernel: performs
 * platform init, signals the boot CPU via pen_release, and cleans up the
 * IPC SPI used to wake it out of power collapse.
 */
void __cpuinit msm8625_platform_secondary_init(unsigned int cpu)
{
	WARN_ON(msm_platform_secondary_init(cpu));

	/*
	 * if any interrupts are already enabled for the primary
	 * core (e.g. timer irq), then they will not have been enabled
	 * for us: do so
	 */
	gic_secondary_init(0);

	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/* clear the IPC pending SPI */
	if (per_cpu(power_collapsed, cpu)) {
		raise_clear_spi(cpu, false);
		clear_pending_spi(cpu_data[cpu].ipc_irq);
		per_cpu(power_collapsed, cpu) = 0;
	}

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

/*
 * Wait for the core's status bits to come up, then deassert its reset.
 * Returns 0 on success, -ENODEV if the status never appeared or the
 * reset register could not be mapped.
 */
static int __cpuinit msm8625_release_secondary(unsigned int cpu)
{
	void __iomem *base_ptr;
	int value = 0;
	unsigned long timeout;

	/*
	 * loop to ensure that the GHS_STATUS_CORE1 bit in the
	 * MPA5_STATUS_REG(0x3c) is set. The timeout for the while
	 * loop can be set as 20us as of now
	 */
	timeout = jiffies + usecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		value = __raw_readl(MSM_CFG_CTL_BASE + cpu_data[cpu].offset);
		if ((value & MSM_CORE_STATUS_MSK) == MSM_CORE_STATUS_MSK)
			break;
		udelay(1);
	}

	/*
	 * Check the status bits, not the raw register value: a stale,
	 * non-zero register would previously slip past an "if (!value)"
	 * test even though the core never actually came up.
	 */
	if ((value & MSM_CORE_STATUS_MSK) != MSM_CORE_STATUS_MSK) {
		pr_err("Core %u cannot be brought out of Reset!!!\n", cpu);
		return -ENODEV;
	}

	base_ptr = ioremap_nocache(CORE_RESET_BASE + cpu_data[cpu].reset_off,
				   SZ_4);
	if (!base_ptr)
		return -ENODEV;

	/* Reset core out of reset */
	__raw_writel(0x0, base_ptr);
	mb();

	/* Keep the mapping; it is reused later via core_reset_base(). */
	cpu_data[cpu].reset_core_base = base_ptr;

	return 0;
}

/* Accessor for the (already ioremapped) reset register of @cpu. */
void __iomem *core_reset_base(unsigned int cpu)
{
	return cpu_data[cpu].reset_core_base;
}

/*
 * Boot-CPU side of secondary bring-up: cold-release the core on first
 * boot, then kick it (via softirq or the GDFS wake SPI after power
 * collapse) and wait for it to check in through pen_release.
 */
int __cpuinit msm8625_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	preset_lpj = loops_per_jiffy;

	if (per_cpu(cold_boot_done, cpu) == false) {
		if (msm8625_release_secondary(cpu)) {
			pr_err("Failed to release core %u\n", cpu);
			return -ENODEV;
		}
		per_cpu(cold_boot_done, cpu) = true;
	}

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them. However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	write_pen_release(cpu);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 *
	 * power_collapsed is the flag which will be updated for Powercollapse.
	 * Once we are out of PC, as secondary cores will be in the state of
	 * GDFS which needs to be brought out by raising an SPI.
	 */
	if (per_cpu(power_collapsed, cpu)) {
		gic_configure_and_raise(cpu_data[cpu].ipc_irq, cpu);
		raise_clear_spi(cpu, true);
	} else {
		gic_raise_softirq(cpumask_of(cpu), 1);
	}

	/* Wait up to one second for the secondary to clear pen_release. */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;
		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init msm8625_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}

/* Record the per-core reset/status offsets and wake SPI for @cpu. */
static void per_cpu_data(unsigned int cpu, unsigned int off,
			 unsigned int off1, unsigned int irq)
{
	cpu_data[cpu].reset_off = off;
	cpu_data[cpu].offset = off1;
	cpu_data[cpu].ipc_irq = irq;
}

/* Set @bit in the boot remapper control register at @off. */
static void enable_boot_remapper(unsigned long bit, unsigned int off)
{
	int value;

	/* Enable boot remapper address */
	value = __raw_readl(MSM_CFG_CTL_BASE + off);
	__raw_writel(value | bit, MSM_CFG_CTL_BASE + off);
	mb();
}

static void remapper_address(unsigned long phys, unsigned int off)
{
	/*
	 * Write the address of secondary startup into the
	 * boot remapper register. The secondary CPU branches to this address.
	 */
	__raw_writel(phys, (MSM_CFG_CTL_BASE + off));
	mb();
}

/*
 * Install a two-word trampoline at the remapped boot address:
 * "ldr pc, [pc, #-4]" followed by the physical entry point.
 */
static void __init msm8625_boot_vector_init(uint32_t *boot_vector,
		unsigned long entry)
{
	if (!boot_vector)
		return;
	msm8625_boot_vector = boot_vector;

	msm8625_boot_vector[0] = 0xE51FF004; /* ldr pc, 4 */
	msm8625_boot_vector[1] = entry;
}

/*
 * Enable the SCU, install the secondary-startup trampoline, and program
 * each secondary core's boot remapper / status / wake-SPI parameters.
 */
void __init msm8625_platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu, value;
	void __iomem *cpu_ptr;

	scu_enable(scu_base_addr());

	cpu_ptr = ioremap_nocache(MSM8625_CPU_PHYS, SZ_8);
	if (!cpu_ptr) {
		pr_err("failed to ioremap for secondary cores\n");
		return;
	}

	msm8625_boot_vector_init(cpu_ptr,
			virt_to_phys(msm_secondary_startup));

	iounmap(cpu_ptr);

	for_each_possible_cpu(cpu) {
		switch (cpu) {
		case 0:
			/* Boot CPU: nothing to program. */
			break;
		case 1:
			remapper_address(MSM8625_CPU_PHYS, 0x34);
			per_cpu_data(cpu, 0x0, 0x3c,
					MSM8625_INT_ACSR_MP_CORE_IPC1);
			enable_boot_remapper(BIT(26), 0x30);
			break;
		case 2:
			remapper_address((MSM8625_CPU_PHYS >> 16), 0x4C);
			per_cpu_data(cpu, 0x8, 0x50,
					MSM8625_INT_ACSR_MP_CORE_IPC2);
			enable_boot_remapper(BIT(25), 0x48);
			break;
		case 3:
			value = __raw_readl(MSM_CFG_CTL_BASE + 0x4C);
			remapper_address(value | MSM8625_CPU_PHYS, 0x4C);
			per_cpu_data(cpu, 0xC, 0x50,
					MSM8625_INT_ACSR_MP_CORE_IPC3);
			enable_boot_remapper(BIT(26), 0x48);
			break;
		}
	}
}

struct smp_operations msm8625_smp_ops __initdata = {
	.smp_init_cpus = msm8625_smp_init_cpus,
	.smp_prepare_cpus = msm8625_platform_smp_prepare_cpus,
	.smp_secondary_init = msm8625_platform_secondary_init,
	.smp_boot_secondary = msm8625_boot_secondary,
	.cpu_kill = platform_cpu_kill,
	.cpu_die = platform_cpu_die,
	.cpu_disable = platform_cpu_disable
};
gpl-2.0
schacon/linux
drivers/rtc/rtc-sun4v.c
2083
2231
/* rtc-sun4v.c: Hypervisor based RTC for SUN4V systems. * * Copyright (C) 2008 David S. Miller <davem@davemloft.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include <asm/hypervisor.h> static unsigned long hypervisor_get_time(void) { unsigned long ret, time; int retries = 10000; retry: ret = sun4v_tod_get(&time); if (ret == HV_EOK) return time; if (ret == HV_EWOULDBLOCK) { if (--retries > 0) { udelay(100); goto retry; } pr_warn("tod_get() timed out.\n"); return 0; } pr_warn("tod_get() not supported.\n"); return 0; } static int sun4v_read_time(struct device *dev, struct rtc_time *tm) { rtc_time_to_tm(hypervisor_get_time(), tm); return 0; } static int hypervisor_set_time(unsigned long secs) { unsigned long ret; int retries = 10000; retry: ret = sun4v_tod_set(secs); if (ret == HV_EOK) return 0; if (ret == HV_EWOULDBLOCK) { if (--retries > 0) { udelay(100); goto retry; } pr_warn("tod_set() timed out.\n"); return -EAGAIN; } pr_warn("tod_set() not supported.\n"); return -EOPNOTSUPP; } static int sun4v_set_time(struct device *dev, struct rtc_time *tm) { unsigned long secs; int err; err = rtc_tm_to_time(tm, &secs); if (err) return err; return hypervisor_set_time(secs); } static const struct rtc_class_ops sun4v_rtc_ops = { .read_time = sun4v_read_time, .set_time = sun4v_set_time, }; static int __init sun4v_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; rtc = devm_rtc_device_register(&pdev->dev, "sun4v", &sun4v_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); platform_set_drvdata(pdev, rtc); return 0; } static int __exit sun4v_rtc_remove(struct platform_device *pdev) { return 0; } static struct platform_driver sun4v_rtc_driver = { .driver = { .name = "rtc-sun4v", .owner = THIS_MODULE, }, .remove = __exit_p(sun4v_rtc_remove), }; module_platform_driver_probe(sun4v_rtc_driver, 
sun4v_rtc_probe); MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_DESCRIPTION("SUN4V RTC driver"); MODULE_LICENSE("GPL");
gpl-2.0
tejasjadhav/android_kernel_mocha
drivers/net/ethernet/sun/sunbmac.c
2083
33608
/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
 *
 * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/auxio.h>
#include <asm/byteorder.h>
#include <asm/dma.h>
#include <asm/idprom.h>
#include <asm/io.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>

#include "sunbmac.h"

#define DRV_NAME	"sunbmac"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"August 26, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
MODULE_LICENSE("GPL");

#undef DEBUG_PROBE
#undef DEBUG_TX
#undef DEBUG_IRQ

#ifdef DEBUG_PROBE
#define DP(x)  printk x
#else
#define DP(x)
#endif

#ifdef DEBUG_TX
#define DTX(x)  printk x
#else
#define DTX(x)
#endif

#ifdef DEBUG_IRQ
#define DIRQ(x)  printk x
#else
#define DIRQ(x)
#endif

#define DEFAULT_JAMSIZE    4 /* Toe jam */

#define QEC_RESET_TRIES 200

/* Reset the whole QEC; returns 0 on success, -1 if the reset bit
 * never clears within QEC_RESET_TRIES polls.
 */
static int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
	return -1;
}

/* Program QEC global registers: SBUS burst size, packet size, and the
 * split of QEC local memory between the transmitter and receiver.
 */
static void qec_init(struct bigmac *bp)
{
	struct platform_device *qec_op = bp->qec_op;
	void __iomem *gregs = bp->gregs;
	u8 bsizes = bp->bigmac_bursts;
	u32 regval;

	/* 64byte bursts do not work at the moment, do
	 * not even try to enable them.  -DaveM
	 */
	if (bsizes & DMA_BURST32)
		regval = GLOB_CTRL_B32;
	else
		regval = GLOB_CTRL_B16;
	sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
	sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);

	/* All of memsize is given to bigmac. */
	sbus_writel(resource_size(&qec_op->resource[1]),
		    gregs + GLOB_MSIZE);

	/* Half to the transmitter, half to the receiver. */
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_TSIZE);
	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
		    gregs + GLOB_RSIZE);
}

#define TX_RESET_TRIES     32
#define RX_RESET_TRIES     32

/* Stop the transmitter and wait for its config register to clear. */
static void bigmac_tx_reset(void __iomem *bregs)
{
	int tries = TX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_TXCFG);

	/* The fifo threshold bit is read-only and does
	 * not clear. -DaveM
	 */
	while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
	       --tries != 0)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
		printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_TXCFG));
	}
}

/* Stop the receiver and wait for its config register to clear. */
static void bigmac_rx_reset(void __iomem *bregs)
{
	int tries = RX_RESET_TRIES;

	sbus_writel(0, bregs + BMAC_RXCFG);
	while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
		udelay(20);

	if (!tries) {
		printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
		printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
		       sbus_readl(bregs + BMAC_RXCFG));
	}
}

/* Reset the transmitter and receiver. */
static void bigmac_stop(struct bigmac *bp)
{
	bigmac_tx_reset(bp->bregs);
	bigmac_rx_reset(bp->bregs);
}

/* Fold the chip's error counters into enet_stats and clear them.
 * The counter registers are clear-on-write.
 */
static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
{
	struct net_device_stats *stats = &bp->enet_stats;

	stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
	sbus_writel(0, bregs + BMAC_RCRCECTR);

	stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
	sbus_writel(0, bregs + BMAC_UNALECTR);

	stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
	sbus_writel(0, bregs + BMAC_GLECTR);

	stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);

	stats->collisions +=
		(sbus_readl(bregs + BMAC_EXCTR) +
		 sbus_readl(bregs + BMAC_LTCTR));
	sbus_writel(0, bregs + BMAC_EXCTR);
	sbus_writel(0, bregs + BMAC_LTCTR);
}

/* Free every skb still attached to the RX and TX rings. */
static void bigmac_clean_rings(struct bigmac *bp)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (bp->rx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->rx_skbs[i]);
			bp->rx_skbs[i] = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (bp->tx_skbs[i] != NULL) {
			dev_kfree_skb_any(bp->tx_skbs[i]);
			bp->tx_skbs[i] = NULL;
		}
	}
}

/* (Re)build the descriptor rings: drop old buffers, allocate and DMA-map
 * fresh RX skbs, and zero the TX descriptors.
 */
static void bigmac_init_rings(struct bigmac *bp, int from_irq)
{
	struct bmac_init_block *bb = bp->bmac_block;
	int i;
	gfp_t gfp_flags = GFP_KERNEL;

	if (from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;

	/* Free any skippy bufs left around in the rings. */
	bigmac_clean_rings(bp);

	/* Now get new skbufs for the receive ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
		if (!skb)
			continue;

		bp->rx_skbs[i] = skb;

		/* Because we reserve afterwards. */
		skb_put(skb, ETH_FRAME_LEN);
		skb_reserve(skb, 34);

		bb->be_rxd[i].rx_addr =
			dma_map_single(&bp->bigmac_op->dev,
				       skb->data,
				       RX_BUF_ALLOC_SIZE - 34,
				       DMA_FROM_DEVICE);
		bb->be_rxd[i].rx_flags =
			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
}

#define MGMT_CLKON  (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)

/* Clock the MDIO management interface through 20 idle cycles to put the
 * transceiver in a known state.
 */
static void idle_transceiver(void __iomem *tregs)
{
	int i = 20;

	while (i--) {
		sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	}
}

/* Bit-bang one MDIO data bit out to the transceiver, clocking it in
 * with a low-then-high DCLOCK transition.
 */
static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
{
	if (bp->tcvr_type == internal) {
		bit = (bit & 1) << 3;
		sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		bit = (bit & 1) << 2;
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
	}
}

/* Bit-bang one MDIO data bit in from the transceiver, sampling after
 * the DCLOCK high transition.
 */
static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
			    tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
	} else {
		printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
	}
	return retval;
}

/* Variant of read_tcvr_bit that samples the data line BEFORE raising
 * DCLOCK; used on the external-PHY read path.
 */
static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
{
	int retval = 0;

	if (bp->tcvr_type == internal) {
		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else if (bp->tcvr_type == external) {
		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
		sbus_readl(tregs + TCVR_MPAL);
	} else {
		printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
	}
	return retval;
}

/* Shift out the low five bits of @byte, MSB first (PHY address and
 * register fields of an MII frame are 5 bits wide).
 */
static void put_tcvr_byte(struct bigmac *bp,
			  void __iomem *tregs,
			  unsigned int byte)
{
	int shift = 4;

	do {
		write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
		shift -= 1;
	} while (shift >= 0);
}

/* Bit-banged MII write of @val to PHY register @reg. */
static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
			      int reg, unsigned short val)
{
	int shift;

	reg &= 0xff;
	val &= 0xffff;
	switch(bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		/* NOTE(review): message says "tcvr_read" but this is the
		 * write path; the string is left as-is here.
		 */
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	shift = 15;
	do {
		write_tcvr_bit(bp, tregs, (val >> shift) & 1);
		shift -= 1;
	} while (shift >= 0);
}

/* Bit-banged MII read of PHY register @reg; returns 0xffff if the
 * transceiver type is unknown.
 */
static unsigned short bigmac_tcvr_read(struct bigmac *bp,
				       void __iomem *tregs,
				       int reg)
{
	unsigned short retval = 0;

	reg &= 0xff;
	switch(bp->tcvr_type) {
	case internal:
	case external:
		break;

	default:
		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
		return 0xffff;
	}

	idle_transceiver(tregs);
	write_tcvr_bit(bp, tregs, 0);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 1);
	write_tcvr_bit(bp, tregs, 0);

	put_tcvr_byte(bp, tregs,
		      ((bp->tcvr_type == internal) ?
		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));

	put_tcvr_byte(bp, tregs, reg);

	if (bp->tcvr_type == external) {
		int shift = 15;

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit2(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
		(void) read_tcvr_bit2(bp, tregs);
	} else {
		int shift = 15;

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);

		do {
			int tmp;

			tmp = read_tcvr_bit(bp, tregs);
			retval |= ((tmp & 1) << shift);
			shift -= 1;
		} while (shift >= 0);

		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
		(void) read_tcvr_bit(bp, tregs);
	}
	return retval;
}

/* Probe whether the internal or external transceiver is present (only
 * the bit for the present one sticks) and record it in bp->tcvr_type.
 */
static void bigmac_tcvr_init(struct bigmac *bp)
{
	void __iomem *tregs = bp->tregs;
	u32 mpal;

	idle_transceiver(tregs);
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
		    tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);

	/* Only the bit for the present transceiver (internal or
	 * external) will stick, set them both and see what stays.
	 */
	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
	sbus_readl(tregs + TCVR_MPAL);
	udelay(20);

	mpal = sbus_readl(tregs + TCVR_MPAL);
	if (mpal & MGMT_PAL_EXT_MDIO) {
		bp->tcvr_type = external;
		sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else if (mpal & MGMT_PAL_INT_MDIO) {
		bp->tcvr_type = internal;
		sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
			      TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
			    tregs + TCVR_TPAL);
		sbus_readl(tregs + TCVR_TPAL);
	} else {
		printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
		       "external MDIO available!\n");
		printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
		       sbus_readl(tregs + TCVR_MPAL),
		       sbus_readl(tregs + TCVR_TPAL));
	}
}

static int bigmac_init_hw(struct bigmac *, int);

/* Link autosense helper: if we were trying 100baseT, reset the PHY and
 * fall back to 10baseT (returns 0); return -1 once all speeds failed.
 */
static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
{
	if (bp->sw_bmcr & BMCR_SPEED100) {
		int timeout;

		/* Reset the PHY. */
		bp->sw_bmcr	= (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
		bp->sw_bmcr	= (BMCR_RESET);
		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);

		timeout = 64;
		while (--timeout) {
			bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
			if ((bp->sw_bmcr & BMCR_RESET) == 0)
				break;
			udelay(20);
		}
		if (timeout == 0)
			printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);

		/* Now we try 10baseT. */
		bp->sw_bmcr &= ~(BMCR_SPEED100);
		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
		return 0;
	}

	/* We've tried them all. */
	return -1;
}

/* Periodic link-state poll during autosense; reports link-up, steps to
 * the next speed on timeout, or re-inits the chip when out of options.
 */
static void bigmac_timer(unsigned long data)
{
	struct bigmac *bp = (struct bigmac *) data;
	void __iomem *tregs = bp->tregs;
	int restart_timer = 0;

	bp->timer_ticks++;
	if (bp->timer_state == ltrywait) {
		bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR);
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
		if (bp->sw_bmsr & BMSR_LSTATUS) {
			printk(KERN_INFO "%s: Link is now up at %s.\n",
			       bp->dev->name,
			       (bp->sw_bmcr & BMCR_SPEED100) ?
			       "100baseT" : "10baseT");
			bp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (bp->timer_ticks >= 4) {
				int ret;

				ret = try_next_permutation(bp, tregs);
				if (ret == -1) {
					printk(KERN_ERR "%s: Link down, cable problem?\n",
					       bp->dev->name);
					ret = bigmac_init_hw(bp, 0);
					if (ret) {
						printk(KERN_ERR "%s: Error, cannot re-init the "
						       "BigMAC.\n", bp->dev->name);
					}
					return;
				}
				bp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
	} else {
		/* Can't happen.... */
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
		       bp->dev->name);
		restart_timer = 0;
		bp->timer_ticks = 0;
		bp->timer_state = asleep; /* foo on you */
	}

	if (restart_timer != 0) {
		bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
		add_timer(&bp->bigmac_timer);
	}
}

/* Well, really we just force the chip into 100baseT then
 * 10baseT, each time checking for a link status.
 */
static void bigmac_begin_auto_negotiation(struct bigmac *bp)
{
	void __iomem *tregs = bp->tregs;
	int timeout;

	/* Grab new software copies of PHY registers. */
	bp->sw_bmsr	= bigmac_tcvr_read(bp, tregs, MII_BMSR);
	bp->sw_bmcr	= bigmac_tcvr_read(bp, tregs, MII_BMCR);

	/* Reset the PHY. */
	bp->sw_bmcr	= (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
	bp->sw_bmcr	= (BMCR_RESET);
	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);

	timeout = 64;
	while (--timeout) {
		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
		if ((bp->sw_bmcr & BMCR_RESET) == 0)
			break;
		udelay(20);
	}
	if (timeout == 0)
		printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);

	bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);

	/* First we try 100baseT. */
	bp->sw_bmcr |= BMCR_SPEED100;
	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);

	bp->timer_state = ltrywait;
	bp->timer_ticks = 0;
	bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
	bp->bigmac_timer.data = (unsigned long) bp;
	bp->bigmac_timer.function = bigmac_timer;
	add_timer(&bp->bigmac_timer);
}

/* Full chip (re)initialization: reset QEC and BigMAC, rebuild rings,
 * program MAC address/filters/thresholds, enable TX/RX, and start
 * link autosense.  Returns 0 (always succeeds from here on).
 */
static int bigmac_init_hw(struct bigmac *bp, int from_irq)
{
	void __iomem *gregs        = bp->gregs;
	void __iomem *cregs        = bp->creg;
	void __iomem *bregs        = bp->bregs;
	unsigned char *e = &bp->dev->dev_addr[0];

	/* Latch current counters into statistics. */
	bigmac_get_counters(bp, bregs);

	/* Reset QEC. */
	qec_global_reset(gregs);

	/* Init QEC. */
	qec_init(bp);

	/* Alloc and reset the tx/rx descriptor chains. */
	bigmac_init_rings(bp, from_irq);

	/* Initialize the PHY. */
	bigmac_tcvr_init(bp);

	/* Stop transmitter and receiver. */
	bigmac_stop(bp);

	/* Set hardware ethernet address. */
	sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
	sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
	sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);

	/* Clear the hash table until mc upload occurs. */
	sbus_writel(0, bregs + BMAC_HTABLE3);
	sbus_writel(0, bregs + BMAC_HTABLE2);
	sbus_writel(0, bregs + BMAC_HTABLE1);
	sbus_writel(0, bregs + BMAC_HTABLE0);

	/* Enable Big Mac hash table filter. */
	sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
		    bregs + BMAC_RXCFG);
	udelay(20);

	/* Ok, configure the Big Mac transmitter. */
	sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);

	/* The HME docs recommend to use the 10LSB of our MAC here. */
	sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
		    bregs + BMAC_RSEED);

	/* Enable the output drivers no matter what. */
	sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
		    bregs + BMAC_XIFCFG);

	/* Tell the QEC where the ring descriptors are. */
	sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
		    cregs + CREG_RXDS);
	sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
		    cregs + CREG_TXDS);

	/* Setup the FIFO pointers into QEC local memory. */
	sbus_writel(0, cregs + CREG_RXRBUFPTR);
	sbus_writel(0, cregs + CREG_RXWBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXRBUFPTR);
	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
		    cregs + CREG_TXWBUFPTR);

	/* Tell bigmac what interrupts we don't want to hear about. */
	sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
		    bregs + BMAC_IMASK);

	/* Enable the various other irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(0, cregs + CREG_TIMASK);
	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(0, cregs + CREG_BMASK);

	/* Set jam size to a reasonable default. */
	sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);

	/* Clear collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* Enable transmitter and receiver. */
	sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
		    bregs + BMAC_TXCFG);
	sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
		    bregs + BMAC_RXCFG);

	/* Ok, start detecting link speed/duplex. */
	bigmac_begin_auto_negotiation(bp);

	/* Success. */
	return 0;
}

/* Error interrupts get sent here. */
static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
{
	printk(KERN_ERR "bigmac_is_medium_rare: ");
	if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
		if (qec_status & GLOB_STAT_ER)
			printk("QEC_ERROR, ");
		if (qec_status & GLOB_STAT_BM)
			printk("QEC_BMAC_ERROR, ");
	}
	if (bmac_status & CREG_STAT_ERRORS) {
		if (bmac_status & CREG_STAT_BERROR)
			printk("BMAC_ERROR, ");
		if (bmac_status & CREG_STAT_TXDERROR)
			printk("TXD_ERROR, ");
		if (bmac_status & CREG_STAT_TXLERR)
			printk("TX_LATE_ERROR, ");
		if (bmac_status & CREG_STAT_TXPERR)
			printk("TX_PARITY_ERROR, ");
		if (bmac_status & CREG_STAT_TXSERR)
			printk("TX_SBUS_ERROR, ");
		if (bmac_status & CREG_STAT_RXDROP)
			printk("RX_DROP_ERROR, ");
		if (bmac_status & CREG_STAT_RXSMALL)
			printk("RX_SMALL_ERROR, ");
		if (bmac_status & CREG_STAT_RXLERR)
			printk("RX_LATE_ERROR, ");
		if (bmac_status & CREG_STAT_RXPERR)
			printk("RX_PARITY_ERROR, ");
		if (bmac_status & CREG_STAT_RXSERR)
			printk("RX_SBUS_ERROR, ");
	}
	printk(" RESET\n");
	bigmac_init_hw(bp, 1);
}

/* BigMAC transmit complete service routines. */
static void bigmac_tx(struct bigmac *bp)
{
	struct be_txd *txbase = &bp->bmac_block->be_txd[0];
	struct net_device *dev = bp->dev;
	int elem;

	spin_lock(&bp->lock);

	elem = bp->tx_old;
	DTX(("bigmac_tx: tx_old[%d] ", elem));
	while (elem != bp->tx_new) {
		struct sk_buff *skb;
		struct be_txd *this = &txbase[elem];

		DTX(("this(%p) [flags(%08x)addr(%08x)]",
		     this, this->tx_flags, this->tx_addr));

		/* Descriptor still owned by the chip: stop reaping. */
		if (this->tx_flags & TXD_OWN)
			break;
		skb = bp->tx_skbs[elem];
		bp->enet_stats.tx_packets++;
		bp->enet_stats.tx_bytes += skb->len;
		dma_unmap_single(&bp->bigmac_op->dev,
				 this->tx_addr, skb->len,
				 DMA_TO_DEVICE);

		DTX(("skb(%p) ", skb));
		bp->tx_skbs[elem] = NULL;
		dev_kfree_skb_irq(skb);

		elem = NEXT_TX(elem);
	}
	DTX((" DONE, tx_old=%d\n", elem));
	bp->tx_old = elem;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(bp) > 0)
		netif_wake_queue(bp->dev);

	spin_unlock(&bp->lock);
}

/* BigMAC receive complete service routines. */
static void bigmac_rx(struct bigmac *bp)
{
	struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
	struct be_rxd *this;
	int elem = bp->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		int len = (flags & RXD_LENGTH); /* FCS not included */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			bp->enet_stats.rx_errors++;
			bp->enet_stats.rx_length_errors++;

	drop_it:
			/* Return it to the BigMAC. */
			bp->enet_stats.rx_dropped++;
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
			goto next;
		}
		skb = bp->rx_skbs[elem];
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			dma_unmap_single(&bp->bigmac_op->dev,
					 this->rx_addr,
					 RX_BUF_ALLOC_SIZE - 34,
					 DMA_FROM_DEVICE);
			bp->rx_skbs[elem] = new_skb;
			skb_put(new_skb, ETH_FRAME_LEN);
			skb_reserve(new_skb, 34);
			this->rx_addr =
				dma_map_single(&bp->bigmac_op->dev,
					       new_skb->data,
					       RX_BUF_ALLOC_SIZE - 34,
					       DMA_FROM_DEVICE);
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			/* Small frame: copy into a fresh skb and reuse
			 * the original DMA buffer.
			 */
			struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			dma_sync_single_for_cpu(&bp->bigmac_op->dev,
						this->rx_addr, len,
						DMA_FROM_DEVICE);
			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
			dma_sync_single_for_device(&bp->bigmac_op->dev,
						   this->rx_addr, len,
						   DMA_FROM_DEVICE);

			/* Reuse original ring buffer. */
			this->rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			skb = copy_skb;
		}

		/* No checksums done by the BigMAC ;-( */
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_rx(skb);
		bp->enet_stats.rx_packets++;
		bp->enet_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	bp->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
		       bp->dev->name);
}

/* Top-level interrupt handler: dispatch error / TX-done / RX-done. */
static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
{
	struct bigmac *bp = (struct bigmac *) dev_id;
	u32 qec_status, bmac_status;

	DIRQ(("bigmac_interrupt: "));

	/* Latch status registers now. */
	bmac_status = sbus_readl(bp->creg + CREG_STAT);
	qec_status = sbus_readl(bp->gregs + GLOB_STAT);

	DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
	if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
	   (bmac_status & CREG_STAT_ERRORS))
		bigmac_is_medium_rare(bp, qec_status, bmac_status);

	if (bmac_status & CREG_STAT_TXIRQ)
		bigmac_tx(bp);

	if (bmac_status & CREG_STAT_RXIRQ)
		bigmac_rx(bp);

	return IRQ_HANDLED;
}

/* ndo_open: grab the IRQ and bring the hardware up. */
static int bigmac_open(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int ret;

	ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
	if (ret) {
		printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
		return ret;
	}
	init_timer(&bp->bigmac_timer);
	ret = bigmac_init_hw(bp, 0);
	if (ret)
		free_irq(dev->irq, bp);
	return ret;
}

/* ndo_stop: stop the link timer, quiesce the chip, free resources. */
static int bigmac_close(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	del_timer(&bp->bigmac_timer);
	bp->timer_state = asleep;
	bp->timer_ticks = 0;

	bigmac_stop(bp);
	bigmac_clean_rings(bp);
	free_irq(dev->irq, bp);
	return 0;
}

/* ndo_tx_timeout: full re-init, then let the stack transmit again. */
static void bigmac_tx_timeout(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_init_hw(bp, 0);
	netif_wake_queue(dev);
}

/* Put a packet on the wire. */
static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	int len, entry;
	u32 mapping;

	len = skb->len;
	mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
				 len, DMA_TO_DEVICE);

	/* Avoid a race... */
	spin_lock_irq(&bp->lock);
	entry = bp->tx_new;
	DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
	bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
	bp->tx_skbs[entry] = skb;
	bp->bmac_block->be_txd[entry].tx_addr = mapping;
	bp->bmac_block->be_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	bp->tx_new = NEXT_TX(entry);
	if (TX_BUFFS_AVAIL(bp) <= 0)
		netif_stop_queue(dev);
	spin_unlock_irq(&bp->lock);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);

	return NETDEV_TX_OK;
}

/* ndo_get_stats: refresh hardware counters, return accumulated stats. */
static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	bigmac_get_counters(bp, bp->bregs);
	return &bp->enet_stats;
}

/* ndo_set_rx_mode: program promiscuous / all-multi / hash filtering. */
static void bigmac_set_multicast(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);
	void __iomem *bregs = bp->bregs;
	struct netdev_hw_addr *ha;
	int i;
	u32 tmp, crc;

	/* Disable the receiver.  The bit self-clears when
	 * the operation is complete.
	 */
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp &= ~(BIGMAC_RXCFG_ENABLE);
	sbus_writel(tmp, bregs + BMAC_RXCFG);
	while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
		udelay(20);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writel(0xffff, bregs + BMAC_HTABLE0);
		sbus_writel(0xffff, bregs + BMAC_HTABLE1);
		sbus_writel(0xffff, bregs + BMAC_HTABLE2);
		sbus_writel(0xffff, bregs + BMAC_HTABLE3);
	} else if (dev->flags & IFF_PROMISC) {
		tmp = sbus_readl(bregs + BMAC_RXCFG);
		tmp |= BIGMAC_RXCFG_PMISC;
		sbus_writel(tmp, bregs + BMAC_RXCFG);
	} else {
		u16 hash_table[4];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
		sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
		sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
		sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
	}

	/* Re-enable the receiver. */
	tmp = sbus_readl(bregs + BMAC_RXCFG);
	tmp |= BIGMAC_RXCFG_ENABLE;
	sbus_writel(tmp, bregs + BMAC_RXCFG);
}

/* Ethtool support...
 */
/* ethtool get_drvinfo: report the driver name and version strings. */
static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "sunbmac", sizeof(info->driver));
	strlcpy(info->version, "2.0", sizeof(info->version));
}

/* ethtool get_link: read BMSR from the transceiver (cached in sw_bmsr)
 * and report the link-status bit.  The lock serializes MII access.
 */
static u32 bigmac_get_link(struct net_device *dev)
{
	struct bigmac *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR);
	spin_unlock_irq(&bp->lock);

	return (bp->sw_bmsr & BMSR_LSTATUS);
}

static const struct ethtool_ops bigmac_ethtool_ops = {
	.get_drvinfo		= bigmac_get_drvinfo,
	.get_link		= bigmac_get_link,
};

static const struct net_device_ops bigmac_ops = {
	.ndo_open		= bigmac_open,
	.ndo_stop		= bigmac_close,
	.ndo_start_xmit		= bigmac_start_xmit,
	.ndo_get_stats		= bigmac_get_stats,
	.ndo_set_rx_mode	= bigmac_set_multicast,
	.ndo_tx_timeout		= bigmac_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/* One-time setup of a BigMAC instance: map the QEC and BigMAC register
 * banks, allocate the descriptor DVMA block, and register the netdev.
 * @op:     the BigMAC SBUS platform device.
 * @qec_op: its parent QEC platform device.
 * Returns 0 on success, -ENOMEM/-ENODEV on failure (all partial setup
 * is undone at fail_and_cleanup).
 */
static int bigmac_ether_init(struct platform_device *op,
			     struct platform_device *qec_op)
{
	static int version_printed;
	struct net_device *dev;
	u8 bsizes, bsizes_more;
	struct bigmac *bp;
	int i;

	/* Get a new device struct for this interface. */
	dev = alloc_etherdev(sizeof(struct bigmac));
	if (!dev)
		return -ENOMEM;

	/* Announce the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* Ethernet address comes from the machine's IDPROM. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = idprom->id_ethaddr[i];

	/* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
	bp = netdev_priv(dev);
	bp->qec_op = qec_op;
	bp->bigmac_op = op;

	SET_NETDEV_DEV(dev, &op->dev);

	spin_lock_init(&bp->lock);

	/* Map in QEC global control registers. */
	bp->gregs = of_ioremap(&qec_op->resource[0], 0,
			       GLOB_REG_SIZE, "BigMAC QEC GLobal Regs");
	if (!bp->gregs) {
		printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
		goto fail_and_cleanup;
	}

	/* Make sure QEC is in BigMAC mode. */
	if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
		printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
		goto fail_and_cleanup;
	}

	/* Reset the QEC. */
	if (qec_global_reset(bp->gregs))
		goto fail_and_cleanup;

	/* Get supported SBUS burst sizes.
	 * NOTE(review): both reads below fetch "burst-sizes" from the same
	 * qec_op node; presumably one was meant to come from a different
	 * node in the device tree hierarchy — confirm against mainline.
	 */
	bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
	bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);

	/* Intersect the two masks and fall back to <=32-byte bursts when
	 * the property is absent or 16/32-byte bursts are unsupported.
	 */
	bsizes &= 0xff;
	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);
	bp->bigmac_bursts = bsizes;

	/* Perform QEC initialization. */
	qec_init(bp);

	/* Map in the BigMAC channel registers. */
	bp->creg = of_ioremap(&op->resource[0], 0,
			      CREG_REG_SIZE, "BigMAC QEC Channel Regs");
	if (!bp->creg) {
		printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
		goto fail_and_cleanup;
	}

	/* Map in the BigMAC control registers. */
	bp->bregs = of_ioremap(&op->resource[1], 0,
			       BMAC_REG_SIZE, "BigMAC Primary Regs");
	if (!bp->bregs) {
		printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
		goto fail_and_cleanup;
	}

	/* Map in the BigMAC transceiver registers, this is how you poke at
	 * the BigMAC's PHY.
	 */
	bp->tregs = of_ioremap(&op->resource[2], 0,
			       TCVR_REG_SIZE, "BigMAC Transceiver Regs");
	if (!bp->tregs) {
		printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
		goto fail_and_cleanup;
	}

	/* Stop the BigMAC. */
	bigmac_stop(bp);

	/* Allocate transmit/receive descriptor DVMA block. */
	bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
					    PAGE_SIZE,
					    &bp->bblock_dvma, GFP_ATOMIC);
	if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
		goto fail_and_cleanup;

	/* Get the board revision of this BigMAC. */
	bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
					      "board-version", 1);

	/* Init auto-negotiation timer state. */
	init_timer(&bp->bigmac_timer);
	bp->timer_state = asleep;
	bp->timer_ticks = 0;

	/* Backlink to generic net device struct. */
	bp->dev = dev;

	/* Set links to our BigMAC open and close routines. */
	dev->ethtool_ops = &bigmac_ethtool_ops;
	dev->netdev_ops = &bigmac_ops;
	dev->watchdog_timeo = 5*HZ;

	/* Finish net device registration. */
	dev->irq = bp->bigmac_op->archdata.irqs[0];
	dev->dma = 0;

	if (register_netdev(dev)) {
		printk(KERN_ERR "BIGMAC: Cannot register device.\n");
		goto fail_and_cleanup;
	}

	dev_set_drvdata(&bp->bigmac_op->dev, bp);

	printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	return 0;

fail_and_cleanup:
	/* Something went wrong, undo whatever we did so far. */
	/* Free register mappings if any. */
	if (bp->gregs)
		of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
	if (bp->creg)
		of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
	if (bp->bregs)
		of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
	if (bp->tregs)
		of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);

	if (bp->bmac_block)
		dma_free_coherent(&bp->bigmac_op->dev,
				  PAGE_SIZE,
				  bp->bmac_block,
				  bp->bblock_dvma);

	/* This also frees the co-located private data */
	free_netdev(dev);
	return -ENODEV;
}

/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
 * the latter.
*/ static int bigmac_sbus_probe(struct platform_device *op) { struct device *parent = op->dev.parent; struct platform_device *qec_op; qec_op = to_platform_device(parent); return bigmac_ether_init(op, qec_op); } static int bigmac_sbus_remove(struct platform_device *op) { struct bigmac *bp = dev_get_drvdata(&op->dev); struct device *parent = op->dev.parent; struct net_device *net_dev = bp->dev; struct platform_device *qec_op; qec_op = to_platform_device(parent); unregister_netdev(net_dev); of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id bigmac_sbus_match[] = { { .name = "be", }, {}, }; MODULE_DEVICE_TABLE(of, bigmac_sbus_match); static struct platform_driver bigmac_sbus_driver = { .driver = { .name = "sunbmac", .owner = THIS_MODULE, .of_match_table = bigmac_sbus_match, }, .probe = bigmac_sbus_probe, .remove = bigmac_sbus_remove, }; module_platform_driver(bigmac_sbus_driver);
gpl-2.0
SerenityS/android_kernel_samsung_a8elte
sound/pci/ac97/ac97_codec.c
2083
95641
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Universal interface for Audio Codec '97 * * For more details look to AC '97 component specification revision 2.2 * by Intel Corporation (http://developer.intel.com). * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/tlv.h> #include <sound/ac97_codec.h> #include <sound/asoundef.h> #include <sound/initval.h> #include "ac97_id.h" #include "ac97_patch.c" MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Universal interface for Audio Codec '97"); MODULE_LICENSE("GPL"); static bool enable_loopback; module_param(enable_loopback, bool, 0444); MODULE_PARM_DESC(enable_loopback, "Enable AC97 ADC/DAC Loopback Control"); #ifdef CONFIG_SND_AC97_POWER_SAVE static int power_save = CONFIG_SND_AC97_POWER_SAVE_DEFAULT; module_param(power_save, int, 0644); MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " "(in second, 0 = disable)."); #endif /* */ struct ac97_codec_id { unsigned int id; unsigned int mask; const char *name; int (*patch)(struct snd_ac97 *ac97); int (*mpatch)(struct snd_ac97 *ac97); unsigned int flags; }; static const struct 
ac97_codec_id snd_ac97_codec_id_vendors[] = { { 0x41445300, 0xffffff00, "Analog Devices", NULL, NULL }, { 0x414b4d00, 0xffffff00, "Asahi Kasei", NULL, NULL }, { 0x414c4300, 0xffffff00, "Realtek", NULL, NULL }, { 0x414c4700, 0xffffff00, "Realtek", NULL, NULL }, /* * This is an _inofficial_ Aztech Labs entry * (value might differ from unknown official Aztech ID), * currently used by the AC97 emulation of the almost-AC97 PCI168 card. */ { 0x415a5400, 0xffffff00, "Aztech Labs (emulated)", NULL, NULL }, { 0x434d4900, 0xffffff00, "C-Media Electronics", NULL, NULL }, { 0x43525900, 0xffffff00, "Cirrus Logic", NULL, NULL }, { 0x43585400, 0xffffff00, "Conexant", NULL, NULL }, { 0x44543000, 0xffffff00, "Diamond Technology", NULL, NULL }, { 0x454d4300, 0xffffff00, "eMicro", NULL, NULL }, { 0x45838300, 0xffffff00, "ESS Technology", NULL, NULL }, { 0x48525300, 0xffffff00, "Intersil", NULL, NULL }, { 0x49434500, 0xffffff00, "ICEnsemble", NULL, NULL }, { 0x49544500, 0xffffff00, "ITE Tech.Inc", NULL, NULL }, { 0x4e534300, 0xffffff00, "National Semiconductor", NULL, NULL }, { 0x50534300, 0xffffff00, "Philips", NULL, NULL }, { 0x53494c00, 0xffffff00, "Silicon Laboratory", NULL, NULL }, { 0x53544d00, 0xffffff00, "STMicroelectronics", NULL, NULL }, { 0x54524100, 0xffffff00, "TriTech", NULL, NULL }, { 0x54584e00, 0xffffff00, "Texas Instruments", NULL, NULL }, { 0x56494100, 0xffffff00, "VIA Technologies", NULL, NULL }, { 0x57454300, 0xffffff00, "Winbond", NULL, NULL }, { 0x574d4c00, 0xffffff00, "Wolfson", NULL, NULL }, { 0x594d4800, 0xffffff00, "Yamaha", NULL, NULL }, { 0x83847600, 0xffffff00, "SigmaTel", NULL, NULL }, { 0, 0, NULL, NULL, NULL } }; static const struct ac97_codec_id snd_ac97_codec_ids[] = { { 0x41445303, 0xffffffff, "AD1819", patch_ad1819, NULL }, { 0x41445340, 0xffffffff, "AD1881", patch_ad1881, NULL }, { 0x41445348, 0xffffffff, "AD1881A", patch_ad1881, NULL }, { 0x41445360, 0xffffffff, "AD1885", patch_ad1885, NULL }, { 0x41445361, 0xffffffff, "AD1886", patch_ad1886, 
NULL }, { 0x41445362, 0xffffffff, "AD1887", patch_ad1881, NULL }, { 0x41445363, 0xffffffff, "AD1886A", patch_ad1881, NULL }, { 0x41445368, 0xffffffff, "AD1888", patch_ad1888, NULL }, { 0x41445370, 0xffffffff, "AD1980", patch_ad1980, NULL }, { 0x41445372, 0xffffffff, "AD1981A", patch_ad1981a, NULL }, { 0x41445374, 0xffffffff, "AD1981B", patch_ad1981b, NULL }, { 0x41445375, 0xffffffff, "AD1985", patch_ad1985, NULL }, { 0x41445378, 0xffffffff, "AD1986", patch_ad1986, NULL }, { 0x414b4d00, 0xffffffff, "AK4540", NULL, NULL }, { 0x414b4d01, 0xffffffff, "AK4542", NULL, NULL }, { 0x414b4d02, 0xffffffff, "AK4543", NULL, NULL }, { 0x414b4d06, 0xffffffff, "AK4544A", NULL, NULL }, { 0x414b4d07, 0xffffffff, "AK4545", NULL, NULL }, { 0x414c4300, 0xffffff00, "ALC100,100P", NULL, NULL }, { 0x414c4710, 0xfffffff0, "ALC200,200P", NULL, NULL }, { 0x414c4721, 0xffffffff, "ALC650D", NULL, NULL }, /* already patched */ { 0x414c4722, 0xffffffff, "ALC650E", NULL, NULL }, /* already patched */ { 0x414c4723, 0xffffffff, "ALC650F", NULL, NULL }, /* already patched */ { 0x414c4720, 0xfffffff0, "ALC650", patch_alc650, NULL }, { 0x414c4730, 0xffffffff, "ALC101", NULL, NULL }, { 0x414c4740, 0xfffffff0, "ALC202", NULL, NULL }, { 0x414c4750, 0xfffffff0, "ALC250", NULL, NULL }, { 0x414c4760, 0xfffffff0, "ALC655", patch_alc655, NULL }, { 0x414c4770, 0xfffffff0, "ALC203", patch_alc203, NULL }, { 0x414c4781, 0xffffffff, "ALC658D", NULL, NULL }, /* already patched */ { 0x414c4780, 0xfffffff0, "ALC658", patch_alc655, NULL }, { 0x414c4790, 0xfffffff0, "ALC850", patch_alc850, NULL }, { 0x415a5401, 0xffffffff, "AZF3328", patch_aztech_azf3328, NULL }, { 0x434d4941, 0xffffffff, "CMI9738", patch_cm9738, NULL }, { 0x434d4961, 0xffffffff, "CMI9739", patch_cm9739, NULL }, { 0x434d4969, 0xffffffff, "CMI9780", patch_cm9780, NULL }, { 0x434d4978, 0xffffffff, "CMI9761A", patch_cm9761, NULL }, { 0x434d4982, 0xffffffff, "CMI9761B", patch_cm9761, NULL }, { 0x434d4983, 0xffffffff, "CMI9761A+", patch_cm9761, NULL }, { 
0x43525900, 0xfffffff8, "CS4297", NULL, NULL }, { 0x43525910, 0xfffffff8, "CS4297A", patch_cirrus_spdif, NULL }, { 0x43525920, 0xfffffff8, "CS4298", patch_cirrus_spdif, NULL }, { 0x43525928, 0xfffffff8, "CS4294", NULL, NULL }, { 0x43525930, 0xfffffff8, "CS4299", patch_cirrus_cs4299, NULL }, { 0x43525948, 0xfffffff8, "CS4201", NULL, NULL }, { 0x43525958, 0xfffffff8, "CS4205", patch_cirrus_spdif, NULL }, { 0x43525960, 0xfffffff8, "CS4291", NULL, NULL }, { 0x43525970, 0xfffffff8, "CS4202", NULL, NULL }, { 0x43585421, 0xffffffff, "HSD11246", NULL, NULL }, // SmartMC II { 0x43585428, 0xfffffff8, "Cx20468", patch_conexant, NULL }, // SmartAMC fixme: the mask might be different { 0x43585430, 0xffffffff, "Cx20468-31", patch_conexant, NULL }, { 0x43585431, 0xffffffff, "Cx20551", patch_cx20551, NULL }, { 0x44543031, 0xfffffff0, "DT0398", NULL, NULL }, { 0x454d4328, 0xffffffff, "EM28028", NULL, NULL }, // same as TR28028? { 0x45838308, 0xffffffff, "ESS1988", NULL, NULL }, { 0x48525300, 0xffffff00, "HMP9701", NULL, NULL }, { 0x49434501, 0xffffffff, "ICE1230", NULL, NULL }, { 0x49434511, 0xffffffff, "ICE1232", NULL, NULL }, // alias VIA VT1611A? 
{ 0x49434514, 0xffffffff, "ICE1232A", NULL, NULL }, { 0x49434551, 0xffffffff, "VT1616", patch_vt1616, NULL }, { 0x49434552, 0xffffffff, "VT1616i", patch_vt1616, NULL }, // VT1616 compatible (chipset integrated) { 0x49544520, 0xffffffff, "IT2226E", NULL, NULL }, { 0x49544561, 0xffffffff, "IT2646E", patch_it2646, NULL }, { 0x4e534300, 0xffffffff, "LM4540,43,45,46,48", NULL, NULL }, // only guess --jk { 0x4e534331, 0xffffffff, "LM4549", NULL, NULL }, { 0x4e534350, 0xffffffff, "LM4550", patch_lm4550, NULL }, // volume wrap fix { 0x50534304, 0xffffffff, "UCB1400", patch_ucb1400, NULL }, { 0x53494c20, 0xffffffe0, "Si3036,8", mpatch_si3036, mpatch_si3036, AC97_MODEM_PATCH }, { 0x53544d02, 0xffffffff, "ST7597", NULL, NULL }, { 0x54524102, 0xffffffff, "TR28022", NULL, NULL }, { 0x54524103, 0xffffffff, "TR28023", NULL, NULL }, { 0x54524106, 0xffffffff, "TR28026", NULL, NULL }, { 0x54524108, 0xffffffff, "TR28028", patch_tritech_tr28028, NULL }, // added by xin jin [07/09/99] { 0x54524123, 0xffffffff, "TR28602", NULL, NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)] { 0x54584e20, 0xffffffff, "TLC320AD9xC", NULL, NULL }, { 0x56494161, 0xffffffff, "VIA1612A", NULL, NULL }, // modified ICE1232 with S/PDIF { 0x56494170, 0xffffffff, "VIA1617A", patch_vt1617a, NULL }, // modified VT1616 with S/PDIF { 0x56494182, 0xffffffff, "VIA1618", patch_vt1618, NULL }, { 0x57454301, 0xffffffff, "W83971D", NULL, NULL }, { 0x574d4c00, 0xffffffff, "WM9701,WM9701A", NULL, NULL }, { 0x574d4C03, 0xffffffff, "WM9703,WM9707,WM9708,WM9717", patch_wolfson03, NULL}, { 0x574d4C04, 0xffffffff, "WM9704M,WM9704Q", patch_wolfson04, NULL}, { 0x574d4C05, 0xffffffff, "WM9705,WM9710", patch_wolfson05, NULL}, { 0x574d4C09, 0xffffffff, "WM9709", NULL, NULL}, { 0x574d4C12, 0xffffffff, "WM9711,WM9712,WM9715", patch_wolfson11, NULL}, { 0x574d4c13, 0xffffffff, "WM9713,WM9714", patch_wolfson13, NULL, AC97_DEFAULT_POWER_OFF}, { 0x594d4800, 0xffffffff, "YMF743", patch_yamaha_ymf743, NULL }, { 0x594d4802, 
0xffffffff, "YMF752", NULL, NULL }, { 0x594d4803, 0xffffffff, "YMF753", patch_yamaha_ymf753, NULL }, { 0x83847600, 0xffffffff, "STAC9700,83,84", patch_sigmatel_stac9700, NULL }, { 0x83847604, 0xffffffff, "STAC9701,3,4,5", NULL, NULL }, { 0x83847605, 0xffffffff, "STAC9704", NULL, NULL }, { 0x83847608, 0xffffffff, "STAC9708,11", patch_sigmatel_stac9708, NULL }, { 0x83847609, 0xffffffff, "STAC9721,23", patch_sigmatel_stac9721, NULL }, { 0x83847644, 0xffffffff, "STAC9744", patch_sigmatel_stac9744, NULL }, { 0x83847650, 0xffffffff, "STAC9750,51", NULL, NULL }, // patch? { 0x83847652, 0xffffffff, "STAC9752,53", NULL, NULL }, // patch? { 0x83847656, 0xffffffff, "STAC9756,57", patch_sigmatel_stac9756, NULL }, { 0x83847658, 0xffffffff, "STAC9758,59", patch_sigmatel_stac9758, NULL }, { 0x83847666, 0xffffffff, "STAC9766,67", NULL, NULL }, // patch? { 0, 0, NULL, NULL, NULL } }; static void update_power_regs(struct snd_ac97 *ac97); #ifdef CONFIG_SND_AC97_POWER_SAVE #define ac97_is_power_save_mode(ac97) \ ((ac97->scaps & AC97_SCAP_POWER_SAVE) && power_save) #else #define ac97_is_power_save_mode(ac97) 0 #endif /* * I/O routines */ static int snd_ac97_valid_reg(struct snd_ac97 *ac97, unsigned short reg) { /* filter some registers for buggy codecs */ switch (ac97->id) { case AC97_ID_ST_AC97_ID4: if (reg == 0x08) return 0; /* fall through */ case AC97_ID_ST7597: if (reg == 0x22 || reg == 0x7a) return 1; /* fall through */ case AC97_ID_AK4540: case AC97_ID_AK4542: if (reg <= 0x1c || reg == 0x20 || reg == 0x26 || reg >= 0x7c) return 1; return 0; case AC97_ID_AD1819: /* AD1819 */ case AC97_ID_AD1881: /* AD1881 */ case AC97_ID_AD1881A: /* AD1881A */ if (reg >= 0x3a && reg <= 0x6e) /* 0x59 */ return 0; return 1; case AC97_ID_AD1885: /* AD1885 */ case AC97_ID_AD1886: /* AD1886 */ case AC97_ID_AD1886A: /* AD1886A - !!verify!! --jk */ case AC97_ID_AD1887: /* AD1887 - !!verify!! 
--jk */ if (reg == 0x5a) return 1; if (reg >= 0x3c && reg <= 0x6e) /* 0x59 */ return 0; return 1; case AC97_ID_STAC9700: case AC97_ID_STAC9704: case AC97_ID_STAC9705: case AC97_ID_STAC9708: case AC97_ID_STAC9721: case AC97_ID_STAC9744: case AC97_ID_STAC9756: if (reg <= 0x3a || reg >= 0x5a) return 1; return 0; } return 1; } /** * snd_ac97_write - write a value on the given register * @ac97: the ac97 instance * @reg: the register to change * @value: the value to set * * Writes a value on the given register. This will invoke the write * callback directly after the register check. * This function doesn't change the register cache unlike * #snd_ca97_write_cache(), so use this only when you don't want to * reflect the change to the suspend/resume state. */ void snd_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short value) { if (!snd_ac97_valid_reg(ac97, reg)) return; if ((ac97->id & 0xffffff00) == AC97_ID_ALC100) { /* Fix H/W bug of ALC100/100P */ if (reg == AC97_MASTER || reg == AC97_HEADPHONE) ac97->bus->ops->write(ac97, AC97_RESET, 0); /* reset audio codec */ } ac97->bus->ops->write(ac97, reg, value); } EXPORT_SYMBOL(snd_ac97_write); /** * snd_ac97_read - read a value from the given register * * @ac97: the ac97 instance * @reg: the register to read * * Reads a value from the given register. This will invoke the read * callback directly after the register check. * * Return: The read value. */ unsigned short snd_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { if (!snd_ac97_valid_reg(ac97, reg)) return 0; return ac97->bus->ops->read(ac97, reg); } /* read a register - return the cached value if already read */ static inline unsigned short snd_ac97_read_cache(struct snd_ac97 *ac97, unsigned short reg) { if (! 
test_bit(reg, ac97->reg_accessed)) { ac97->regs[reg] = ac97->bus->ops->read(ac97, reg); // set_bit(reg, ac97->reg_accessed); } return ac97->regs[reg]; } EXPORT_SYMBOL(snd_ac97_read); /** * snd_ac97_write_cache - write a value on the given register and update the cache * @ac97: the ac97 instance * @reg: the register to change * @value: the value to set * * Writes a value on the given register and updates the register * cache. The cached values are used for the cached-read and the * suspend/resume. */ void snd_ac97_write_cache(struct snd_ac97 *ac97, unsigned short reg, unsigned short value) { if (!snd_ac97_valid_reg(ac97, reg)) return; mutex_lock(&ac97->reg_mutex); ac97->regs[reg] = value; ac97->bus->ops->write(ac97, reg, value); set_bit(reg, ac97->reg_accessed); mutex_unlock(&ac97->reg_mutex); } EXPORT_SYMBOL(snd_ac97_write_cache); /** * snd_ac97_update - update the value on the given register * @ac97: the ac97 instance * @reg: the register to change * @value: the value to set * * Compares the value with the register cache and updates the value * only when the value is changed. * * Return: 1 if the value is changed, 0 if no change, or a negative * code on failure. */ int snd_ac97_update(struct snd_ac97 *ac97, unsigned short reg, unsigned short value) { int change; if (!snd_ac97_valid_reg(ac97, reg)) return -EINVAL; mutex_lock(&ac97->reg_mutex); change = ac97->regs[reg] != value; if (change) { ac97->regs[reg] = value; ac97->bus->ops->write(ac97, reg, value); } set_bit(reg, ac97->reg_accessed); mutex_unlock(&ac97->reg_mutex); return change; } EXPORT_SYMBOL(snd_ac97_update); /** * snd_ac97_update_bits - update the bits on the given register * @ac97: the ac97 instance * @reg: the register to change * @mask: the bit-mask to change * @value: the value to set * * Updates the masked-bits on the given register only when the value * is changed. * * Return: 1 if the bits are changed, 0 if no change, or a negative * code on failure. 
*/ int snd_ac97_update_bits(struct snd_ac97 *ac97, unsigned short reg, unsigned short mask, unsigned short value) { int change; if (!snd_ac97_valid_reg(ac97, reg)) return -EINVAL; mutex_lock(&ac97->reg_mutex); change = snd_ac97_update_bits_nolock(ac97, reg, mask, value); mutex_unlock(&ac97->reg_mutex); return change; } EXPORT_SYMBOL(snd_ac97_update_bits); /* no lock version - see snd_ac97_update_bits() */ int snd_ac97_update_bits_nolock(struct snd_ac97 *ac97, unsigned short reg, unsigned short mask, unsigned short value) { int change; unsigned short old, new; old = snd_ac97_read_cache(ac97, reg); new = (old & ~mask) | (value & mask); change = old != new; if (change) { ac97->regs[reg] = new; ac97->bus->ops->write(ac97, reg, new); } set_bit(reg, ac97->reg_accessed); return change; } static int snd_ac97_ad18xx_update_pcm_bits(struct snd_ac97 *ac97, int codec, unsigned short mask, unsigned short value) { int change; unsigned short old, new, cfg; mutex_lock(&ac97->page_mutex); old = ac97->spec.ad18xx.pcmreg[codec]; new = (old & ~mask) | (value & mask); change = old != new; if (change) { mutex_lock(&ac97->reg_mutex); cfg = snd_ac97_read_cache(ac97, AC97_AD_SERIAL_CFG); ac97->spec.ad18xx.pcmreg[codec] = new; /* select single codec */ ac97->bus->ops->write(ac97, AC97_AD_SERIAL_CFG, (cfg & ~0x7000) | ac97->spec.ad18xx.unchained[codec] | ac97->spec.ad18xx.chained[codec]); /* update PCM bits */ ac97->bus->ops->write(ac97, AC97_PCM, new); /* select all codecs */ ac97->bus->ops->write(ac97, AC97_AD_SERIAL_CFG, cfg | 0x7000); mutex_unlock(&ac97->reg_mutex); } mutex_unlock(&ac97->page_mutex); return change; } /* * Controls */ static int snd_ac97_info_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct ac97_enum *e = (struct ac97_enum *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = e->shift_l == e->shift_r ? 
1 : 2; uinfo->value.enumerated.items = e->mask; if (uinfo->value.enumerated.item > e->mask - 1) uinfo->value.enumerated.item = e->mask - 1; strcpy(uinfo->value.enumerated.name, e->texts[uinfo->value.enumerated.item]); return 0; } static int snd_ac97_get_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); struct ac97_enum *e = (struct ac97_enum *)kcontrol->private_value; unsigned short val, bitmask; for (bitmask = 1; bitmask < e->mask; bitmask <<= 1) ; val = snd_ac97_read_cache(ac97, e->reg); ucontrol->value.enumerated.item[0] = (val >> e->shift_l) & (bitmask - 1); if (e->shift_l != e->shift_r) ucontrol->value.enumerated.item[1] = (val >> e->shift_r) & (bitmask - 1); return 0; } static int snd_ac97_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); struct ac97_enum *e = (struct ac97_enum *)kcontrol->private_value; unsigned short val; unsigned short mask, bitmask; for (bitmask = 1; bitmask < e->mask; bitmask <<= 1) ; if (ucontrol->value.enumerated.item[0] > e->mask - 1) return -EINVAL; val = ucontrol->value.enumerated.item[0] << e->shift_l; mask = (bitmask - 1) << e->shift_l; if (e->shift_l != e->shift_r) { if (ucontrol->value.enumerated.item[1] > e->mask - 1) return -EINVAL; val |= ucontrol->value.enumerated.item[1] << e->shift_r; mask |= (bitmask - 1) << e->shift_r; } return snd_ac97_update_bits(ac97, e->reg, mask, val); } /* save/restore ac97 v2.3 paging */ static int snd_ac97_page_save(struct snd_ac97 *ac97, int reg, struct snd_kcontrol *kcontrol) { int page_save = -1; if ((kcontrol->private_value & (1<<25)) && (ac97->ext_id & AC97_EI_REV_MASK) >= AC97_EI_REV_23 && (reg >= 0x60 && reg < 0x70)) { unsigned short page = (kcontrol->private_value >> 26) & 0x0f; mutex_lock(&ac97->page_mutex); /* lock paging */ page_save = snd_ac97_read(ac97, AC97_INT_PAGING) & AC97_PAGE_MASK; 
snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, page); } return page_save; } static void snd_ac97_page_restore(struct snd_ac97 *ac97, int page_save) { if (page_save >= 0) { snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, page_save); mutex_unlock(&ac97->page_mutex); /* unlock paging */ } } /* volume and switch controls */ static int snd_ac97_info_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; int shift = (kcontrol->private_value >> 8) & 0x0f; int rshift = (kcontrol->private_value >> 12) & 0x0f; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = shift == rshift ? 1 : 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_ac97_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0x0f; int rshift = (kcontrol->private_value >> 12) & 0x0f; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0x01; int page_save; page_save = snd_ac97_page_save(ac97, reg, kcontrol); ucontrol->value.integer.value[0] = (snd_ac97_read_cache(ac97, reg) >> shift) & mask; if (shift != rshift) ucontrol->value.integer.value[1] = (snd_ac97_read_cache(ac97, reg) >> rshift) & mask; if (invert) { ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; if (shift != rshift) ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1]; } snd_ac97_page_restore(ac97, page_save); return 0; } static int snd_ac97_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0x0f; int rshift = (kcontrol->private_value >> 12) & 
0x0f; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0x01; int err, page_save; unsigned short val, val2, val_mask; page_save = snd_ac97_page_save(ac97, reg, kcontrol); val = (ucontrol->value.integer.value[0] & mask); if (invert) val = mask - val; val_mask = mask << shift; val = val << shift; if (shift != rshift) { val2 = (ucontrol->value.integer.value[1] & mask); if (invert) val2 = mask - val2; val_mask |= mask << rshift; val |= val2 << rshift; } err = snd_ac97_update_bits(ac97, reg, val_mask, val); snd_ac97_page_restore(ac97, page_save); #ifdef CONFIG_SND_AC97_POWER_SAVE /* check analog mixer power-down */ if ((val_mask & AC97_PD_EAPD) && (kcontrol->private_value & (1<<30))) { if (val & AC97_PD_EAPD) ac97->power_up &= ~(1 << (reg>>1)); else ac97->power_up |= 1 << (reg>>1); update_power_regs(ac97); } #endif return err; } static const struct snd_kcontrol_new snd_ac97_controls_master_mono[2] = { AC97_SINGLE("Master Mono Playback Switch", AC97_MASTER_MONO, 15, 1, 1), AC97_SINGLE("Master Mono Playback Volume", AC97_MASTER_MONO, 0, 31, 1) }; static const struct snd_kcontrol_new snd_ac97_controls_tone[2] = { AC97_SINGLE("Tone Control - Bass", AC97_MASTER_TONE, 8, 15, 1), AC97_SINGLE("Tone Control - Treble", AC97_MASTER_TONE, 0, 15, 1) }; static const struct snd_kcontrol_new snd_ac97_controls_pc_beep[2] = { AC97_SINGLE("Beep Playback Switch", AC97_PC_BEEP, 15, 1, 1), AC97_SINGLE("Beep Playback Volume", AC97_PC_BEEP, 1, 15, 1) }; static const struct snd_kcontrol_new snd_ac97_controls_mic_boost = AC97_SINGLE("Mic Boost (+20dB)", AC97_MIC, 6, 1, 0); static const char* std_rec_sel[] = {"Mic", "CD", "Video", "Aux", "Line", "Mix", "Mix Mono", "Phone"}; static const char* std_3d_path[] = {"pre 3D", "post 3D"}; static const char* std_mix[] = {"Mix", "Mic"}; static const char* std_mic[] = {"Mic1", "Mic2"}; static const struct ac97_enum std_enum[] = { AC97_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 8, std_rec_sel), 
AC97_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, std_3d_path), AC97_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 9, 2, std_mix), AC97_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 8, 2, std_mic), }; static const struct snd_kcontrol_new snd_ac97_control_capture_src = AC97_ENUM("Capture Source", std_enum[0]); static const struct snd_kcontrol_new snd_ac97_control_capture_vol = AC97_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 15, 0); static const struct snd_kcontrol_new snd_ac97_controls_mic_capture[2] = { AC97_SINGLE("Mic Capture Switch", AC97_REC_GAIN_MIC, 15, 1, 1), AC97_SINGLE("Mic Capture Volume", AC97_REC_GAIN_MIC, 0, 15, 0) }; enum { AC97_GENERAL_PCM_OUT = 0, AC97_GENERAL_STEREO_ENHANCEMENT, AC97_GENERAL_3D, AC97_GENERAL_LOUDNESS, AC97_GENERAL_MONO, AC97_GENERAL_MIC, AC97_GENERAL_LOOPBACK }; static const struct snd_kcontrol_new snd_ac97_controls_general[7] = { AC97_ENUM("PCM Out Path & Mute", std_enum[1]), AC97_SINGLE("Simulated Stereo Enhancement", AC97_GENERAL_PURPOSE, 14, 1, 0), AC97_SINGLE("3D Control - Switch", AC97_GENERAL_PURPOSE, 13, 1, 0), AC97_SINGLE("Loudness (bass boost)", AC97_GENERAL_PURPOSE, 12, 1, 0), AC97_ENUM("Mono Output Select", std_enum[2]), AC97_ENUM("Mic Select", std_enum[3]), AC97_SINGLE("ADC/DAC Loopback", AC97_GENERAL_PURPOSE, 7, 1, 0) }; static const struct snd_kcontrol_new snd_ac97_controls_3d[2] = { AC97_SINGLE("3D Control - Center", AC97_3D_CONTROL, 8, 15, 0), AC97_SINGLE("3D Control - Depth", AC97_3D_CONTROL, 0, 15, 0) }; static const struct snd_kcontrol_new snd_ac97_controls_center[2] = { AC97_SINGLE("Center Playback Switch", AC97_CENTER_LFE_MASTER, 7, 1, 1), AC97_SINGLE("Center Playback Volume", AC97_CENTER_LFE_MASTER, 0, 31, 1) }; static const struct snd_kcontrol_new snd_ac97_controls_lfe[2] = { AC97_SINGLE("LFE Playback Switch", AC97_CENTER_LFE_MASTER, 15, 1, 1), AC97_SINGLE("LFE Playback Volume", AC97_CENTER_LFE_MASTER, 8, 31, 1) }; static const struct snd_kcontrol_new snd_ac97_control_eapd = AC97_SINGLE("External Amplifier", AC97_POWERDOWN, 15, 1, 
1); static const struct snd_kcontrol_new snd_ac97_controls_modem_switches[2] = { AC97_SINGLE("Off-hook Switch", AC97_GPIO_STATUS, 0, 1, 0), AC97_SINGLE("Caller ID Switch", AC97_GPIO_STATUS, 2, 1, 0) }; /* change the existing EAPD control as inverted */ static void set_inv_eapd(struct snd_ac97 *ac97, struct snd_kcontrol *kctl) { kctl->private_value = AC97_SINGLE_VALUE(AC97_POWERDOWN, 15, 1, 0); snd_ac97_update_bits(ac97, AC97_POWERDOWN, (1<<15), (1<<15)); /* EAPD up */ ac97->scaps |= AC97_SCAP_INV_EAPD; } static int snd_ac97_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_ac97_spdif_cmask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.iec958.status[0] = IEC958_AES0_PROFESSIONAL | IEC958_AES0_NONAUDIO | IEC958_AES0_CON_EMPHASIS_5015 | IEC958_AES0_CON_NOT_COPYRIGHT; ucontrol->value.iec958.status[1] = IEC958_AES1_CON_CATEGORY | IEC958_AES1_CON_ORIGINAL; ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS; return 0; } static int snd_ac97_spdif_pmask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { /* FIXME: AC'97 spec doesn't say which bits are used for what */ ucontrol->value.iec958.status[0] = IEC958_AES0_PROFESSIONAL | IEC958_AES0_NONAUDIO | IEC958_AES0_PRO_FS | IEC958_AES0_PRO_EMPHASIS_5015; return 0; } static int snd_ac97_spdif_default_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); mutex_lock(&ac97->reg_mutex); ucontrol->value.iec958.status[0] = ac97->spdif_status & 0xff; ucontrol->value.iec958.status[1] = (ac97->spdif_status >> 8) & 0xff; ucontrol->value.iec958.status[2] = (ac97->spdif_status >> 16) & 0xff; ucontrol->value.iec958.status[3] = (ac97->spdif_status >> 24) & 0xff; mutex_unlock(&ac97->reg_mutex); return 0; } static int snd_ac97_spdif_default_put(struct snd_kcontrol *kcontrol, struct 
snd_ctl_elem_value *ucontrol)
{
	struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
	unsigned int new = 0;	/* canonical IEC958 status word to cache */
	unsigned short val = 0;	/* generic AC97_SPDIF register encoding */
	int change;

	new = val = ucontrol->value.iec958.status[0] & (IEC958_AES0_PROFESSIONAL|IEC958_AES0_NONAUDIO);
	if (ucontrol->value.iec958.status[0] & IEC958_AES0_PROFESSIONAL) {
		/* professional mode: map PRO_FS to the 2-bit rate field */
		new |= ucontrol->value.iec958.status[0] & (IEC958_AES0_PRO_FS|IEC958_AES0_PRO_EMPHASIS_5015);
		switch (new & IEC958_AES0_PRO_FS) {
		case IEC958_AES0_PRO_FS_44100: val |= 0<<12; break;
		case IEC958_AES0_PRO_FS_48000: val |= 2<<12; break;
		case IEC958_AES0_PRO_FS_32000: val |= 3<<12; break;
		default: val |= 1<<12; break;
		}
		if ((new & IEC958_AES0_PRO_EMPHASIS) == IEC958_AES0_PRO_EMPHASIS_5015)
			val |= 1<<3;
	} else {
		/* consumer mode: fold emphasis/copyright/category/rate bits */
		new |= ucontrol->value.iec958.status[0] & (IEC958_AES0_CON_EMPHASIS_5015|IEC958_AES0_CON_NOT_COPYRIGHT);
		new |= ((ucontrol->value.iec958.status[1] & (IEC958_AES1_CON_CATEGORY|IEC958_AES1_CON_ORIGINAL)) << 8);
		new |= ((ucontrol->value.iec958.status[3] & IEC958_AES3_CON_FS) << 24);
		if ((new & IEC958_AES0_CON_EMPHASIS) == IEC958_AES0_CON_EMPHASIS_5015)
			val |= 1<<3;
		if (!(new & IEC958_AES0_CON_NOT_COPYRIGHT))
			val |= 1<<2;
		val |= ((new >> 8) & 0xff) << 4;	// category + original
		switch ((new >> 24) & 0xff) {
		case IEC958_AES3_CON_FS_44100: val |= 0<<12; break;
		case IEC958_AES3_CON_FS_48000: val |= 2<<12; break;
		case IEC958_AES3_CON_FS_32000: val |= 3<<12; break;
		default: val |= 1<<12; break;
		}
	}

	mutex_lock(&ac97->reg_mutex);
	change = ac97->spdif_status != new;
	ac97->spdif_status = new;

	if (ac97->flags & AC97_CS_SPDIF) {
		/* Cirrus variant uses a different rate encoding */
		int x = (val >> 12) & 0x03;
		switch (x) {
		case 0: x = 1; break;	// 44.1
		case 2: x = 0; break;	// 48.0
		default: x = 0; break;	// illegal.
		}
		change |= snd_ac97_update_bits_nolock(ac97, AC97_CSR_SPDIF, 0x3fff, ((val & 0xcfff) | (x << 12)));
	} else if (ac97->flags & AC97_CX_SPDIF) {
		/* Conexant variant: only AC3/PCM + copyright bits */
		int v;
		v = new & (IEC958_AES0_CON_EMPHASIS_5015|IEC958_AES0_CON_NOT_COPYRIGHT) ? 0 : AC97_CXR_COPYRGT;
		v |= new & IEC958_AES0_NONAUDIO ? AC97_CXR_SPDIF_AC3 : AC97_CXR_SPDIF_PCM;
		change |= snd_ac97_update_bits_nolock(ac97, AC97_CXR_AUDIO_MISC, AC97_CXR_SPDIF_MASK | AC97_CXR_COPYRGT, v);
	} else if (ac97->id == AC97_ID_YMF743) {
		/* Yamaha YMF743 keeps the bits in its own DIT control register */
		change |= snd_ac97_update_bits_nolock(ac97, AC97_YMF7X3_DIT_CTRL, 0xff38, ((val << 4) & 0xff00) | ((val << 2) & 0x0038));
	} else {
		/* generic path: S/PDIF must be disabled while changing AC97_SPDIF */
		unsigned short extst = snd_ac97_read_cache(ac97, AC97_EXTENDED_STATUS);
		snd_ac97_update_bits_nolock(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, 0); /* turn off */
		change |= snd_ac97_update_bits_nolock(ac97, AC97_SPDIF, 0x3fff, val);
		if (extst & AC97_EA_SPDIF) {
			snd_ac97_update_bits_nolock(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, AC97_EA_SPDIF); /* turn on again */
		}
	}
	mutex_unlock(&ac97->reg_mutex);
	return change;
}

/* put handler for the SPSA (slot assignment) control; toggles S/PDIF off
 * around the register update like the generic path above */
static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xff;
	int shift = (kcontrol->private_value >> 8) & 0xff;
	int mask = (kcontrol->private_value >> 16) & 0xff;
	// int invert = (kcontrol->private_value >> 24) & 0xff;
	unsigned short value, old, new;
	int change;

	value = (ucontrol->value.integer.value[0] & mask);
	mutex_lock(&ac97->reg_mutex);
	mask <<= shift;
	value <<= shift;
	old = snd_ac97_read_cache(ac97, reg);
	new = (old & ~mask) | value;
	change = old != new;
	if (change) {
		unsigned short extst = snd_ac97_read_cache(ac97, AC97_EXTENDED_STATUS);
		snd_ac97_update_bits_nolock(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, 0); /* turn off */
		change = snd_ac97_update_bits_nolock(ac97, reg, mask, value);
		if (extst & AC97_EA_SPDIF)
			snd_ac97_update_bits_nolock(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, AC97_EA_SPDIF); /* turn on again */
	}
	mutex_unlock(&ac97->reg_mutex);
	return change;
}

/* the 5 standard S/PDIF playback controls (masks, default, switch, SPSA) */
static const struct snd_kcontrol_new snd_ac97_controls_spdif[5] = {
	{
		.access = SNDRV_CTL_ELEM_ACCESS_READ,
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
		.info = snd_ac97_spdif_mask_info,
		.get = snd_ac97_spdif_cmask_get,
	},
	{
		.access = SNDRV_CTL_ELEM_ACCESS_READ,
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
		.info = snd_ac97_spdif_mask_info,
		.get = snd_ac97_spdif_pmask_get,
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
		.info = snd_ac97_spdif_mask_info,
		.get = snd_ac97_spdif_default_get,
		.put = snd_ac97_spdif_default_put,
	},
	AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH),AC97_EXTENDED_STATUS, 2, 1, 0),
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "AC97-SPSA",
		.info = snd_ac97_info_volsw,
		.get = snd_ac97_get_volsw,
		.put = snd_ac97_put_spsa,
		.private_value = AC97_SINGLE_VALUE(AC97_EXTENDED_STATUS, 4, 3, 0)
	},
};

/* AD18xx multi-codec controls: private_value packs codec index, left/right
 * shifts and the bit mask for the shadowed PCM registers */
#define AD18XX_PCM_BITS(xname, codec, lshift, rshift, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_ac97_ad18xx_pcm_info_bits, \
  .get = snd_ac97_ad18xx_pcm_get_bits, .put = snd_ac97_ad18xx_pcm_put_bits, \
  .private_value = (codec) | ((lshift) << 8) | ((rshift) << 12) | ((mask) << 16) }

/* element info: boolean when mask == 1, integer 0..mask otherwise; two
 * channels only when the left/right shifts differ and stereo mutes work */
static int snd_ac97_ad18xx_pcm_info_bits(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
	int mask = (kcontrol->private_value >> 16) & 0x0f;
	int lshift = (kcontrol->private_value >> 8) & 0x0f;
	int rshift = (kcontrol->private_value >> 12) & 0x0f;

	uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER;
	if (lshift != rshift && (ac97->flags & AC97_STEREO_MUTES))
		uinfo->count = 2;
	else
		uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = mask;
	return 0;
}

/* read the cached per-codec pcmreg; values are stored inverted in hardware */
static int snd_ac97_ad18xx_pcm_get_bits(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
	int codec = kcontrol->private_value & 3;
	int lshift = (kcontrol->private_value >> 8) & 0x0f;
	int rshift = (kcontrol->private_value >> 12) & 0x0f;
	int mask = (kcontrol->private_value >> 16) & 0xff;

	ucontrol->value.integer.value[0] = mask - ((ac97->spec.ad18xx.pcmreg[codec] >> lshift) & mask);
	if (lshift != rshift && (ac97->flags & AC97_STEREO_MUTES))
		ucontrol->value.integer.value[1] = mask - ((ac97->spec.ad18xx.pcmreg[codec] >> rshift) & mask);
	return 0;
}

/* write back the (re-inverted) bits via the AD18xx shadow-update helper */
static int snd_ac97_ad18xx_pcm_put_bits(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
	int codec = kcontrol->private_value & 3;
	int lshift = (kcontrol->private_value >> 8) & 0x0f;
	int rshift = (kcontrol->private_value >> 12) & 0x0f;
	int mask = (kcontrol->private_value >> 16) & 0xff;
	unsigned short val, valmask;

	val = (mask - (ucontrol->value.integer.value[0] & mask)) << lshift;
	valmask = mask << lshift;
	if (lshift != rshift && (ac97->flags & AC97_STEREO_MUTES)) {
		val |= (mask - (ucontrol->value.integer.value[1] & mask)) << rshift;
		valmask |= mask << rshift;
	}
	return snd_ac97_ad18xx_update_pcm_bits(ac97, codec, valmask, val);
}

/* AD18xx stereo PCM volume control; private_value is just the codec index */
#define AD18XX_PCM_VOLUME(xname, codec) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_ac97_ad18xx_pcm_info_volume, \
  .get = snd_ac97_ad18xx_pcm_get_volume, .put = snd_ac97_ad18xx_pcm_put_volume, \
  .private_value = codec }

/* element info for the AD18xx PCM volume: stereo, 0..31 */
static int snd_ac97_ad18xx_pcm_info_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 31;
	return 0;
}

/* read the stereo AD18xx PCM volume from the cached pcmreg (inverted) */
static int snd_ac97_ad18xx_pcm_get_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
	int codec = kcontrol->private_value & 3;

	mutex_lock(&ac97->page_mutex);
	ucontrol->value.integer.value[0] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 0) & 31);
	ucontrol->value.integer.value[1] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 8) & 31);
	mutex_unlock(&ac97->page_mutex);
	return 0;
}

/* write the stereo AD18xx PCM volume; note channel 0 lands in the high byte */
static int snd_ac97_ad18xx_pcm_put_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
	int codec = kcontrol->private_value & 3;
	unsigned short val1, val2;

	val1 = 31 - (ucontrol->value.integer.value[0] & 31);
	val2 = 31 - (ucontrol->value.integer.value[1] & 31);
	return snd_ac97_ad18xx_update_pcm_bits(ac97, codec, 0x1f1f, (val1 << 8) | val2);
}

/* AD18xx per-codec playback control tables */
static const struct snd_kcontrol_new snd_ac97_controls_ad18xx_pcm[2] = {
AD18XX_PCM_BITS("PCM Playback Switch", 0, 15, 7, 1),
AD18XX_PCM_VOLUME("PCM Playback Volume", 0)
};
static const struct snd_kcontrol_new snd_ac97_controls_ad18xx_surround[2] = {
AD18XX_PCM_BITS("Surround Playback Switch", 1, 15, 7, 1),
AD18XX_PCM_VOLUME("Surround Playback Volume", 1)
};
static const struct snd_kcontrol_new snd_ac97_controls_ad18xx_center[2] = {
AD18XX_PCM_BITS("Center Playback Switch", 2, 15, 15, 1),
AD18XX_PCM_BITS("Center Playback Volume", 2, 8, 8, 31)
};
static const struct snd_kcontrol_new snd_ac97_controls_ad18xx_lfe[2] = {
AD18XX_PCM_BITS("LFE Playback Switch", 2, 7, 7, 1),
AD18XX_PCM_BITS("LFE Playback Volume", 2, 0, 0, 31)
};

/*
 *
 */

static void snd_ac97_powerdown(struct snd_ac97 *ac97);

/* release a bus: proc entries, pcms array, optional private_free hook */
static int snd_ac97_bus_free(struct snd_ac97_bus *bus)
{
	if (bus) {
		snd_ac97_bus_proc_done(bus);
		kfree(bus->pcms);
		if (bus->private_free)
			bus->private_free(bus);
		kfree(bus);
	}
	return 0;
}

/* snd_device dev_free callback for the bus */
static int snd_ac97_bus_dev_free(struct snd_device *device)
{
	struct snd_ac97_bus *bus = device->device_data;
	return snd_ac97_bus_free(bus);
}

/* release a codec: cancel deferred power work, proc files, bus slot */
static int snd_ac97_free(struct snd_ac97 *ac97)
{
	if (ac97) {
#ifdef CONFIG_SND_AC97_POWER_SAVE
		cancel_delayed_work_sync(&ac97->power_work);
#endif
		snd_ac97_proc_done(ac97);
		if (ac97->bus)
			ac97->bus->codec[ac97->num] = NULL;
		if (ac97->private_free)
			ac97->private_free(ac97);
		kfree(ac97);
	}
	return 0;
}

/* snd_device dev_free callback for a codec */
static int snd_ac97_dev_free(struct snd_device *device)
{
	struct snd_ac97 *ac97 = device->device_data;
	snd_ac97_powerdown(ac97); /* for avoiding click noises during shut down */
	return snd_ac97_free(ac97);
}

/* probe whether a volume register really exists: check capability bits
 * first, then see if the mute bit sticks after writing it */
static int snd_ac97_try_volume_mix(struct snd_ac97 * ac97, int reg)
{
	unsigned short val, mask = AC97_MUTE_MASK_MONO;

	if (! snd_ac97_valid_reg(ac97, reg))
		return 0;

	switch (reg) {
	case AC97_MASTER_TONE:
		return ac97->caps & AC97_BC_BASS_TREBLE ? 1 : 0;
	case AC97_HEADPHONE:
		return ac97->caps & AC97_BC_HEADPHONE ? 1 : 0;
	case AC97_REC_GAIN_MIC:
		return ac97->caps & AC97_BC_DEDICATED_MIC ? 1 : 0;
	case AC97_3D_CONTROL:
		if (ac97->caps & AC97_BC_3D_TECH_ID_MASK) {
			val = snd_ac97_read(ac97, reg);
			/* if nonzero - fixed and we can't set it */
			return val == 0;
		}
		return 0;
	case AC97_CENTER_LFE_MASTER:	/* center */
		if ((ac97->ext_id & AC97_EI_CDAC) == 0)
			return 0;
		break;
	case AC97_CENTER_LFE_MASTER+1:	/* lfe */
		if ((ac97->ext_id & AC97_EI_LDAC) == 0)
			return 0;
		reg = AC97_CENTER_LFE_MASTER;
		mask = 0x0080;
		break;
	case AC97_SURROUND_MASTER:
		if ((ac97->ext_id & AC97_EI_SDAC) == 0)
			return 0;
		break;
	}

	val = snd_ac97_read(ac97, reg);
	if (!(val & mask)) {
		/* nothing seems to be here - mute flag is not set */
		/* try another test */
		snd_ac97_write_cache(ac97, reg, val | mask);
		val = snd_ac97_read(ac97, reg);
		val = snd_ac97_read(ac97, reg);
		if (!(val & mask))
			return 0;	/* nothing here */
	}
	return 1;		/* success, useable */
}

/* probe how many volume bits a register implements; fills lo_max/hi_max
 * for the low and high byte channels (0 when a channel is absent) */
static void check_volume_resolution(struct snd_ac97 *ac97, int reg, unsigned char *lo_max, unsigned char *hi_max)
{
	unsigned short cbit[3] = { 0x20, 0x10, 0x01 };
	unsigned char max[3] = {
63, 31, 15 };
	int i;

	/* first look up the static resolution table */
	if (ac97->res_table) {
		const struct snd_ac97_res_table *tbl;
		for (tbl = ac97->res_table; tbl->reg; tbl++) {
			if (tbl->reg == reg) {
				*lo_max = tbl->bits & 0xff;
				*hi_max = (tbl->bits >> 8) & 0xff;
				return;
			}
		}
	}

	*lo_max = *hi_max = 0;
	/* otherwise probe: write a test bit per candidate width, read back */
	for (i = 0 ; i < ARRAY_SIZE(cbit); i++) {
		unsigned short val;
		snd_ac97_write(
			ac97, reg,
			AC97_MUTE_MASK_STEREO | cbit[i] | (cbit[i] << 8)
		);
		/* Do the read twice due to buffers on some ac97 codecs.
		 * e.g. The STAC9704 returns exactly what you wrote to the register
		 * if you read it immediately. This causes the detect routine to fail.
		 */
		val = snd_ac97_read(ac97, reg);
		val = snd_ac97_read(ac97, reg);
		if (! *lo_max && (val & 0x7f) == cbit[i])
			*lo_max = max[i];
		if (! *hi_max && ((val >> 8) & 0x7f) == cbit[i])
			*hi_max = max[i];
		if (*lo_max && *hi_max)
			break;
	}
}

/* check whether a single register bit is writable; restores the cache */
static int snd_ac97_try_bit(struct snd_ac97 * ac97, int reg, int bit)
{
	unsigned short mask, val, orig, res;

	mask = 1 << bit;
	orig = snd_ac97_read(ac97, reg);
	val = orig ^ mask;
	snd_ac97_write(ac97, reg, val);
	res = snd_ac97_read(ac97, reg);
	snd_ac97_write_cache(ac97, reg, orig);
	return res == val;
}

/* check the volume resolution of center/lfe */
static void snd_ac97_change_volume_params2(struct snd_ac97 * ac97, int reg, int shift, unsigned char *max)
{
	unsigned short val, val1;

	*max = 63;
	val = AC97_MUTE_MASK_STEREO | (0x20 << shift);
	snd_ac97_write(ac97, reg, val);
	val1 = snd_ac97_read(ac97, reg);
	if (val != val1) {
		*max = 31;	/* 6th bit didn't stick: 5-bit resolution */
	}
	/* reset volume to zero */
	snd_ac97_write_cache(ac97, reg, AC97_MUTE_MASK_STEREO);
}

/* map a codec-id byte to a printable character for the chip name */
static inline int printable(unsigned int x)
{
	x &= 0xff;
	if (x < ' ' || x >= 0x71) {
		if (x <= 0x89)
			return x - 0x71 + 'A';	/* 0x71..0x89 map onto letters */
		return '?';
	}
	return x;
}

/* clone a control template with the index set to this codec's number */
static struct snd_kcontrol *snd_ac97_cnew(const struct snd_kcontrol_new *_template, struct snd_ac97 * ac97)
{
	struct snd_kcontrol_new template;
	memcpy(&template, _template, sizeof(template));
	template.index = ac97->num;
	return snd_ctl_new1(&template, ac97);
}
/*
 * create mute switch(es) for normal stereo controls
 */
static int snd_ac97_cmute_new_stereo(struct snd_card *card, char *name, int reg, int check_stereo, int check_amix, struct snd_ac97 *ac97)
{
	struct snd_kcontrol *kctl;
	int err;
	unsigned short val, val1, mute_mask;

	if (! snd_ac97_valid_reg(ac97, reg))
		return 0;

	mute_mask = AC97_MUTE_MASK_MONO;
	val = snd_ac97_read(ac97, reg);
	if (check_stereo || (ac97->flags & AC97_STEREO_MUTES)) {
		/* check whether both mute bits work */
		val1 = val | AC97_MUTE_MASK_STEREO;
		snd_ac97_write(ac97, reg, val1);
		if (val1 == snd_ac97_read(ac97, reg))
			mute_mask = AC97_MUTE_MASK_STEREO;
	}
	if (mute_mask == AC97_MUTE_MASK_STEREO) {
		struct snd_kcontrol_new tmp = AC97_DOUBLE(name, reg, 15, 7, 1, 1);
		if (check_amix)
			tmp.private_value |= (1 << 30);
		tmp.index = ac97->num;
		kctl = snd_ctl_new1(&tmp, ac97);
	} else {
		struct snd_kcontrol_new tmp = AC97_SINGLE(name, reg, 15, 1, 1);
		if (check_amix)
			tmp.private_value |= (1 << 30);
		tmp.index = ac97->num;
		kctl = snd_ctl_new1(&tmp, ac97);
	}
	err = snd_ctl_add(card, kctl);
	if (err < 0)
		return err;
	/* mute as default */
	snd_ac97_write_cache(ac97, reg, val | mute_mask);
	return 0;
}

/*
 * set dB information
 */
static const DECLARE_TLV_DB_SCALE(db_scale_4bit, -4500, 300, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_5bit, -4650, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_6bit, -9450, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_5bit_12db_max, -3450, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_rec_gain, 0, 150, 0);

/* pick a dB scale matching the probed volume resolution */
static const unsigned int *find_db_scale(unsigned int maxval)
{
	switch (maxval) {
	case 0x0f: return db_scale_4bit;
	case 0x1f: return db_scale_5bit;
	case 0x3f: return db_scale_6bit;
	}
	return NULL;
}

/* attach a TLV dB table to a control and mark it readable */
static void set_tlv_db_scale(struct snd_kcontrol *kctl, const unsigned int *tlv)
{
	kctl->tlv.p = tlv;
	if (tlv)
		kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
}

/*
 * create a volume for normal stereo/mono controls
 */
static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigned int lo_max, unsigned int hi_max, struct snd_ac97 *ac97)
{
	int err;
	struct snd_kcontrol *kctl;

	if (! snd_ac97_valid_reg(ac97, reg))
		return 0;
	if (hi_max) {
		/* invert */
		struct snd_kcontrol_new tmp = AC97_DOUBLE(name, reg, 8, 0, lo_max, 1);
		tmp.index = ac97->num;
		kctl = snd_ctl_new1(&tmp, ac97);
	} else {
		/* invert */
		struct snd_kcontrol_new tmp = AC97_SINGLE(name, reg, 0, lo_max, 1);
		tmp.index = ac97->num;
		kctl = snd_ctl_new1(&tmp, ac97);
	}
	if (!kctl)
		return -ENOMEM;
	if (reg >= AC97_PHONE && reg <= AC97_PCM)
		set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
	else
		set_tlv_db_scale(kctl, find_db_scale(lo_max));
	err = snd_ctl_add(card, kctl);
	if (err < 0)
		return err;
	/* start at minimum volume with mute bits preserved */
	snd_ac97_write_cache(
		ac97, reg,
		(snd_ac97_read(ac97, reg) & AC97_MUTE_MASK_STEREO) | lo_max | (hi_max << 8)
	);
	return 0;
}

/*
 * create a mute-switch and a volume for normal stereo/mono controls
 */
static int snd_ac97_cmix_new_stereo(struct snd_card *card, const char *pfx, int reg, int check_stereo, int check_amix, struct snd_ac97 *ac97)
{
	int err;
	char name[44];
	unsigned char lo_max, hi_max;

	if (!
snd_ac97_valid_reg(ac97, reg))
		return 0;

	if (snd_ac97_try_bit(ac97, reg, 15)) {
		sprintf(name, "%s Switch", pfx);
		if ((err = snd_ac97_cmute_new_stereo(card, name, reg, check_stereo, check_amix, ac97)) < 0)
			return err;
	}
	check_volume_resolution(ac97, reg, &lo_max, &hi_max);
	if (lo_max) {
		sprintf(name, "%s Volume", pfx);
		if ((err = snd_ac97_cvol_new(card, name, reg, lo_max, hi_max, ac97)) < 0)
			return err;
	}
	return 0;
}

/* mono-mute shortcuts for the stereo helpers above */
#define snd_ac97_cmix_new(card, pfx, reg, acheck, ac97) \
	snd_ac97_cmix_new_stereo(card, pfx, reg, 0, acheck, ac97)
#define snd_ac97_cmute_new(card, name, reg, acheck, ac97) \
	snd_ac97_cmute_new_stereo(card, name, reg, 0, acheck, ac97)

static unsigned int snd_ac97_determine_spdif_rates(struct snd_ac97 *ac97);

/* probe the codec and register every mixer control it supports; order
 * matters since later probes rely on earlier register writes */
static int snd_ac97_mixer_build(struct snd_ac97 * ac97)
{
	struct snd_card *card = ac97->bus->card;
	struct snd_kcontrol *kctl;
	int err;
	unsigned int idx;
	unsigned char max;

	/* build master controls */
	/* AD claims to remove this control from AD1887, although spec v2.2 does not allow this */
	if (snd_ac97_try_volume_mix(ac97, AC97_MASTER)) {
		if (ac97->flags & AC97_HAS_NO_MASTER_VOL)
			err = snd_ac97_cmute_new(card, "Master Playback Switch", AC97_MASTER, 0, ac97);
		else
			err = snd_ac97_cmix_new(card, "Master Playback", AC97_MASTER, 0, ac97);
		if (err < 0)
			return err;
	}

	ac97->regs[AC97_CENTER_LFE_MASTER] = AC97_MUTE_MASK_STEREO;

	/* build center controls */
	if ((snd_ac97_try_volume_mix(ac97, AC97_CENTER_LFE_MASTER)) && !(ac97->flags & AC97_AD_MULTI)) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_center[0], ac97))) < 0)
			return err;
		if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_center[1], ac97))) < 0)
			return err;
		snd_ac97_change_volume_params2(ac97, AC97_CENTER_LFE_MASTER, 0, &max);
		kctl->private_value &= ~(0xff << 16);
		kctl->private_value |= (int)max << 16;
		set_tlv_db_scale(kctl, find_db_scale(max));
		snd_ac97_write_cache(ac97, AC97_CENTER_LFE_MASTER, ac97->regs[AC97_CENTER_LFE_MASTER] | max);
	}

	/* build LFE controls */
	if ((snd_ac97_try_volume_mix(ac97, AC97_CENTER_LFE_MASTER+1)) && !(ac97->flags & AC97_AD_MULTI)) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_lfe[0], ac97))) < 0)
			return err;
		if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_lfe[1], ac97))) < 0)
			return err;
		snd_ac97_change_volume_params2(ac97, AC97_CENTER_LFE_MASTER, 8, &max);
		kctl->private_value &= ~(0xff << 16);
		kctl->private_value |= (int)max << 16;
		set_tlv_db_scale(kctl, find_db_scale(max));
		snd_ac97_write_cache(ac97, AC97_CENTER_LFE_MASTER, ac97->regs[AC97_CENTER_LFE_MASTER] | max << 8);
	}

	/* build surround controls */
	if ((snd_ac97_try_volume_mix(ac97, AC97_SURROUND_MASTER)) && !(ac97->flags & AC97_AD_MULTI)) {
		/* Surround Master (0x38) is with stereo mutes */
		if ((err = snd_ac97_cmix_new_stereo(card, "Surround Playback", AC97_SURROUND_MASTER, 1, 0, ac97)) < 0)
			return err;
	}

	/* build headphone controls */
	if (snd_ac97_try_volume_mix(ac97, AC97_HEADPHONE)) {
		if ((err = snd_ac97_cmix_new(card, "Headphone Playback", AC97_HEADPHONE, 0, ac97)) < 0)
			return err;
	}

	/* build master mono controls */
	if (snd_ac97_try_volume_mix(ac97, AC97_MASTER_MONO)) {
		if ((err = snd_ac97_cmix_new(card, "Master Mono Playback", AC97_MASTER_MONO, 0, ac97)) < 0)
			return err;
	}

	/* build master tone controls */
	if (!(ac97->flags & AC97_HAS_NO_TONE)) {
		if (snd_ac97_try_volume_mix(ac97, AC97_MASTER_TONE)) {
			for (idx = 0; idx < 2; idx++) {
				if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_tone[idx], ac97))) < 0)
					return err;
				if (ac97->id == AC97_ID_YMF743 || ac97->id == AC97_ID_YMF753) {
					/* Yamaha parts implement only 3 tone bits */
					kctl->private_value &= ~(0xff << 16);
					kctl->private_value |= 7 << 16;
				}
			}
			snd_ac97_write_cache(ac97, AC97_MASTER_TONE, 0x0f0f);
		}
	}

	/* build Beep controls */
	if (!(ac97->flags & AC97_HAS_NO_PC_BEEP) &&
	    ((ac97->flags & AC97_HAS_PC_BEEP) || snd_ac97_try_volume_mix(ac97, AC97_PC_BEEP))) {
		for (idx = 0; idx < 2; idx++)
			if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_pc_beep[idx], ac97)))
snd_ac97_cnew(&snd_ac97_controls_ad18xx_surround[idx], ac97))) < 0)
					return err;
			set_tlv_db_scale(kctl, db_scale_5bit);
			ac97->spec.ad18xx.pcmreg[1] = init_val;
		}
		if (ac97->scaps & AC97_SCAP_CENTER_LFE_DAC) {
			for (idx = 0; idx < 2; idx++)
				if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_center[idx], ac97))) < 0)
					return err;
			set_tlv_db_scale(kctl, db_scale_5bit);
			for (idx = 0; idx < 2; idx++)
				if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_ad18xx_lfe[idx], ac97))) < 0)
					return err;
			set_tlv_db_scale(kctl, db_scale_5bit);
			ac97->spec.ad18xx.pcmreg[2] = init_val;
		}
		snd_ac97_write_cache(ac97, AC97_PCM, init_val);
	} else {
		if (!(ac97->flags & AC97_HAS_NO_STD_PCM)) {
			if (ac97->flags & AC97_HAS_NO_PCM_VOL)
				err = snd_ac97_cmute_new(card, "PCM Playback Switch", AC97_PCM, 0, ac97);
			else
				err = snd_ac97_cmix_new(card, "PCM Playback", AC97_PCM, 0, ac97);
			if (err < 0)
				return err;
		}
	}

	/* build Capture controls */
	if (!(ac97->flags & AC97_HAS_NO_REC_GAIN)) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_control_capture_src, ac97))) < 0)
			return err;
		if (snd_ac97_try_bit(ac97, AC97_REC_GAIN, 15)) {
			err = snd_ac97_cmute_new(card, "Capture Switch", AC97_REC_GAIN, 0, ac97);
			if (err < 0)
				return err;
		}
		if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_control_capture_vol, ac97))) < 0)
			return err;
		set_tlv_db_scale(kctl, db_scale_rec_gain);
		snd_ac97_write_cache(ac97, AC97_REC_SEL, 0x0000);
		snd_ac97_write_cache(ac97, AC97_REC_GAIN, 0x0000);
	}

	/* build MIC Capture controls */
	if (snd_ac97_try_volume_mix(ac97, AC97_REC_GAIN_MIC)) {
		for (idx = 0; idx < 2; idx++)
			if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_mic_capture[idx], ac97))) < 0)
				return err;
		set_tlv_db_scale(kctl, db_scale_rec_gain);
		snd_ac97_write_cache(ac97, AC97_REC_GAIN_MIC, 0x0000);
	}

	/* build PCM out path & mute control */
	if (snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 15)) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_PCM_OUT], ac97))) < 0)
			return err;
	}

	/* build Simulated Stereo Enhancement control */
	if (ac97->caps & AC97_BC_SIM_STEREO) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_STEREO_ENHANCEMENT], ac97))) < 0)
			return err;
	}

	/* build 3D Stereo Enhancement control */
	if (snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 13)) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_3D], ac97))) < 0)
			return err;
	}

	/* build Loudness control */
	if (ac97->caps & AC97_BC_LOUDNESS) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_LOUDNESS], ac97))) < 0)
			return err;
	}

	/* build Mono output select control */
	if (snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 9)) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_MONO], ac97))) < 0)
			return err;
	}

	/* build Mic select control */
	if (snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 8)) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_MIC], ac97))) < 0)
			return err;
	}

	/* build ADC/DAC loopback control */
	if (enable_loopback && snd_ac97_try_bit(ac97, AC97_GENERAL_PURPOSE, 7)) {
		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_general[AC97_GENERAL_LOOPBACK], ac97))) < 0)
			return err;
	}

	snd_ac97_update_bits(ac97, AC97_GENERAL_PURPOSE, ~AC97_GP_DRSS_MASK, 0x0000);

	/* build 3D controls */
	if (ac97->build_ops->build_3d) {
		ac97->build_ops->build_3d(ac97);
	} else {
		if (snd_ac97_try_volume_mix(ac97, AC97_3D_CONTROL)) {
			unsigned short val;
			val = 0x0707;
			snd_ac97_write(ac97, AC97_3D_CONTROL, val);
			val = snd_ac97_read(ac97, AC97_3D_CONTROL);
			/* val != 0x0707 readback implies packed 9/1-bit layout */
			val = val == 0x0606;
			if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97))) < 0)
				return err;
			if (val)
				kctl->private_value = AC97_3D_CONTROL | (9 << 8) | (7 << 16);
			if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[1], ac97))) < 0)
				return
kctl)
			return -ENOMEM;
		if (ac97->scaps & AC97_SCAP_INV_EAPD)
			set_inv_eapd(ac97, kctl);
		if ((err = snd_ctl_add(card, kctl)) < 0)
			return err;
	}

	return 0;
}

/* register the modem-side controls (GPIO switches + chip specifics) */
static int snd_ac97_modem_build(struct snd_card *card, struct snd_ac97 * ac97)
{
	int err, idx;

	/* printk(KERN_DEBUG "AC97_GPIO_CFG = %x\n", snd_ac97_read(ac97,AC97_GPIO_CFG)); */
	snd_ac97_write(ac97, AC97_GPIO_CFG, 0xffff & ~(AC97_GPIO_LINE1_OH));
	snd_ac97_write(ac97, AC97_GPIO_POLARITY, 0xffff & ~(AC97_GPIO_LINE1_OH));
	snd_ac97_write(ac97, AC97_GPIO_STICKY, 0xffff);
	snd_ac97_write(ac97, AC97_GPIO_WAKEUP, 0x0);
	snd_ac97_write(ac97, AC97_MISC_AFE, 0x0);

	/* build modem switches */
	for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_modem_switches); idx++)
		if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ac97_controls_modem_switches[idx], ac97))) < 0)
			return err;

	/* build chip specific controls */
	if (ac97->build_ops->build_specific)
		if ((err = ac97->build_ops->build_specific(ac97)) < 0)
			return err;

	return 0;
}

/* write a rate (scaled by the bus clock vs. 48000) and verify it sticks */
static int snd_ac97_test_rate(struct snd_ac97 *ac97, int reg, int shadow_reg, int rate)
{
	unsigned short val;
	unsigned int tmp;

	tmp = ((unsigned int)rate * ac97->bus->clock) / 48000;
	snd_ac97_write_cache(ac97, reg, tmp & 0xffff);
	if (shadow_reg)
		snd_ac97_write_cache(ac97, shadow_reg, tmp & 0xffff);
	val = snd_ac97_read(ac97, reg);
	return val == (tmp & 0xffff);
}

/* probe which sample rates a DAC/ADC rate register accepts and return
 * the matching SNDRV_PCM_RATE_* bitmask in *r_result */
static void snd_ac97_determine_rates(struct snd_ac97 *ac97, int reg, int shadow_reg, unsigned int *r_result)
{
	unsigned int result = 0;
	unsigned short saved;

	if (ac97->bus->no_vra) {
		/* no variable-rate audio: fixed 48k (plus 96k double rate) */
		*r_result = SNDRV_PCM_RATE_48000;
		if ((ac97->flags & AC97_DOUBLE_RATE) &&
		    reg == AC97_PCM_FRONT_DAC_RATE)
			*r_result |= SNDRV_PCM_RATE_96000;
		return;
	}

	saved = snd_ac97_read(ac97, reg);
	if ((ac97->ext_id & AC97_EI_DRA) && reg == AC97_PCM_FRONT_DAC_RATE)
		snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS,
				     AC97_EA_DRA, 0);
	/* test a non-standard rate */
	if (snd_ac97_test_rate(ac97, reg, shadow_reg, 11000))
		result |= SNDRV_PCM_RATE_CONTINUOUS;
	/* let's try to obtain standard rates */
	if (snd_ac97_test_rate(ac97, reg, shadow_reg, 8000))
		result |= SNDRV_PCM_RATE_8000;
	if (snd_ac97_test_rate(ac97, reg, shadow_reg, 11025))
		result |= SNDRV_PCM_RATE_11025;
	if (snd_ac97_test_rate(ac97, reg, shadow_reg, 16000))
		result |= SNDRV_PCM_RATE_16000;
	if (snd_ac97_test_rate(ac97, reg, shadow_reg, 22050))
		result |= SNDRV_PCM_RATE_22050;
	if (snd_ac97_test_rate(ac97, reg, shadow_reg, 32000))
		result |= SNDRV_PCM_RATE_32000;
	if (snd_ac97_test_rate(ac97, reg, shadow_reg, 44100))
		result |= SNDRV_PCM_RATE_44100;
	if (snd_ac97_test_rate(ac97, reg, shadow_reg, 48000))
		result |= SNDRV_PCM_RATE_48000;
	if ((ac97->flags & AC97_DOUBLE_RATE) && reg == AC97_PCM_FRONT_DAC_RATE) {
		/* test standard double rates */
		snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS,
				     AC97_EA_DRA, AC97_EA_DRA);
		if (snd_ac97_test_rate(ac97, reg, shadow_reg, 64000 / 2))
			result |= SNDRV_PCM_RATE_64000;
		if (snd_ac97_test_rate(ac97, reg, shadow_reg, 88200 / 2))
			result |= SNDRV_PCM_RATE_88200;
		if (snd_ac97_test_rate(ac97, reg, shadow_reg, 96000 / 2))
			result |= SNDRV_PCM_RATE_96000;
		/* some codecs don't support variable double rates */
		if (!snd_ac97_test_rate(ac97, reg, shadow_reg, 76100 / 2))
			result &= ~SNDRV_PCM_RATE_CONTINUOUS;
		snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS,
				     AC97_EA_DRA, 0);
	}
	/* restore the default value */
	snd_ac97_write_cache(ac97, reg, saved);
	if (shadow_reg)
		snd_ac97_write_cache(ac97, shadow_reg, saved);
	*r_result = result;
}

/* check AC97_SPDIF register to accept which sample rates */
static unsigned int snd_ac97_determine_spdif_rates(struct snd_ac97 *ac97)
{
	unsigned int result = 0;
	int i;
	static unsigned short ctl_bits[] = {
		AC97_SC_SPSR_44K, AC97_SC_SPSR_32K, AC97_SC_SPSR_48K
	};
	static unsigned int rate_bits[] = {
		SNDRV_PCM_RATE_44100, SNDRV_PCM_RATE_32000, SNDRV_PCM_RATE_48000
	};

	for (i = 0; i < (int)ARRAY_SIZE(ctl_bits); i++) {
		snd_ac97_update_bits(ac97, AC97_SPDIF, AC97_SC_SPSR_MASK, ctl_bits[i]);
		if ((snd_ac97_read(ac97, AC97_SPDIF) & AC97_SC_SPSR_MASK) == ctl_bits[i])
			result |=
rate_bits[i];
	}
	return result;
}

/* look for the codec id table matching with the given id */
static const struct ac97_codec_id *look_for_codec_id(const struct ac97_codec_id *table, unsigned int id)
{
	const struct ac97_codec_id *pid;

	for (pid = table; pid->id; pid++)
		if (pid->id == (id & pid->mask))
			return pid;
	return NULL;
}

/* build a human-readable vendor+chip name from the 32-bit codec id and,
 * as a side effect, run any matching vendor/chip patch routine */
void snd_ac97_get_name(struct snd_ac97 *ac97, unsigned int id, char *name, int modem)
{
	const struct ac97_codec_id *pid;

	/* fallback: raw id plus its three printable id characters */
	sprintf(name, "0x%x %c%c%c", id, printable(id >> 24), printable(id >> 16), printable(id >> 8));
	pid = look_for_codec_id(snd_ac97_codec_id_vendors, id);
	if (! pid)
		return;

	strcpy(name, pid->name);
	if (ac97 && pid->patch) {
		/* apply a vendor patch only for the matching device class */
		if ((modem && (pid->flags & AC97_MODEM_PATCH)) ||
		    (! modem && ! (pid->flags & AC97_MODEM_PATCH)))
			pid->patch(ac97);
	}

	pid = look_for_codec_id(snd_ac97_codec_ids, id);
	if (pid) {
		strcat(name, " ");
		strcat(name, pid->name);
		if (pid->mask != 0xffffffff)
			sprintf(name + strlen(name), " rev %d", id & ~pid->mask);
		if (ac97 && pid->patch) {
			if ((modem && (pid->flags & AC97_MODEM_PATCH)) ||
			    (! modem && ! (pid->flags & AC97_MODEM_PATCH)))
				pid->patch(ac97);
		}
	} else
		sprintf(name + strlen(name), " id %x", id & 0xff);
}

/**
 * snd_ac97_get_short_name - retrieve codec name
 * @ac97: the codec instance
 *
 * Return: The short identifying name of the codec.
 */
const char *snd_ac97_get_short_name(struct snd_ac97 *ac97)
{
	const struct ac97_codec_id *pid;

	for (pid = snd_ac97_codec_ids; pid->id; pid++)
		if (pid->id == (ac97->id & pid->mask))
			return pid->name;
	return "unknown codec";
}

EXPORT_SYMBOL(snd_ac97_get_short_name);

/* wait for a while until registers are accessible after RESET
 * return 0 if ok, negative not ready
 */
static int ac97_reset_wait(struct snd_ac97 *ac97, int timeout, int with_modem)
{
	unsigned long end_time;
	unsigned short val;

	end_time = jiffies + timeout;
	do {
		/* use preliminary reads to settle the communication */
		snd_ac97_read(ac97, AC97_RESET);
		snd_ac97_read(ac97, AC97_VENDOR_ID1);
		snd_ac97_read(ac97, AC97_VENDOR_ID2);
		/* modem? */
		if (with_modem) {
			val = snd_ac97_read(ac97, AC97_EXTENDED_MID);
			if (val != 0xffff && (val & 1) != 0)
				return 0;
		}
		if (ac97->scaps & AC97_SCAP_DETECT_BY_VENDOR) {
			/* probably only Xbox issue - all registers are read as zero */
			val = snd_ac97_read(ac97, AC97_VENDOR_ID1);
			if (val != 0 && val != 0xffff)
				return 0;
		} else {
			/* because the PCM or MASTER volume registers can be modified,
			 * the REC_GAIN register is used for tests
			 */
			/* test if we can write to the record gain volume register */
			snd_ac97_write_cache(ac97, AC97_REC_GAIN, 0x8a05);
			if ((snd_ac97_read(ac97, AC97_REC_GAIN) & 0x7fff) == 0x0a05)
				return 0;
		}
		schedule_timeout_uninterruptible(1);
	} while (time_after_eq(end_time, jiffies));
	return -ENODEV;
}

/**
 * snd_ac97_bus - create an AC97 bus component
 * @card: the card instance
 * @num: the bus number
 * @ops: the bus callbacks table
 * @private_data: private data pointer for the new instance
 * @rbus: the pointer to store the new AC97 bus instance.
 *
 * Creates an AC97 bus component.  An struct snd_ac97_bus instance is newly
 * allocated and initialized.
 *
 * The ops table must include valid callbacks (at least read and
 * write).  The other callbacks, wait and reset, are not mandatory.
 *
 * The clock is set to 48000.  If another clock is needed, set
 * (*rbus)->clock manually.
 *
 * The AC97 bus instance is registered as a low-level device, so you don't
 * have to release it manually.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_ac97_bus(struct snd_card *card, int num, struct snd_ac97_bus_ops *ops,
		 void *private_data, struct snd_ac97_bus **rbus)
{
	int err;
	struct snd_ac97_bus *bus;
	static struct snd_device_ops dev_ops = {
		.dev_free =	snd_ac97_bus_dev_free,
	};

	if (snd_BUG_ON(!card))
		return -EINVAL;
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (bus == NULL)
		return -ENOMEM;
	bus->card = card;
	bus->num = num;
	bus->ops = ops;
	bus->private_data = private_data;
	bus->clock = 48000;
	spin_lock_init(&bus->bus_lock);
	snd_ac97_bus_proc_init(bus);
	if ((err = snd_device_new(card, SNDRV_DEV_BUS, bus, &dev_ops)) < 0) {
		snd_ac97_bus_free(bus);
		return err;
	}
	if (rbus)
		*rbus = bus;
	return 0;
}

EXPORT_SYMBOL(snd_ac97_bus);

/* stop no dev release warning */
static void ac97_device_release(struct device * dev)
{
}

/* register ac97 codec to bus */
static int snd_ac97_dev_register(struct snd_device *device)
{
	struct snd_ac97 *ac97 = device->device_data;
	int err;

	ac97->dev.bus = &ac97_bus_type;
	ac97->dev.parent = ac97->bus->card->dev;
	ac97->dev.release = ac97_device_release;
	dev_set_name(&ac97->dev, "%d-%d:%s",
		     ac97->bus->card->number, ac97->num,
		     snd_ac97_get_short_name(ac97));
	if ((err = device_register(&ac97->dev)) < 0) {
		snd_printk(KERN_ERR "Can't register ac97 bus\n");
		ac97->dev.bus = NULL;
		return err;
	}
	return 0;
}

/* disconnect ac97 codec */
static int snd_ac97_dev_disconnect(struct snd_device *device)
{
	struct snd_ac97 *ac97 = device->device_data;
	if (ac97->dev.bus)
		device_unregister(&ac97->dev);
	return 0;
}

/* build_ops to do nothing */
static const struct snd_ac97_build_ops null_build_ops;

#ifdef CONFIG_SND_AC97_POWER_SAVE
/* deferred worker that re-evaluates the codec power-down bits */
static void do_update_power(struct work_struct *work)
{
	update_power_regs(
		container_of(work, struct snd_ac97, power_work.work));
}
#endif

/**
 * snd_ac97_mixer - create an Codec97 component
 * @bus: the
AC97 bus which codec is attached to * @template: the template of ac97, including index, callbacks and * the private data. * @rac97: the pointer to store the new ac97 instance. * * Creates an Codec97 component. An struct snd_ac97 instance is newly * allocated and initialized from the template. The codec * is then initialized by the standard procedure. * * The template must include the codec number (num) and address (addr), * and the private data (private_data). * * The ac97 instance is registered as a low-level device, so you don't * have to release it manually. * * Return: Zero if successful, or a negative error code on failure. */ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template, struct snd_ac97 **rac97) { int err; struct snd_ac97 *ac97; struct snd_card *card; char name[64]; unsigned long end_time; unsigned int reg; const struct ac97_codec_id *pid; static struct snd_device_ops ops = { .dev_free = snd_ac97_dev_free, .dev_register = snd_ac97_dev_register, .dev_disconnect = snd_ac97_dev_disconnect, }; if (rac97) *rac97 = NULL; if (snd_BUG_ON(!bus || !template)) return -EINVAL; if (snd_BUG_ON(template->num >= 4)) return -EINVAL; if (bus->codec[template->num]) return -EBUSY; card = bus->card; ac97 = kzalloc(sizeof(*ac97), GFP_KERNEL); if (ac97 == NULL) return -ENOMEM; ac97->private_data = template->private_data; ac97->private_free = template->private_free; ac97->bus = bus; ac97->pci = template->pci; ac97->num = template->num; ac97->addr = template->addr; ac97->scaps = template->scaps; ac97->res_table = template->res_table; bus->codec[ac97->num] = ac97; mutex_init(&ac97->reg_mutex); mutex_init(&ac97->page_mutex); #ifdef CONFIG_SND_AC97_POWER_SAVE INIT_DELAYED_WORK(&ac97->power_work, do_update_power); #endif #ifdef CONFIG_PCI if (ac97->pci) { pci_read_config_word(ac97->pci, PCI_SUBSYSTEM_VENDOR_ID, &ac97->subsystem_vendor); pci_read_config_word(ac97->pci, PCI_SUBSYSTEM_ID, &ac97->subsystem_device); } #endif if (bus->ops->reset) { 
bus->ops->reset(ac97); goto __access_ok; } ac97->id = snd_ac97_read(ac97, AC97_VENDOR_ID1) << 16; ac97->id |= snd_ac97_read(ac97, AC97_VENDOR_ID2); if (ac97->id && ac97->id != (unsigned int)-1) { pid = look_for_codec_id(snd_ac97_codec_ids, ac97->id); if (pid && (pid->flags & AC97_DEFAULT_POWER_OFF)) goto __access_ok; } /* reset to defaults */ if (!(ac97->scaps & AC97_SCAP_SKIP_AUDIO)) snd_ac97_write(ac97, AC97_RESET, 0); if (!(ac97->scaps & AC97_SCAP_SKIP_MODEM)) snd_ac97_write(ac97, AC97_EXTENDED_MID, 0); if (bus->ops->wait) bus->ops->wait(ac97); else { udelay(50); if (ac97->scaps & AC97_SCAP_SKIP_AUDIO) err = ac97_reset_wait(ac97, msecs_to_jiffies(500), 1); else { err = ac97_reset_wait(ac97, msecs_to_jiffies(500), 0); if (err < 0) err = ac97_reset_wait(ac97, msecs_to_jiffies(500), 1); } if (err < 0) { snd_printk(KERN_WARNING "AC'97 %d does not respond - RESET\n", ac97->num); /* proceed anyway - it's often non-critical */ } } __access_ok: ac97->id = snd_ac97_read(ac97, AC97_VENDOR_ID1) << 16; ac97->id |= snd_ac97_read(ac97, AC97_VENDOR_ID2); if (! 
(ac97->scaps & AC97_SCAP_DETECT_BY_VENDOR) && (ac97->id == 0x00000000 || ac97->id == 0xffffffff)) { snd_printk(KERN_ERR "AC'97 %d access is not valid [0x%x], removing mixer.\n", ac97->num, ac97->id); snd_ac97_free(ac97); return -EIO; } pid = look_for_codec_id(snd_ac97_codec_ids, ac97->id); if (pid) ac97->flags |= pid->flags; /* test for AC'97 */ if (!(ac97->scaps & AC97_SCAP_SKIP_AUDIO) && !(ac97->scaps & AC97_SCAP_AUDIO)) { /* test if we can write to the record gain volume register */ snd_ac97_write_cache(ac97, AC97_REC_GAIN, 0x8a06); if (((err = snd_ac97_read(ac97, AC97_REC_GAIN)) & 0x7fff) == 0x0a06) ac97->scaps |= AC97_SCAP_AUDIO; } if (ac97->scaps & AC97_SCAP_AUDIO) { ac97->caps = snd_ac97_read(ac97, AC97_RESET); ac97->ext_id = snd_ac97_read(ac97, AC97_EXTENDED_ID); if (ac97->ext_id == 0xffff) /* invalid combination */ ac97->ext_id = 0; } /* test for MC'97 */ if (!(ac97->scaps & AC97_SCAP_SKIP_MODEM) && !(ac97->scaps & AC97_SCAP_MODEM)) { ac97->ext_mid = snd_ac97_read(ac97, AC97_EXTENDED_MID); if (ac97->ext_mid == 0xffff) /* invalid combination */ ac97->ext_mid = 0; if (ac97->ext_mid & 1) ac97->scaps |= AC97_SCAP_MODEM; } if (!ac97_is_audio(ac97) && !ac97_is_modem(ac97)) { if (!(ac97->scaps & (AC97_SCAP_SKIP_AUDIO|AC97_SCAP_SKIP_MODEM))) snd_printk(KERN_ERR "AC'97 %d access error (not audio or modem codec)\n", ac97->num); snd_ac97_free(ac97); return -EACCES; } if (bus->ops->reset) // FIXME: always skipping? goto __ready_ok; /* FIXME: add powerdown control */ if (ac97_is_audio(ac97)) { /* nothing should be in powerdown mode */ snd_ac97_write_cache(ac97, AC97_POWERDOWN, 0); if (! 
(ac97->flags & AC97_DEFAULT_POWER_OFF)) { snd_ac97_write_cache(ac97, AC97_RESET, 0); /* reset to defaults */ udelay(100); snd_ac97_write_cache(ac97, AC97_POWERDOWN, 0); } /* nothing should be in powerdown mode */ snd_ac97_write_cache(ac97, AC97_GENERAL_PURPOSE, 0); end_time = jiffies + msecs_to_jiffies(5000); do { if ((snd_ac97_read(ac97, AC97_POWERDOWN) & 0x0f) == 0x0f) goto __ready_ok; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); snd_printk(KERN_WARNING "AC'97 %d analog subsections not ready\n", ac97->num); } /* FIXME: add powerdown control */ if (ac97_is_modem(ac97)) { unsigned char tmp; /* nothing should be in powerdown mode */ /* note: it's important to set the rate at first */ tmp = AC97_MEA_GPIO; if (ac97->ext_mid & AC97_MEI_LINE1) { snd_ac97_write_cache(ac97, AC97_LINE1_RATE, 8000); tmp |= AC97_MEA_ADC1 | AC97_MEA_DAC1; } if (ac97->ext_mid & AC97_MEI_LINE2) { snd_ac97_write_cache(ac97, AC97_LINE2_RATE, 8000); tmp |= AC97_MEA_ADC2 | AC97_MEA_DAC2; } if (ac97->ext_mid & AC97_MEI_HANDSET) { snd_ac97_write_cache(ac97, AC97_HANDSET_RATE, 8000); tmp |= AC97_MEA_HADC | AC97_MEA_HDAC; } snd_ac97_write_cache(ac97, AC97_EXTENDED_MSTATUS, 0); udelay(100); /* nothing should be in powerdown mode */ snd_ac97_write_cache(ac97, AC97_EXTENDED_MSTATUS, 0); end_time = jiffies + msecs_to_jiffies(100); do { if ((snd_ac97_read(ac97, AC97_EXTENDED_MSTATUS) & tmp) == tmp) goto __ready_ok; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); snd_printk(KERN_WARNING "MC'97 %d converters and GPIO not ready (0x%x)\n", ac97->num, snd_ac97_read(ac97, AC97_EXTENDED_MSTATUS)); } __ready_ok: if (ac97_is_audio(ac97)) ac97->addr = (ac97->ext_id & AC97_EI_ADDR_MASK) >> AC97_EI_ADDR_SHIFT; else ac97->addr = (ac97->ext_mid & AC97_MEI_ADDR_MASK) >> AC97_MEI_ADDR_SHIFT; if (ac97->ext_id & 0x01c9) { /* L/R, MIC, SDAC, LDAC VRA support */ reg = snd_ac97_read(ac97, AC97_EXTENDED_STATUS); reg |= ac97->ext_id & 0x01c0; /* LDAC/SDAC/CDAC 
*/ if (! bus->no_vra) reg |= ac97->ext_id & 0x0009; /* VRA/VRM */ snd_ac97_write_cache(ac97, AC97_EXTENDED_STATUS, reg); } if ((ac97->ext_id & AC97_EI_DRA) && bus->dra) { /* Intel controllers require double rate data to be put in * slots 7+8, so let's hope the codec supports it. */ snd_ac97_update_bits(ac97, AC97_GENERAL_PURPOSE, AC97_GP_DRSS_MASK, AC97_GP_DRSS_78); if ((snd_ac97_read(ac97, AC97_GENERAL_PURPOSE) & AC97_GP_DRSS_MASK) == AC97_GP_DRSS_78) ac97->flags |= AC97_DOUBLE_RATE; /* restore to slots 10/11 to avoid the confliction with surrounds */ snd_ac97_update_bits(ac97, AC97_GENERAL_PURPOSE, AC97_GP_DRSS_MASK, 0); } if (ac97->ext_id & AC97_EI_VRA) { /* VRA support */ snd_ac97_determine_rates(ac97, AC97_PCM_FRONT_DAC_RATE, 0, &ac97->rates[AC97_RATES_FRONT_DAC]); snd_ac97_determine_rates(ac97, AC97_PCM_LR_ADC_RATE, 0, &ac97->rates[AC97_RATES_ADC]); } else { ac97->rates[AC97_RATES_FRONT_DAC] = SNDRV_PCM_RATE_48000; if (ac97->flags & AC97_DOUBLE_RATE) ac97->rates[AC97_RATES_FRONT_DAC] |= SNDRV_PCM_RATE_96000; ac97->rates[AC97_RATES_ADC] = SNDRV_PCM_RATE_48000; } if (ac97->ext_id & AC97_EI_SPDIF) { /* codec specific code (patch) should override these values */ ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_32000; } if (ac97->ext_id & AC97_EI_VRM) { /* MIC VRA support */ snd_ac97_determine_rates(ac97, AC97_PCM_MIC_ADC_RATE, 0, &ac97->rates[AC97_RATES_MIC_ADC]); } else { ac97->rates[AC97_RATES_MIC_ADC] = SNDRV_PCM_RATE_48000; } if (ac97->ext_id & AC97_EI_SDAC) { /* SDAC support */ snd_ac97_determine_rates(ac97, AC97_PCM_SURR_DAC_RATE, AC97_PCM_FRONT_DAC_RATE, &ac97->rates[AC97_RATES_SURR_DAC]); ac97->scaps |= AC97_SCAP_SURROUND_DAC; } if (ac97->ext_id & AC97_EI_LDAC) { /* LDAC support */ snd_ac97_determine_rates(ac97, AC97_PCM_LFE_DAC_RATE, AC97_PCM_FRONT_DAC_RATE, &ac97->rates[AC97_RATES_LFE_DAC]); ac97->scaps |= AC97_SCAP_CENTER_LFE_DAC; } /* additional initializations */ if (bus->ops->init) bus->ops->init(ac97); 
snd_ac97_get_name(ac97, ac97->id, name, !ac97_is_audio(ac97)); snd_ac97_get_name(NULL, ac97->id, name, !ac97_is_audio(ac97)); // ac97->id might be changed in the special setup code if (! ac97->build_ops) ac97->build_ops = &null_build_ops; if (ac97_is_audio(ac97)) { char comp[16]; if (card->mixername[0] == '\0') { strcpy(card->mixername, name); } else { if (strlen(card->mixername) + 1 + strlen(name) + 1 <= sizeof(card->mixername)) { strcat(card->mixername, ","); strcat(card->mixername, name); } } sprintf(comp, "AC97a:%08x", ac97->id); if ((err = snd_component_add(card, comp)) < 0) { snd_ac97_free(ac97); return err; } if (snd_ac97_mixer_build(ac97) < 0) { snd_ac97_free(ac97); return -ENOMEM; } } if (ac97_is_modem(ac97)) { char comp[16]; if (card->mixername[0] == '\0') { strcpy(card->mixername, name); } else { if (strlen(card->mixername) + 1 + strlen(name) + 1 <= sizeof(card->mixername)) { strcat(card->mixername, ","); strcat(card->mixername, name); } } sprintf(comp, "AC97m:%08x", ac97->id); if ((err = snd_component_add(card, comp)) < 0) { snd_ac97_free(ac97); return err; } if (snd_ac97_modem_build(card, ac97) < 0) { snd_ac97_free(ac97); return -ENOMEM; } } if (ac97_is_audio(ac97)) update_power_regs(ac97); snd_ac97_proc_init(ac97); if ((err = snd_device_new(card, SNDRV_DEV_CODEC, ac97, &ops)) < 0) { snd_ac97_free(ac97); return err; } *rac97 = ac97; return 0; } EXPORT_SYMBOL(snd_ac97_mixer); /* * Power down the chip. * * MASTER and HEADPHONE registers are muted but the register cache values * are not changed, so that the values can be restored in snd_ac97_resume(). 
*/ static void snd_ac97_powerdown(struct snd_ac97 *ac97) { unsigned short power; if (ac97_is_audio(ac97)) { /* some codecs have stereo mute bits */ snd_ac97_write(ac97, AC97_MASTER, 0x9f9f); snd_ac97_write(ac97, AC97_HEADPHONE, 0x9f9f); } /* surround, CLFE, mic powerdown */ power = ac97->regs[AC97_EXTENDED_STATUS]; if (ac97->scaps & AC97_SCAP_SURROUND_DAC) power |= AC97_EA_PRJ; if (ac97->scaps & AC97_SCAP_CENTER_LFE_DAC) power |= AC97_EA_PRI | AC97_EA_PRK; power |= AC97_EA_PRL; snd_ac97_write(ac97, AC97_EXTENDED_STATUS, power); /* powerdown external amplifier */ if (ac97->scaps & AC97_SCAP_INV_EAPD) power = ac97->regs[AC97_POWERDOWN] & ~AC97_PD_EAPD; else if (! (ac97->scaps & AC97_SCAP_EAPD_LED)) power = ac97->regs[AC97_POWERDOWN] | AC97_PD_EAPD; power |= AC97_PD_PR6; /* Headphone amplifier powerdown */ power |= AC97_PD_PR0 | AC97_PD_PR1; /* ADC & DAC powerdown */ snd_ac97_write(ac97, AC97_POWERDOWN, power); udelay(100); power |= AC97_PD_PR2; /* Analog Mixer powerdown (Vref on) */ snd_ac97_write(ac97, AC97_POWERDOWN, power); if (ac97_is_power_save_mode(ac97)) { power |= AC97_PD_PR3; /* Analog Mixer powerdown */ snd_ac97_write(ac97, AC97_POWERDOWN, power); udelay(100); /* AC-link powerdown, internal Clk disable */ /* FIXME: this may cause click noises on some boards */ power |= AC97_PD_PR4 | AC97_PD_PR5; snd_ac97_write(ac97, AC97_POWERDOWN, power); } } struct ac97_power_reg { unsigned short reg; unsigned short power_reg; unsigned short mask; }; enum { PWIDX_ADC, PWIDX_FRONT, PWIDX_CLFE, PWIDX_SURR, PWIDX_MIC, PWIDX_SIZE }; static struct ac97_power_reg power_regs[PWIDX_SIZE] = { [PWIDX_ADC] = { AC97_PCM_LR_ADC_RATE, AC97_POWERDOWN, AC97_PD_PR0}, [PWIDX_FRONT] = { AC97_PCM_FRONT_DAC_RATE, AC97_POWERDOWN, AC97_PD_PR1}, [PWIDX_CLFE] = { AC97_PCM_LFE_DAC_RATE, AC97_EXTENDED_STATUS, AC97_EA_PRI | AC97_EA_PRK}, [PWIDX_SURR] = { AC97_PCM_SURR_DAC_RATE, AC97_EXTENDED_STATUS, AC97_EA_PRJ}, [PWIDX_MIC] = { AC97_PCM_MIC_ADC_RATE, AC97_EXTENDED_STATUS, AC97_EA_PRL}, }; #ifdef 
CONFIG_SND_AC97_POWER_SAVE /** * snd_ac97_update_power - update the powerdown register * @ac97: the codec instance * @reg: the rate register, e.g. AC97_PCM_FRONT_DAC_RATE * @powerup: non-zero when power up the part * * Update the AC97 powerdown register bits of the given part. * * Return: Zero. */ int snd_ac97_update_power(struct snd_ac97 *ac97, int reg, int powerup) { int i; if (! ac97) return 0; if (reg) { /* SPDIF requires DAC power, too */ if (reg == AC97_SPDIF) reg = AC97_PCM_FRONT_DAC_RATE; for (i = 0; i < PWIDX_SIZE; i++) { if (power_regs[i].reg == reg) { if (powerup) ac97->power_up |= (1 << i); else ac97->power_up &= ~(1 << i); break; } } } if (ac97_is_power_save_mode(ac97) && !powerup) /* adjust power-down bits after two seconds delay * (for avoiding loud click noises for many (OSS) apps * that open/close frequently) */ schedule_delayed_work(&ac97->power_work, msecs_to_jiffies(power_save * 1000)); else { cancel_delayed_work(&ac97->power_work); update_power_regs(ac97); } return 0; } EXPORT_SYMBOL(snd_ac97_update_power); #endif /* CONFIG_SND_AC97_POWER_SAVE */ static void update_power_regs(struct snd_ac97 *ac97) { unsigned int power_up, bits; int i; power_up = (1 << PWIDX_FRONT) | (1 << PWIDX_ADC); power_up |= (1 << PWIDX_MIC); if (ac97->scaps & AC97_SCAP_SURROUND_DAC) power_up |= (1 << PWIDX_SURR); if (ac97->scaps & AC97_SCAP_CENTER_LFE_DAC) power_up |= (1 << PWIDX_CLFE); #ifdef CONFIG_SND_AC97_POWER_SAVE if (ac97_is_power_save_mode(ac97)) power_up = ac97->power_up; #endif if (power_up) { if (ac97->regs[AC97_POWERDOWN] & AC97_PD_PR2) { /* needs power-up analog mix and vref */ snd_ac97_update_bits(ac97, AC97_POWERDOWN, AC97_PD_PR3, 0); msleep(1); snd_ac97_update_bits(ac97, AC97_POWERDOWN, AC97_PD_PR2, 0); } } for (i = 0; i < PWIDX_SIZE; i++) { if (power_up & (1 << i)) bits = 0; else bits = power_regs[i].mask; snd_ac97_update_bits(ac97, power_regs[i].power_reg, power_regs[i].mask, bits); } if (! power_up) { if (! 
(ac97->regs[AC97_POWERDOWN] & AC97_PD_PR2)) { /* power down analog mix and vref */ snd_ac97_update_bits(ac97, AC97_POWERDOWN, AC97_PD_PR2, AC97_PD_PR2); snd_ac97_update_bits(ac97, AC97_POWERDOWN, AC97_PD_PR3, AC97_PD_PR3); } } } #ifdef CONFIG_PM /** * snd_ac97_suspend - General suspend function for AC97 codec * @ac97: the ac97 instance * * Suspends the codec, power down the chip. */ void snd_ac97_suspend(struct snd_ac97 *ac97) { if (! ac97) return; if (ac97->build_ops->suspend) ac97->build_ops->suspend(ac97); #ifdef CONFIG_SND_AC97_POWER_SAVE cancel_delayed_work_sync(&ac97->power_work); #endif snd_ac97_powerdown(ac97); } EXPORT_SYMBOL(snd_ac97_suspend); /* * restore ac97 status */ static void snd_ac97_restore_status(struct snd_ac97 *ac97) { int i; for (i = 2; i < 0x7c ; i += 2) { if (i == AC97_POWERDOWN || i == AC97_EXTENDED_ID) continue; /* restore only accessible registers * some chip (e.g. nm256) may hang up when unsupported registers * are accessed..! */ if (test_bit(i, ac97->reg_accessed)) { snd_ac97_write(ac97, i, ac97->regs[i]); snd_ac97_read(ac97, i); } } } /* * restore IEC958 status */ static void snd_ac97_restore_iec958(struct snd_ac97 *ac97) { if (ac97->ext_id & AC97_EI_SPDIF) { if (ac97->regs[AC97_EXTENDED_STATUS] & AC97_EA_SPDIF) { /* reset spdif status */ snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, 0); snd_ac97_write(ac97, AC97_EXTENDED_STATUS, ac97->regs[AC97_EXTENDED_STATUS]); if (ac97->flags & AC97_CS_SPDIF) snd_ac97_write(ac97, AC97_CSR_SPDIF, ac97->regs[AC97_CSR_SPDIF]); else snd_ac97_write(ac97, AC97_SPDIF, ac97->regs[AC97_SPDIF]); snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, AC97_EA_SPDIF); /* turn on again */ } } } /** * snd_ac97_resume - General resume function for AC97 codec * @ac97: the ac97 instance * * Do the standard resume procedure, power up and restoring the * old register values. */ void snd_ac97_resume(struct snd_ac97 *ac97) { unsigned long end_time; if (! 
ac97) return; if (ac97->bus->ops->reset) { ac97->bus->ops->reset(ac97); goto __reset_ready; } snd_ac97_write(ac97, AC97_POWERDOWN, 0); if (! (ac97->flags & AC97_DEFAULT_POWER_OFF)) { if (!(ac97->scaps & AC97_SCAP_SKIP_AUDIO)) snd_ac97_write(ac97, AC97_RESET, 0); else if (!(ac97->scaps & AC97_SCAP_SKIP_MODEM)) snd_ac97_write(ac97, AC97_EXTENDED_MID, 0); udelay(100); snd_ac97_write(ac97, AC97_POWERDOWN, 0); } snd_ac97_write(ac97, AC97_GENERAL_PURPOSE, 0); snd_ac97_write(ac97, AC97_POWERDOWN, ac97->regs[AC97_POWERDOWN]); if (ac97_is_audio(ac97)) { ac97->bus->ops->write(ac97, AC97_MASTER, 0x8101); end_time = jiffies + msecs_to_jiffies(100); do { if (snd_ac97_read(ac97, AC97_MASTER) == 0x8101) break; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); /* FIXME: extra delay */ ac97->bus->ops->write(ac97, AC97_MASTER, AC97_MUTE_MASK_MONO); if (snd_ac97_read(ac97, AC97_MASTER) != AC97_MUTE_MASK_MONO) msleep(250); } else { end_time = jiffies + msecs_to_jiffies(100); do { unsigned short val = snd_ac97_read(ac97, AC97_EXTENDED_MID); if (val != 0xffff && (val & 1) != 0) break; schedule_timeout_uninterruptible(1); } while (time_after_eq(end_time, jiffies)); } __reset_ready: if (ac97->bus->ops->init) ac97->bus->ops->init(ac97); if (ac97->build_ops->resume) ac97->build_ops->resume(ac97); else { snd_ac97_restore_status(ac97); snd_ac97_restore_iec958(ac97); } } EXPORT_SYMBOL(snd_ac97_resume); #endif /* * Hardware tuning */ static void set_ctl_name(char *dst, const char *src, const char *suffix) { if (suffix) sprintf(dst, "%s %s", src, suffix); else strcpy(dst, src); } /* remove the control with the given name and optional suffix */ static int snd_ac97_remove_ctl(struct snd_ac97 *ac97, const char *name, const char *suffix) { struct snd_ctl_elem_id id; memset(&id, 0, sizeof(id)); set_ctl_name(id.name, name, suffix); id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; return snd_ctl_remove_id(ac97->bus->card, &id); } static struct snd_kcontrol *ctl_find(struct snd_ac97 
*ac97, const char *name, const char *suffix) { struct snd_ctl_elem_id sid; memset(&sid, 0, sizeof(sid)); set_ctl_name(sid.name, name, suffix); sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER; return snd_ctl_find_id(ac97->bus->card, &sid); } /* rename the control with the given name and optional suffix */ static int snd_ac97_rename_ctl(struct snd_ac97 *ac97, const char *src, const char *dst, const char *suffix) { struct snd_kcontrol *kctl = ctl_find(ac97, src, suffix); if (kctl) { set_ctl_name(kctl->id.name, dst, suffix); return 0; } return -ENOENT; } /* rename both Volume and Switch controls - don't check the return value */ static void snd_ac97_rename_vol_ctl(struct snd_ac97 *ac97, const char *src, const char *dst) { snd_ac97_rename_ctl(ac97, src, dst, "Switch"); snd_ac97_rename_ctl(ac97, src, dst, "Volume"); } /* swap controls */ static int snd_ac97_swap_ctl(struct snd_ac97 *ac97, const char *s1, const char *s2, const char *suffix) { struct snd_kcontrol *kctl1, *kctl2; kctl1 = ctl_find(ac97, s1, suffix); kctl2 = ctl_find(ac97, s2, suffix); if (kctl1 && kctl2) { set_ctl_name(kctl1->id.name, s2, suffix); set_ctl_name(kctl2->id.name, s1, suffix); return 0; } return -ENOENT; } #if 1 /* bind hp and master controls instead of using only hp control */ static int bind_hp_volsw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int err = snd_ac97_put_volsw(kcontrol, ucontrol); if (err > 0) { unsigned long priv_saved = kcontrol->private_value; kcontrol->private_value = (kcontrol->private_value & ~0xff) | AC97_HEADPHONE; snd_ac97_put_volsw(kcontrol, ucontrol); kcontrol->private_value = priv_saved; } return err; } /* ac97 tune: bind Master and Headphone controls */ static int tune_hp_only(struct snd_ac97 *ac97) { struct snd_kcontrol *msw = ctl_find(ac97, "Master Playback Switch", NULL); struct snd_kcontrol *mvol = ctl_find(ac97, "Master Playback Volume", NULL); if (! msw || ! 
mvol) return -ENOENT; msw->put = bind_hp_volsw_put; mvol->put = bind_hp_volsw_put; snd_ac97_remove_ctl(ac97, "Headphone Playback", "Switch"); snd_ac97_remove_ctl(ac97, "Headphone Playback", "Volume"); return 0; } #else /* ac97 tune: use Headphone control as master */ static int tune_hp_only(struct snd_ac97 *ac97) { if (ctl_find(ac97, "Headphone Playback Switch", NULL) == NULL) return -ENOENT; snd_ac97_remove_ctl(ac97, "Master Playback", "Switch"); snd_ac97_remove_ctl(ac97, "Master Playback", "Volume"); snd_ac97_rename_vol_ctl(ac97, "Headphone Playback", "Master Playback"); return 0; } #endif /* ac97 tune: swap Headphone and Master controls */ static int tune_swap_hp(struct snd_ac97 *ac97) { if (ctl_find(ac97, "Headphone Playback Switch", NULL) == NULL) return -ENOENT; snd_ac97_rename_vol_ctl(ac97, "Master Playback", "Line-Out Playback"); snd_ac97_rename_vol_ctl(ac97, "Headphone Playback", "Master Playback"); return 0; } /* ac97 tune: swap Surround and Master controls */ static int tune_swap_surround(struct snd_ac97 *ac97) { if (snd_ac97_swap_ctl(ac97, "Master Playback", "Surround Playback", "Switch") || snd_ac97_swap_ctl(ac97, "Master Playback", "Surround Playback", "Volume")) return -ENOENT; return 0; } /* ac97 tune: set up mic sharing for AD codecs */ static int tune_ad_sharing(struct snd_ac97 *ac97) { unsigned short scfg; if ((ac97->id & 0xffffff00) != 0x41445300) { snd_printk(KERN_ERR "ac97_quirk AD_SHARING is only for AD codecs\n"); return -EINVAL; } /* Turn on OMS bit to route microphone to back panel */ scfg = snd_ac97_read(ac97, AC97_AD_SERIAL_CFG); snd_ac97_write_cache(ac97, AC97_AD_SERIAL_CFG, scfg | 0x0200); return 0; } static const struct snd_kcontrol_new snd_ac97_alc_jack_detect = AC97_SINGLE("Jack Detect", AC97_ALC650_CLOCK, 5, 1, 0); /* ac97 tune: set up ALC jack-select */ static int tune_alc_jack(struct snd_ac97 *ac97) { if ((ac97->id & 0xffffff00) != 0x414c4700) { snd_printk(KERN_ERR "ac97_quirk ALC_JACK is only for Realtek codecs\n"); return 
-EINVAL; } snd_ac97_update_bits(ac97, 0x7a, 0x20, 0x20); /* select jack detect function */ snd_ac97_update_bits(ac97, 0x7a, 0x01, 0x01); /* Line-out auto mute */ if (ac97->id == AC97_ID_ALC658D) snd_ac97_update_bits(ac97, 0x74, 0x0800, 0x0800); return snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&snd_ac97_alc_jack_detect, ac97)); } /* ac97 tune: inversed EAPD bit */ static int tune_inv_eapd(struct snd_ac97 *ac97) { struct snd_kcontrol *kctl = ctl_find(ac97, "External Amplifier", NULL); if (! kctl) return -ENOENT; set_inv_eapd(ac97, kctl); return 0; } static int master_mute_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int err = snd_ac97_put_volsw(kcontrol, ucontrol); if (err > 0) { struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); int shift = (kcontrol->private_value >> 8) & 0x0f; int rshift = (kcontrol->private_value >> 12) & 0x0f; unsigned short mask; if (shift != rshift) mask = AC97_MUTE_MASK_STEREO; else mask = AC97_MUTE_MASK_MONO; snd_ac97_update_bits(ac97, AC97_POWERDOWN, AC97_PD_EAPD, (ac97->regs[AC97_MASTER] & mask) == mask ? AC97_PD_EAPD : 0); } return err; } /* ac97 tune: EAPD controls mute LED bound with the master mute */ static int tune_mute_led(struct snd_ac97 *ac97) { struct snd_kcontrol *msw = ctl_find(ac97, "Master Playback Switch", NULL); if (! 
msw) return -ENOENT; msw->put = master_mute_sw_put; snd_ac97_remove_ctl(ac97, "External Amplifier", NULL); snd_ac97_update_bits( ac97, AC97_POWERDOWN, AC97_PD_EAPD, AC97_PD_EAPD /* mute LED on */ ); ac97->scaps |= AC97_SCAP_EAPD_LED; return 0; } static int hp_master_mute_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int err = bind_hp_volsw_put(kcontrol, ucontrol); if (err > 0) { struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol); int shift = (kcontrol->private_value >> 8) & 0x0f; int rshift = (kcontrol->private_value >> 12) & 0x0f; unsigned short mask; if (shift != rshift) mask = AC97_MUTE_MASK_STEREO; else mask = AC97_MUTE_MASK_MONO; snd_ac97_update_bits(ac97, AC97_POWERDOWN, AC97_PD_EAPD, (ac97->regs[AC97_MASTER] & mask) == mask ? AC97_PD_EAPD : 0); } return err; } static int tune_hp_mute_led(struct snd_ac97 *ac97) { struct snd_kcontrol *msw = ctl_find(ac97, "Master Playback Switch", NULL); struct snd_kcontrol *mvol = ctl_find(ac97, "Master Playback Volume", NULL); if (! msw || ! 
mvol) return -ENOENT; msw->put = hp_master_mute_sw_put; mvol->put = bind_hp_volsw_put; snd_ac97_remove_ctl(ac97, "External Amplifier", NULL); snd_ac97_remove_ctl(ac97, "Headphone Playback", "Switch"); snd_ac97_remove_ctl(ac97, "Headphone Playback", "Volume"); snd_ac97_update_bits( ac97, AC97_POWERDOWN, AC97_PD_EAPD, AC97_PD_EAPD /* mute LED on */ ); return 0; } struct quirk_table { const char *name; int (*func)(struct snd_ac97 *); }; static struct quirk_table applicable_quirks[] = { { "none", NULL }, { "hp_only", tune_hp_only }, { "swap_hp", tune_swap_hp }, { "swap_surround", tune_swap_surround }, { "ad_sharing", tune_ad_sharing }, { "alc_jack", tune_alc_jack }, { "inv_eapd", tune_inv_eapd }, { "mute_led", tune_mute_led }, { "hp_mute_led", tune_hp_mute_led }, }; /* apply the quirk with the given type */ static int apply_quirk(struct snd_ac97 *ac97, int type) { if (type <= 0) return 0; else if (type >= ARRAY_SIZE(applicable_quirks)) return -EINVAL; if (applicable_quirks[type].func) return applicable_quirks[type].func(ac97); return 0; } /* apply the quirk with the given name */ static int apply_quirk_str(struct snd_ac97 *ac97, const char *typestr) { int i; struct quirk_table *q; for (i = 0; i < ARRAY_SIZE(applicable_quirks); i++) { q = &applicable_quirks[i]; if (q->name && ! strcmp(typestr, q->name)) return apply_quirk(ac97, i); } /* for compatibility, accept the numbers, too */ if (*typestr >= '0' && *typestr <= '9') return apply_quirk(ac97, (int)simple_strtoul(typestr, NULL, 10)); return -EINVAL; } /** * snd_ac97_tune_hardware - tune up the hardware * @ac97: the ac97 instance * @quirk: quirk list * @override: explicit quirk value (overrides the list if non-NULL) * * Do some workaround for each pci device, such as renaming of the * headphone (true line-out) control as "Master". * The quirk-list must be terminated with a zero-filled entry. * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_ac97_tune_hardware(struct snd_ac97 *ac97, struct ac97_quirk *quirk, const char *override) { int result; /* quirk overriden? */ if (override && strcmp(override, "-1") && strcmp(override, "default")) { result = apply_quirk_str(ac97, override); if (result < 0) snd_printk(KERN_ERR "applying quirk type %s failed (%d)\n", override, result); return result; } if (! quirk) return -EINVAL; for (; quirk->subvendor; quirk++) { if (quirk->subvendor != ac97->subsystem_vendor) continue; if ((! quirk->mask && quirk->subdevice == ac97->subsystem_device) || quirk->subdevice == (quirk->mask & ac97->subsystem_device)) { if (quirk->codec_id && quirk->codec_id != ac97->id) continue; snd_printdd("ac97 quirk for %s (%04x:%04x)\n", quirk->name, ac97->subsystem_vendor, ac97->subsystem_device); result = apply_quirk(ac97, quirk->type); if (result < 0) snd_printk(KERN_ERR "applying quirk type %d for %s failed (%d)\n", quirk->type, quirk->name, result); return result; } } return 0; } EXPORT_SYMBOL(snd_ac97_tune_hardware); /* * INIT part */ static int __init alsa_ac97_init(void) { return 0; } static void __exit alsa_ac97_exit(void) { } module_init(alsa_ac97_init) module_exit(alsa_ac97_exit)
gpl-2.0
SlimRoms/kernel_htc_msm8994
arch/x86/kvm/pmu.c
2083
13774
/* * Kernel-based Virtual Machine -- Performance Monitoring Unit support * * Copyright 2011 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@redhat.com> * Gleb Natapov <gleb@redhat.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/types.h> #include <linux/kvm_host.h> #include <linux/perf_event.h> #include "x86.h" #include "cpuid.h" #include "lapic.h" static struct kvm_arch_event_perf_mapping { u8 eventsel; u8 unit_mask; unsigned event_type; bool inexact; } arch_events[] = { /* Index must match CPUID 0x0A.EBX bit vector */ [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES }, [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES }, [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES }, [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, }; /* mapping between fixed pmc index and arch_events array */ int fixed_pmc_events[] = {1, 0, 7}; static bool pmc_is_gp(struct kvm_pmc *pmc) { return pmc->type == KVM_PMC_GP; } static inline u64 pmc_bitmask(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; return pmu->counter_bitmask[pmc->type]; } static inline bool pmc_enabled(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); } static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, u32 base) { if (msr >= base && msr < base + pmu->nr_arch_gp_counters) return &pmu->gp_counters[msr - base]; return NULL; } static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr) { int base = MSR_CORE_PERF_FIXED_CTR0; if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) return &pmu->fixed_counters[msr - base]; return NULL; } static inline struct kvm_pmc 
*get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx) { return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx); } static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx) { if (idx < INTEL_PMC_IDX_FIXED) return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0); else return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED); } void kvm_deliver_pmi(struct kvm_vcpu *vcpu) { if (vcpu->arch.apic) kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC); } static void trigger_pmi(struct irq_work *irq_work) { struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work); struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu, arch.pmu); kvm_deliver_pmi(vcpu); } static void kvm_perf_overflow(struct perf_event *perf_event, struct perf_sample_data *data, struct pt_regs *regs) { struct kvm_pmc *pmc = perf_event->overflow_handler_context; struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); } static void kvm_perf_overflow_intr(struct perf_event *perf_event, struct perf_sample_data *data, struct pt_regs *regs) { struct kvm_pmc *pmc = perf_event->overflow_handler_context; struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) { kvm_perf_overflow(perf_event, data, regs); kvm_make_request(KVM_REQ_PMU, pmc->vcpu); /* * Inject PMI. If vcpu was in a guest mode during NMI PMI * can be ejected on a guest mode re-entry. Otherwise we can't * be sure that vcpu wasn't executing hlt instruction at the * time of vmexit and is not going to re-enter guest mode until, * woken up. So we should wake it, but this is impossible from * NMI context. Do it from irq work instead. 
*/ if (!kvm_is_in_guest()) irq_work_queue(&pmc->vcpu->arch.pmu.irq_work); else kvm_make_request(KVM_REQ_PMI, pmc->vcpu); } } static u64 read_pmc(struct kvm_pmc *pmc) { u64 counter, enabled, running; counter = pmc->counter; if (pmc->perf_event) counter += perf_event_read_value(pmc->perf_event, &enabled, &running); /* FIXME: Scaling needed? */ return counter & pmc_bitmask(pmc); } static void stop_counter(struct kvm_pmc *pmc) { if (pmc->perf_event) { pmc->counter = read_pmc(pmc); perf_event_release_kernel(pmc->perf_event); pmc->perf_event = NULL; } } static void reprogram_counter(struct kvm_pmc *pmc, u32 type, unsigned config, bool exclude_user, bool exclude_kernel, bool intr) { struct perf_event *event; struct perf_event_attr attr = { .type = type, .size = sizeof(attr), .pinned = true, .exclude_idle = true, .exclude_host = 1, .exclude_user = exclude_user, .exclude_kernel = exclude_kernel, .config = config, }; attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc); event = perf_event_create_kernel_counter(&attr, -1, current, intr ? 
kvm_perf_overflow_intr : kvm_perf_overflow, pmc); if (IS_ERR(event)) { printk_once("kvm: pmu event creation failed %ld\n", PTR_ERR(event)); return; } pmc->perf_event = event; clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi); } static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select, u8 unit_mask) { int i; for (i = 0; i < ARRAY_SIZE(arch_events); i++) if (arch_events[i].eventsel == event_select && arch_events[i].unit_mask == unit_mask && (pmu->available_event_types & (1 << i))) break; if (i == ARRAY_SIZE(arch_events)) return PERF_COUNT_HW_MAX; return arch_events[i].event_type; } static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) { unsigned config, type = PERF_TYPE_RAW; u8 event_select, unit_mask; if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL) printk_once("kvm pmu: pin control bit is ignored\n"); pmc->eventsel = eventsel; stop_counter(pmc); if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc)) return; event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT; unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | ARCH_PERFMON_EVENTSEL_INV | ARCH_PERFMON_EVENTSEL_CMASK))) { config = find_arch_event(&pmc->vcpu->arch.pmu, event_select, unit_mask); if (config != PERF_COUNT_HW_MAX) type = PERF_TYPE_HARDWARE; } if (type == PERF_TYPE_RAW) config = eventsel & X86_RAW_EVENT_MASK; reprogram_counter(pmc, type, config, !(eventsel & ARCH_PERFMON_EVENTSEL_USR), !(eventsel & ARCH_PERFMON_EVENTSEL_OS), eventsel & ARCH_PERFMON_EVENTSEL_INT); } static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx) { unsigned en = en_pmi & 0x3; bool pmi = en_pmi & 0x8; stop_counter(pmc); if (!en || !pmc_enabled(pmc)) return; reprogram_counter(pmc, PERF_TYPE_HARDWARE, arch_events[fixed_pmc_events[idx]].event_type, !(en & 0x2), /* exclude user */ !(en & 0x1), /* exclude kernel */ pmi); } static inline u8 fixed_en_pmi(u64 ctrl, int idx) { return (ctrl >> (idx * 4)) & 
0xf; } static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) { int i; for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { u8 en_pmi = fixed_en_pmi(data, i); struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i); if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi) continue; reprogram_fixed_counter(pmc, en_pmi, i); } pmu->fixed_ctr_ctrl = data; } static void reprogram_idx(struct kvm_pmu *pmu, int idx) { struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx); if (!pmc) return; if (pmc_is_gp(pmc)) reprogram_gp_counter(pmc, pmc->eventsel); else { int fidx = idx - INTEL_PMC_IDX_FIXED; reprogram_fixed_counter(pmc, fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx); } } static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) { int bit; u64 diff = pmu->global_ctrl ^ data; pmu->global_ctrl = data; for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) reprogram_idx(pmu, bit); } bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_pmu *pmu = &vcpu->arch.pmu; int ret; switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: case MSR_CORE_PERF_GLOBAL_STATUS: case MSR_CORE_PERF_GLOBAL_CTRL: case MSR_CORE_PERF_GLOBAL_OVF_CTRL: ret = pmu->version > 1; break; default: ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || get_fixed_pmc(pmu, msr); break; } return ret; } int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) { struct kvm_pmu *pmu = &vcpu->arch.pmu; struct kvm_pmc *pmc; switch (index) { case MSR_CORE_PERF_FIXED_CTR_CTRL: *data = pmu->fixed_ctr_ctrl; return 0; case MSR_CORE_PERF_GLOBAL_STATUS: *data = pmu->global_status; return 0; case MSR_CORE_PERF_GLOBAL_CTRL: *data = pmu->global_ctrl; return 0; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: *data = pmu->global_ovf_ctrl; return 0; default: if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) || (pmc = get_fixed_pmc(pmu, index))) { *data = read_pmc(pmc); return 0; } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) { *data = pmc->eventsel; return 0; } } return 
1; } int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct kvm_pmu *pmu = &vcpu->arch.pmu; struct kvm_pmc *pmc; u32 index = msr_info->index; u64 data = msr_info->data; switch (index) { case MSR_CORE_PERF_FIXED_CTR_CTRL: if (pmu->fixed_ctr_ctrl == data) return 0; if (!(data & 0xfffffffffffff444ull)) { reprogram_fixed_counters(pmu, data); return 0; } break; case MSR_CORE_PERF_GLOBAL_STATUS: if (msr_info->host_initiated) { pmu->global_status = data; return 0; } break; /* RO MSR */ case MSR_CORE_PERF_GLOBAL_CTRL: if (pmu->global_ctrl == data) return 0; if (!(data & pmu->global_ctrl_mask)) { global_ctrl_changed(pmu, data); return 0; } break; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) { if (!msr_info->host_initiated) pmu->global_status &= ~data; pmu->global_ovf_ctrl = data; return 0; } break; default: if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) || (pmc = get_fixed_pmc(pmu, index))) { if (!msr_info->host_initiated) data = (s64)(s32)data; pmc->counter += data - read_pmc(pmc); return 0; } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) { if (data == pmc->eventsel) return 0; if (!(data & 0xffffffff00200000ull)) { reprogram_gp_counter(pmc, data); return 0; } } } return 1; } int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) { struct kvm_pmu *pmu = &vcpu->arch.pmu; bool fast_mode = pmc & (1u << 31); bool fixed = pmc & (1u << 30); struct kvm_pmc *counters; u64 ctr; pmc &= ~(3u << 30); if (!fixed && pmc >= pmu->nr_arch_gp_counters) return 1; if (fixed && pmc >= pmu->nr_arch_fixed_counters) return 1; counters = fixed ? 
pmu->fixed_counters : pmu->gp_counters; ctr = read_pmc(&counters[pmc]); if (fast_mode) ctr = (u32)ctr; *data = ctr; return 0; } void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = &vcpu->arch.pmu; struct kvm_cpuid_entry2 *entry; unsigned bitmap_len; pmu->nr_arch_gp_counters = 0; pmu->nr_arch_fixed_counters = 0; pmu->counter_bitmask[KVM_PMC_GP] = 0; pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->version = 0; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); if (!entry) return; pmu->version = entry->eax & 0xff; if (!pmu->version) return; pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff, INTEL_PMC_MAX_GENERIC); pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1; bitmap_len = (entry->eax >> 24) & 0xff; pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1); if (pmu->version == 1) { pmu->nr_arch_fixed_counters = 0; } else { pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), INTEL_PMC_MAX_FIXED); pmu->counter_bitmask[KVM_PMC_FIXED] = ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; } pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl; } void kvm_pmu_init(struct kvm_vcpu *vcpu) { int i; struct kvm_pmu *pmu = &vcpu->arch.pmu; memset(pmu, 0, sizeof(*pmu)); for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { pmu->gp_counters[i].type = KVM_PMC_GP; pmu->gp_counters[i].vcpu = vcpu; pmu->gp_counters[i].idx = i; } for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) { pmu->fixed_counters[i].type = KVM_PMC_FIXED; pmu->fixed_counters[i].vcpu = vcpu; pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; } init_irq_work(&pmu->irq_work, trigger_pmi); kvm_pmu_cpuid_update(vcpu); } void kvm_pmu_reset(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = &vcpu->arch.pmu; int i; irq_work_sync(&pmu->irq_work); for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { struct kvm_pmc *pmc = &pmu->gp_counters[i]; stop_counter(pmc); 
pmc->counter = pmc->eventsel = 0; } for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) stop_counter(&pmu->fixed_counters[i]); pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = pmu->global_ovf_ctrl = 0; } void kvm_pmu_destroy(struct kvm_vcpu *vcpu) { kvm_pmu_reset(vcpu); } void kvm_handle_pmu_event(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = &vcpu->arch.pmu; u64 bitmask; int bit; bitmask = pmu->reprogram_pmi; for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) { struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit); if (unlikely(!pmc || !pmc->perf_event)) { clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi); continue; } reprogram_idx(pmu, bit); } }
gpl-2.0
Happy-Ferret/Kernel-Experiments
crypto/khazad.c
2339
52992
/* * Cryptographic API. * * Khazad Algorithm * * The Khazad algorithm was developed by Paulo S. L. M. Barreto and * Vincent Rijmen. It was a finalist in the NESSIE encryption contest. * * The original authors have disclaimed all copyright interest in this * code and thus put it in the public domain. The subsequent authors * have put this under the GNU General Public License. * * By Aaron Grothe ajgrothe@yahoo.com, August 1, 2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/crypto.h> #include <linux/types.h> #define KHAZAD_KEY_SIZE 16 #define KHAZAD_BLOCK_SIZE 8 #define KHAZAD_ROUNDS 8 struct khazad_ctx { u64 E[KHAZAD_ROUNDS + 1]; u64 D[KHAZAD_ROUNDS + 1]; }; static const u64 T0[256] = { 0xbad3d268bbb96a01ULL, 0x54fc4d19e59a66b1ULL, 0x2f71bc93e26514cdULL, 0x749ccdb925871b51ULL, 0x53f55102f7a257a4ULL, 0xd3686bb8d0d6be03ULL, 0xd26b6fbdd6deb504ULL, 0x4dd72964b35285feULL, 0x50f05d0dfdba4aadULL, 0xace98a26cf09e063ULL, 0x8d8a0e83091c9684ULL, 0xbfdcc679a5914d1aULL, 0x7090ddad3da7374dULL, 0x52f65507f1aa5ca3ULL, 0x9ab352c87ba417e1ULL, 0x4cd42d61b55a8ef9ULL, 0xea238f65460320acULL, 0xd56273a6c4e68411ULL, 0x97a466f155cc68c2ULL, 0xd16e63b2dcc6a80dULL, 0x3355ccffaa85d099ULL, 0x51f35908fbb241aaULL, 0x5bed712ac7e20f9cULL, 0xa6f7a204f359ae55ULL, 0xde7f5f81febec120ULL, 0x48d83d75ad7aa2e5ULL, 0xa8e59a32d729cc7fULL, 0x99b65ec771bc0ae8ULL, 0xdb704b90e096e63bULL, 0x3256c8faac8ddb9eULL, 0xb7c4e65195d11522ULL, 0xfc19d72b32b3aaceULL, 0xe338ab48704b7393ULL, 0x9ebf42dc63843bfdULL, 0x91ae7eef41fc52d0ULL, 0x9bb056cd7dac1ce6ULL, 0xe23baf4d76437894ULL, 0xbbd0d66dbdb16106ULL, 0x41c319589b32f1daULL, 0x6eb2a5cb7957e517ULL, 0xa5f2ae0bf941b35cULL, 0xcb400bc08016564bULL, 
0x6bbdb1da677fc20cULL, 0x95a26efb59dc7eccULL, 0xa1febe1fe1619f40ULL, 0xf308eb1810cbc3e3ULL, 0xb1cefe4f81e12f30ULL, 0x0206080a0c10160eULL, 0xcc4917db922e675eULL, 0xc45137f3a26e3f66ULL, 0x1d2774694ee8cf53ULL, 0x143c504478a09c6cULL, 0xc3582be8b0560e73ULL, 0x63a591f2573f9a34ULL, 0xda734f95e69eed3cULL, 0x5de76934d3d2358eULL, 0x5fe1613edfc22380ULL, 0xdc79578bf2aed72eULL, 0x7d87e99413cf486eULL, 0xcd4a13de94266c59ULL, 0x7f81e19e1fdf5e60ULL, 0x5aee752fc1ea049bULL, 0x6cb4adc17547f319ULL, 0x5ce46d31d5da3e89ULL, 0xf704fb0c08ebefffULL, 0x266a98bed42d47f2ULL, 0xff1cdb2438abb7c7ULL, 0xed2a937e543b11b9ULL, 0xe825876f4a1336a2ULL, 0x9dba4ed3699c26f4ULL, 0x6fb1a1ce7f5fee10ULL, 0x8e8f028c03048b8dULL, 0x192b647d56c8e34fULL, 0xa0fdba1ae7699447ULL, 0xf00de7171ad3deeaULL, 0x89861e97113cba98ULL, 0x0f113c332278692dULL, 0x07091c1b12383115ULL, 0xafec8629c511fd6aULL, 0xfb10cb30208b9bdbULL, 0x0818202830405838ULL, 0x153f54417ea8976bULL, 0x0d1734392e687f23ULL, 0x040c101418202c1cULL, 0x0103040506080b07ULL, 0x64ac8de94507ab21ULL, 0xdf7c5b84f8b6ca27ULL, 0x769ac5b329970d5fULL, 0x798bf9800bef6472ULL, 0xdd7a538ef4a6dc29ULL, 0x3d47f4c98ef5b2b3ULL, 0x163a584e74b08a62ULL, 0x3f41fcc382e5a4bdULL, 0x3759dcebb2a5fc85ULL, 0x6db7a9c4734ff81eULL, 0x3848e0d890dd95a8ULL, 0xb9d6de67b1a17708ULL, 0x7395d1a237bf2a44ULL, 0xe926836a4c1b3da5ULL, 0x355fd4e1beb5ea8bULL, 0x55ff491ce3926db6ULL, 0x7193d9a83baf3c4aULL, 0x7b8df18a07ff727cULL, 0x8c890a860f149d83ULL, 0x7296d5a731b72143ULL, 0x88851a921734b19fULL, 0xf607ff090ee3e4f8ULL, 0x2a7ea882fc4d33d6ULL, 0x3e42f8c684edafbaULL, 0x5ee2653bd9ca2887ULL, 0x27699cbbd2254cf5ULL, 0x46ca0543890ac0cfULL, 0x0c14303c28607424ULL, 0x65af89ec430fa026ULL, 0x68b8bdd56d67df05ULL, 0x61a399f85b2f8c3aULL, 0x03050c0f0a181d09ULL, 0xc15e23e2bc46187dULL, 0x57f94116ef827bb8ULL, 0xd6677fa9cefe9918ULL, 0xd976439aec86f035ULL, 0x58e87d25cdfa1295ULL, 0xd875479fea8efb32ULL, 0x66aa85e34917bd2fULL, 0xd7647bacc8f6921fULL, 0x3a4ee8d29ccd83a6ULL, 0xc84507cf8a0e4b42ULL, 0x3c44f0cc88fdb9b4ULL, 
0xfa13cf35268390dcULL, 0x96a762f453c463c5ULL, 0xa7f4a601f551a552ULL, 0x98b55ac277b401efULL, 0xec29977b52331abeULL, 0xb8d5da62b7a97c0fULL, 0xc7543bfca876226fULL, 0xaeef822cc319f66dULL, 0x69bbb9d06b6fd402ULL, 0x4bdd317aa762bfecULL, 0xabe0963ddd31d176ULL, 0xa9e69e37d121c778ULL, 0x67a981e64f1fb628ULL, 0x0a1e28223c504e36ULL, 0x47c901468f02cbc8ULL, 0xf20bef1d16c3c8e4ULL, 0xb5c2ee5b99c1032cULL, 0x226688aacc0d6beeULL, 0xe532b356647b4981ULL, 0xee2f9f715e230cb0ULL, 0xbedfc27ca399461dULL, 0x2b7dac87fa4538d1ULL, 0x819e3ebf217ce2a0ULL, 0x1236485a6c90a67eULL, 0x839836b52d6cf4aeULL, 0x1b2d6c775ad8f541ULL, 0x0e1238362470622aULL, 0x23658cafca0560e9ULL, 0xf502f30604fbf9f1ULL, 0x45cf094c8312ddc6ULL, 0x216384a5c61576e7ULL, 0xce4f1fd19e3e7150ULL, 0x49db3970ab72a9e2ULL, 0x2c74b09ce87d09c4ULL, 0xf916c33a2c9b8dd5ULL, 0xe637bf596e635488ULL, 0xb6c7e25493d91e25ULL, 0x2878a088f05d25d8ULL, 0x17395c4b72b88165ULL, 0x829b32b02b64ffa9ULL, 0x1a2e68725cd0fe46ULL, 0x8b80169d1d2cac96ULL, 0xfe1fdf213ea3bcc0ULL, 0x8a8312981b24a791ULL, 0x091b242d3648533fULL, 0xc94603ca8c064045ULL, 0x879426a1354cd8b2ULL, 0x4ed2256bb94a98f7ULL, 0xe13ea3427c5b659dULL, 0x2e72b896e46d1fcaULL, 0xe431b75362734286ULL, 0xe03da7477a536e9aULL, 0xeb208b60400b2babULL, 0x90ad7aea47f459d7ULL, 0xa4f1aa0eff49b85bULL, 0x1e22786644f0d25aULL, 0x85922eab395ccebcULL, 0x60a09dfd5d27873dULL, 0x0000000000000000ULL, 0x256f94b1de355afbULL, 0xf401f70302f3f2f6ULL, 0xf10ee3121cdbd5edULL, 0x94a16afe5fd475cbULL, 0x0b1d2c273a584531ULL, 0xe734bb5c686b5f8fULL, 0x759fc9bc238f1056ULL, 0xef2c9b74582b07b7ULL, 0x345cd0e4b8bde18cULL, 0x3153c4f5a695c697ULL, 0xd46177a3c2ee8f16ULL, 0xd06d67b7dacea30aULL, 0x869722a43344d3b5ULL, 0x7e82e59b19d75567ULL, 0xadea8e23c901eb64ULL, 0xfd1ad32e34bba1c9ULL, 0x297ba48df6552edfULL, 0x3050c0f0a09dcd90ULL, 0x3b4decd79ac588a1ULL, 0x9fbc46d9658c30faULL, 0xf815c73f2a9386d2ULL, 0xc6573ff9ae7e2968ULL, 0x13354c5f6a98ad79ULL, 0x060a181e14303a12ULL, 0x050f14111e28271bULL, 0xc55233f6a4663461ULL, 0x113344556688bb77ULL, 
0x7799c1b62f9f0658ULL, 0x7c84ed9115c74369ULL, 0x7a8ef58f01f7797bULL, 0x7888fd850de76f75ULL, 0x365ad8eeb4adf782ULL, 0x1c24706c48e0c454ULL, 0x394be4dd96d59eafULL, 0x59eb7920cbf21992ULL, 0x1828607850c0e848ULL, 0x56fa4513e98a70bfULL, 0xb3c8f6458df1393eULL, 0xb0cdfa4a87e92437ULL, 0x246c90b4d83d51fcULL, 0x206080a0c01d7de0ULL, 0xb2cbf2408bf93239ULL, 0x92ab72e04be44fd9ULL, 0xa3f8b615ed71894eULL, 0xc05d27e7ba4e137aULL, 0x44cc0d49851ad6c1ULL, 0x62a695f751379133ULL, 0x103040506080b070ULL, 0xb4c1ea5e9fc9082bULL, 0x84912aae3f54c5bbULL, 0x43c511529722e7d4ULL, 0x93a876e54dec44deULL, 0xc25b2fedb65e0574ULL, 0x4ade357fa16ab4ebULL, 0xbddace73a9815b14ULL, 0x8f8c0689050c808aULL, 0x2d77b499ee7502c3ULL, 0xbcd9ca76af895013ULL, 0x9cb94ad66f942df3ULL, 0x6abeb5df6177c90bULL, 0x40c01d5d9d3afaddULL, 0xcf4c1bd498367a57ULL, 0xa2fbb210eb798249ULL, 0x809d3aba2774e9a7ULL, 0x4fd1216ebf4293f0ULL, 0x1f217c6342f8d95dULL, 0xca430fc5861e5d4cULL, 0xaae39238db39da71ULL, 0x42c61557912aecd3ULL }; static const u64 T1[256] = { 0xd3ba68d2b9bb016aULL, 0xfc54194d9ae5b166ULL, 0x712f93bc65e2cd14ULL, 0x9c74b9cd8725511bULL, 0xf5530251a2f7a457ULL, 0x68d3b86bd6d003beULL, 0x6bd2bd6fded604b5ULL, 0xd74d642952b3fe85ULL, 0xf0500d5dbafdad4aULL, 0xe9ac268a09cf63e0ULL, 0x8a8d830e1c098496ULL, 0xdcbf79c691a51a4dULL, 0x9070addda73d4d37ULL, 0xf6520755aaf1a35cULL, 0xb39ac852a47be117ULL, 0xd44c612d5ab5f98eULL, 0x23ea658f0346ac20ULL, 0x62d5a673e6c41184ULL, 0xa497f166cc55c268ULL, 0x6ed1b263c6dc0da8ULL, 0x5533ffcc85aa99d0ULL, 0xf3510859b2fbaa41ULL, 0xed5b2a71e2c79c0fULL, 0xf7a604a259f355aeULL, 0x7fde815fbefe20c1ULL, 0xd848753d7aade5a2ULL, 0xe5a8329a29d77fccULL, 0xb699c75ebc71e80aULL, 0x70db904b96e03be6ULL, 0x5632fac88dac9edbULL, 0xc4b751e6d1952215ULL, 0x19fc2bd7b332ceaaULL, 0x38e348ab4b709373ULL, 0xbf9edc428463fd3bULL, 0xae91ef7efc41d052ULL, 0xb09bcd56ac7de61cULL, 0x3be24daf43769478ULL, 0xd0bb6dd6b1bd0661ULL, 0xc3415819329bdaf1ULL, 0xb26ecba5577917e5ULL, 0xf2a50bae41f95cb3ULL, 0x40cbc00b16804b56ULL, 0xbd6bdab17f670cc2ULL, 
0xa295fb6edc59cc7eULL, 0xfea11fbe61e1409fULL, 0x08f318ebcb10e3c3ULL, 0xceb14ffee181302fULL, 0x06020a08100c0e16ULL, 0x49ccdb172e925e67ULL, 0x51c4f3376ea2663fULL, 0x271d6974e84e53cfULL, 0x3c144450a0786c9cULL, 0x58c3e82b56b0730eULL, 0xa563f2913f57349aULL, 0x73da954f9ee63cedULL, 0xe75d3469d2d38e35ULL, 0xe15f3e61c2df8023ULL, 0x79dc8b57aef22ed7ULL, 0x877d94e9cf136e48ULL, 0x4acdde132694596cULL, 0x817f9ee1df1f605eULL, 0xee5a2f75eac19b04ULL, 0xb46cc1ad477519f3ULL, 0xe45c316ddad5893eULL, 0x04f70cfbeb08ffefULL, 0x6a26be982dd4f247ULL, 0x1cff24dbab38c7b7ULL, 0x2aed7e933b54b911ULL, 0x25e86f87134aa236ULL, 0xba9dd34e9c69f426ULL, 0xb16fcea15f7f10eeULL, 0x8f8e8c0204038d8bULL, 0x2b197d64c8564fe3ULL, 0xfda01aba69e74794ULL, 0x0df017e7d31aeadeULL, 0x8689971e3c1198baULL, 0x110f333c78222d69ULL, 0x09071b1c38121531ULL, 0xecaf298611c56afdULL, 0x10fb30cb8b20db9bULL, 0x1808282040303858ULL, 0x3f154154a87e6b97ULL, 0x170d3934682e237fULL, 0x0c04141020181c2cULL, 0x030105040806070bULL, 0xac64e98d074521abULL, 0x7cdf845bb6f827caULL, 0x9a76b3c597295f0dULL, 0x8b7980f9ef0b7264ULL, 0x7add8e53a6f429dcULL, 0x473dc9f4f58eb3b2ULL, 0x3a164e58b074628aULL, 0x413fc3fce582bda4ULL, 0x5937ebdca5b285fcULL, 0xb76dc4a94f731ef8ULL, 0x4838d8e0dd90a895ULL, 0xd6b967dea1b10877ULL, 0x9573a2d1bf37442aULL, 0x26e96a831b4ca53dULL, 0x5f35e1d4b5be8beaULL, 0xff551c4992e3b66dULL, 0x9371a8d9af3b4a3cULL, 0x8d7b8af1ff077c72ULL, 0x898c860a140f839dULL, 0x9672a7d5b7314321ULL, 0x8588921a34179fb1ULL, 0x07f609ffe30ef8e4ULL, 0x7e2a82a84dfcd633ULL, 0x423ec6f8ed84baafULL, 0xe25e3b65cad98728ULL, 0x6927bb9c25d2f54cULL, 0xca4643050a89cfc0ULL, 0x140c3c3060282474ULL, 0xaf65ec890f4326a0ULL, 0xb868d5bd676d05dfULL, 0xa361f8992f5b3a8cULL, 0x05030f0c180a091dULL, 0x5ec1e22346bc7d18ULL, 0xf957164182efb87bULL, 0x67d6a97ffece1899ULL, 0x76d99a4386ec35f0ULL, 0xe858257dfacd9512ULL, 0x75d89f478eea32fbULL, 0xaa66e38517492fbdULL, 0x64d7ac7bf6c81f92ULL, 0x4e3ad2e8cd9ca683ULL, 0x45c8cf070e8a424bULL, 0x443cccf0fd88b4b9ULL, 0x13fa35cf8326dc90ULL, 
0xa796f462c453c563ULL, 0xf4a701a651f552a5ULL, 0xb598c25ab477ef01ULL, 0x29ec7b973352be1aULL, 0xd5b862daa9b70f7cULL, 0x54c7fc3b76a86f22ULL, 0xefae2c8219c36df6ULL, 0xbb69d0b96f6b02d4ULL, 0xdd4b7a3162a7ecbfULL, 0xe0ab3d9631dd76d1ULL, 0xe6a9379e21d178c7ULL, 0xa967e6811f4f28b6ULL, 0x1e0a2228503c364eULL, 0xc9474601028fc8cbULL, 0x0bf21defc316e4c8ULL, 0xc2b55beec1992c03ULL, 0x6622aa880dccee6bULL, 0x32e556b37b648149ULL, 0x2fee719f235eb00cULL, 0xdfbe7cc299a31d46ULL, 0x7d2b87ac45fad138ULL, 0x9e81bf3e7c21a0e2ULL, 0x36125a48906c7ea6ULL, 0x9883b5366c2daef4ULL, 0x2d1b776cd85a41f5ULL, 0x120e363870242a62ULL, 0x6523af8c05cae960ULL, 0x02f506f3fb04f1f9ULL, 0xcf454c091283c6ddULL, 0x6321a58415c6e776ULL, 0x4fced11f3e9e5071ULL, 0xdb49703972abe2a9ULL, 0x742c9cb07de8c409ULL, 0x16f93ac39b2cd58dULL, 0x37e659bf636e8854ULL, 0xc7b654e2d993251eULL, 0x782888a05df0d825ULL, 0x39174b5cb8726581ULL, 0x9b82b032642ba9ffULL, 0x2e1a7268d05c46feULL, 0x808b9d162c1d96acULL, 0x1ffe21dfa33ec0bcULL, 0x838a9812241b91a7ULL, 0x1b092d2448363f53ULL, 0x46c9ca03068c4540ULL, 0x9487a1264c35b2d8ULL, 0xd24e6b254ab9f798ULL, 0x3ee142a35b7c9d65ULL, 0x722e96b86de4ca1fULL, 0x31e453b773628642ULL, 0x3de047a7537a9a6eULL, 0x20eb608b0b40ab2bULL, 0xad90ea7af447d759ULL, 0xf1a40eaa49ff5bb8ULL, 0x221e6678f0445ad2ULL, 0x9285ab2e5c39bcceULL, 0xa060fd9d275d3d87ULL, 0x0000000000000000ULL, 0x6f25b19435defb5aULL, 0x01f403f7f302f6f2ULL, 0x0ef112e3db1cedd5ULL, 0xa194fe6ad45fcb75ULL, 0x1d0b272c583a3145ULL, 0x34e75cbb6b688f5fULL, 0x9f75bcc98f235610ULL, 0x2cef749b2b58b707ULL, 0x5c34e4d0bdb88ce1ULL, 0x5331f5c495a697c6ULL, 0x61d4a377eec2168fULL, 0x6dd0b767ceda0aa3ULL, 0x9786a4224433b5d3ULL, 0x827e9be5d7196755ULL, 0xeaad238e01c964ebULL, 0x1afd2ed3bb34c9a1ULL, 0x7b298da455f6df2eULL, 0x5030f0c09da090cdULL, 0x4d3bd7ecc59aa188ULL, 0xbc9fd9468c65fa30ULL, 0x15f83fc7932ad286ULL, 0x57c6f93f7eae6829ULL, 0x35135f4c986a79adULL, 0x0a061e183014123aULL, 0x0f051114281e1b27ULL, 0x52c5f63366a46134ULL, 0x33115544886677bbULL, 0x9977b6c19f2f5806ULL, 
0x847c91edc7156943ULL, 0x8e7a8ff5f7017b79ULL, 0x887885fde70d756fULL, 0x5a36eed8adb482f7ULL, 0x241c6c70e04854c4ULL, 0x4b39dde4d596af9eULL, 0xeb592079f2cb9219ULL, 0x28187860c05048e8ULL, 0xfa5613458ae9bf70ULL, 0xc8b345f6f18d3e39ULL, 0xcdb04afae9873724ULL, 0x6c24b4903dd8fc51ULL, 0x6020a0801dc0e07dULL, 0xcbb240f2f98b3932ULL, 0xab92e072e44bd94fULL, 0xf8a315b671ed4e89ULL, 0x5dc0e7274eba7a13ULL, 0xcc44490d1a85c1d6ULL, 0xa662f79537513391ULL, 0x30105040806070b0ULL, 0xc1b45eeac99f2b08ULL, 0x9184ae2a543fbbc5ULL, 0xc54352112297d4e7ULL, 0xa893e576ec4dde44ULL, 0x5bc2ed2f5eb67405ULL, 0xde4a7f356aa1ebb4ULL, 0xdabd73ce81a9145bULL, 0x8c8f89060c058a80ULL, 0x772d99b475eec302ULL, 0xd9bc76ca89af1350ULL, 0xb99cd64a946ff32dULL, 0xbe6adfb577610bc9ULL, 0xc0405d1d3a9dddfaULL, 0x4ccfd41b3698577aULL, 0xfba210b279eb4982ULL, 0x9d80ba3a7427a7e9ULL, 0xd14f6e2142bff093ULL, 0x211f637cf8425dd9ULL, 0x43cac50f1e864c5dULL, 0xe3aa389239db71daULL, 0xc64257152a91d3ecULL }; static const u64 T2[256] = { 0xd268bad36a01bbb9ULL, 0x4d1954fc66b1e59aULL, 0xbc932f7114cde265ULL, 0xcdb9749c1b512587ULL, 0x510253f557a4f7a2ULL, 0x6bb8d368be03d0d6ULL, 0x6fbdd26bb504d6deULL, 0x29644dd785feb352ULL, 0x5d0d50f04aadfdbaULL, 0x8a26ace9e063cf09ULL, 0x0e838d8a9684091cULL, 0xc679bfdc4d1aa591ULL, 0xddad7090374d3da7ULL, 0x550752f65ca3f1aaULL, 0x52c89ab317e17ba4ULL, 0x2d614cd48ef9b55aULL, 0x8f65ea2320ac4603ULL, 0x73a6d5628411c4e6ULL, 0x66f197a468c255ccULL, 0x63b2d16ea80ddcc6ULL, 0xccff3355d099aa85ULL, 0x590851f341aafbb2ULL, 0x712a5bed0f9cc7e2ULL, 0xa204a6f7ae55f359ULL, 0x5f81de7fc120febeULL, 0x3d7548d8a2e5ad7aULL, 0x9a32a8e5cc7fd729ULL, 0x5ec799b60ae871bcULL, 0x4b90db70e63be096ULL, 0xc8fa3256db9eac8dULL, 0xe651b7c4152295d1ULL, 0xd72bfc19aace32b3ULL, 0xab48e3387393704bULL, 0x42dc9ebf3bfd6384ULL, 0x7eef91ae52d041fcULL, 0x56cd9bb01ce67dacULL, 0xaf4de23b78947643ULL, 0xd66dbbd06106bdb1ULL, 0x195841c3f1da9b32ULL, 0xa5cb6eb2e5177957ULL, 0xae0ba5f2b35cf941ULL, 0x0bc0cb40564b8016ULL, 0xb1da6bbdc20c677fULL, 0x6efb95a27ecc59dcULL, 
0xbe1fa1fe9f40e161ULL, 0xeb18f308c3e310cbULL, 0xfe4fb1ce2f3081e1ULL, 0x080a0206160e0c10ULL, 0x17dbcc49675e922eULL, 0x37f3c4513f66a26eULL, 0x74691d27cf534ee8ULL, 0x5044143c9c6c78a0ULL, 0x2be8c3580e73b056ULL, 0x91f263a59a34573fULL, 0x4f95da73ed3ce69eULL, 0x69345de7358ed3d2ULL, 0x613e5fe12380dfc2ULL, 0x578bdc79d72ef2aeULL, 0xe9947d87486e13cfULL, 0x13decd4a6c599426ULL, 0xe19e7f815e601fdfULL, 0x752f5aee049bc1eaULL, 0xadc16cb4f3197547ULL, 0x6d315ce43e89d5daULL, 0xfb0cf704efff08ebULL, 0x98be266a47f2d42dULL, 0xdb24ff1cb7c738abULL, 0x937eed2a11b9543bULL, 0x876fe82536a24a13ULL, 0x4ed39dba26f4699cULL, 0xa1ce6fb1ee107f5fULL, 0x028c8e8f8b8d0304ULL, 0x647d192be34f56c8ULL, 0xba1aa0fd9447e769ULL, 0xe717f00ddeea1ad3ULL, 0x1e978986ba98113cULL, 0x3c330f11692d2278ULL, 0x1c1b070931151238ULL, 0x8629afecfd6ac511ULL, 0xcb30fb109bdb208bULL, 0x2028081858383040ULL, 0x5441153f976b7ea8ULL, 0x34390d177f232e68ULL, 0x1014040c2c1c1820ULL, 0x040501030b070608ULL, 0x8de964acab214507ULL, 0x5b84df7cca27f8b6ULL, 0xc5b3769a0d5f2997ULL, 0xf980798b64720befULL, 0x538edd7adc29f4a6ULL, 0xf4c93d47b2b38ef5ULL, 0x584e163a8a6274b0ULL, 0xfcc33f41a4bd82e5ULL, 0xdceb3759fc85b2a5ULL, 0xa9c46db7f81e734fULL, 0xe0d8384895a890ddULL, 0xde67b9d67708b1a1ULL, 0xd1a273952a4437bfULL, 0x836ae9263da54c1bULL, 0xd4e1355fea8bbeb5ULL, 0x491c55ff6db6e392ULL, 0xd9a871933c4a3bafULL, 0xf18a7b8d727c07ffULL, 0x0a868c899d830f14ULL, 0xd5a77296214331b7ULL, 0x1a928885b19f1734ULL, 0xff09f607e4f80ee3ULL, 0xa8822a7e33d6fc4dULL, 0xf8c63e42afba84edULL, 0x653b5ee22887d9caULL, 0x9cbb27694cf5d225ULL, 0x054346cac0cf890aULL, 0x303c0c1474242860ULL, 0x89ec65afa026430fULL, 0xbdd568b8df056d67ULL, 0x99f861a38c3a5b2fULL, 0x0c0f03051d090a18ULL, 0x23e2c15e187dbc46ULL, 0x411657f97bb8ef82ULL, 0x7fa9d6679918cefeULL, 0x439ad976f035ec86ULL, 0x7d2558e81295cdfaULL, 0x479fd875fb32ea8eULL, 0x85e366aabd2f4917ULL, 0x7bacd764921fc8f6ULL, 0xe8d23a4e83a69ccdULL, 0x07cfc8454b428a0eULL, 0xf0cc3c44b9b488fdULL, 0xcf35fa1390dc2683ULL, 0x62f496a763c553c4ULL, 
0xa601a7f4a552f551ULL, 0x5ac298b501ef77b4ULL, 0x977bec291abe5233ULL, 0xda62b8d57c0fb7a9ULL, 0x3bfcc754226fa876ULL, 0x822caeeff66dc319ULL, 0xb9d069bbd4026b6fULL, 0x317a4bddbfeca762ULL, 0x963dabe0d176dd31ULL, 0x9e37a9e6c778d121ULL, 0x81e667a9b6284f1fULL, 0x28220a1e4e363c50ULL, 0x014647c9cbc88f02ULL, 0xef1df20bc8e416c3ULL, 0xee5bb5c2032c99c1ULL, 0x88aa22666beecc0dULL, 0xb356e5324981647bULL, 0x9f71ee2f0cb05e23ULL, 0xc27cbedf461da399ULL, 0xac872b7d38d1fa45ULL, 0x3ebf819ee2a0217cULL, 0x485a1236a67e6c90ULL, 0x36b58398f4ae2d6cULL, 0x6c771b2df5415ad8ULL, 0x38360e12622a2470ULL, 0x8caf236560e9ca05ULL, 0xf306f502f9f104fbULL, 0x094c45cfddc68312ULL, 0x84a5216376e7c615ULL, 0x1fd1ce4f71509e3eULL, 0x397049dba9e2ab72ULL, 0xb09c2c7409c4e87dULL, 0xc33af9168dd52c9bULL, 0xbf59e63754886e63ULL, 0xe254b6c71e2593d9ULL, 0xa088287825d8f05dULL, 0x5c4b1739816572b8ULL, 0x32b0829bffa92b64ULL, 0x68721a2efe465cd0ULL, 0x169d8b80ac961d2cULL, 0xdf21fe1fbcc03ea3ULL, 0x12988a83a7911b24ULL, 0x242d091b533f3648ULL, 0x03cac94640458c06ULL, 0x26a18794d8b2354cULL, 0x256b4ed298f7b94aULL, 0xa342e13e659d7c5bULL, 0xb8962e721fcae46dULL, 0xb753e43142866273ULL, 0xa747e03d6e9a7a53ULL, 0x8b60eb202bab400bULL, 0x7aea90ad59d747f4ULL, 0xaa0ea4f1b85bff49ULL, 0x78661e22d25a44f0ULL, 0x2eab8592cebc395cULL, 0x9dfd60a0873d5d27ULL, 0x0000000000000000ULL, 0x94b1256f5afbde35ULL, 0xf703f401f2f602f3ULL, 0xe312f10ed5ed1cdbULL, 0x6afe94a175cb5fd4ULL, 0x2c270b1d45313a58ULL, 0xbb5ce7345f8f686bULL, 0xc9bc759f1056238fULL, 0x9b74ef2c07b7582bULL, 0xd0e4345ce18cb8bdULL, 0xc4f53153c697a695ULL, 0x77a3d4618f16c2eeULL, 0x67b7d06da30adaceULL, 0x22a48697d3b53344ULL, 0xe59b7e82556719d7ULL, 0x8e23adeaeb64c901ULL, 0xd32efd1aa1c934bbULL, 0xa48d297b2edff655ULL, 0xc0f03050cd90a09dULL, 0xecd73b4d88a19ac5ULL, 0x46d99fbc30fa658cULL, 0xc73ff81586d22a93ULL, 0x3ff9c6572968ae7eULL, 0x4c5f1335ad796a98ULL, 0x181e060a3a121430ULL, 0x1411050f271b1e28ULL, 0x33f6c5523461a466ULL, 0x44551133bb776688ULL, 0xc1b6779906582f9fULL, 0xed917c84436915c7ULL, 
0xf58f7a8e797b01f7ULL, 0xfd8578886f750de7ULL, 0xd8ee365af782b4adULL, 0x706c1c24c45448e0ULL, 0xe4dd394b9eaf96d5ULL, 0x792059eb1992cbf2ULL, 0x60781828e84850c0ULL, 0x451356fa70bfe98aULL, 0xf645b3c8393e8df1ULL, 0xfa4ab0cd243787e9ULL, 0x90b4246c51fcd83dULL, 0x80a020607de0c01dULL, 0xf240b2cb32398bf9ULL, 0x72e092ab4fd94be4ULL, 0xb615a3f8894eed71ULL, 0x27e7c05d137aba4eULL, 0x0d4944ccd6c1851aULL, 0x95f762a691335137ULL, 0x40501030b0706080ULL, 0xea5eb4c1082b9fc9ULL, 0x2aae8491c5bb3f54ULL, 0x115243c5e7d49722ULL, 0x76e593a844de4decULL, 0x2fedc25b0574b65eULL, 0x357f4adeb4eba16aULL, 0xce73bdda5b14a981ULL, 0x06898f8c808a050cULL, 0xb4992d7702c3ee75ULL, 0xca76bcd95013af89ULL, 0x4ad69cb92df36f94ULL, 0xb5df6abec90b6177ULL, 0x1d5d40c0fadd9d3aULL, 0x1bd4cf4c7a579836ULL, 0xb210a2fb8249eb79ULL, 0x3aba809de9a72774ULL, 0x216e4fd193f0bf42ULL, 0x7c631f21d95d42f8ULL, 0x0fc5ca435d4c861eULL, 0x9238aae3da71db39ULL, 0x155742c6ecd3912aULL }; static const u64 T3[256] = { 0x68d2d3ba016ab9bbULL, 0x194dfc54b1669ae5ULL, 0x93bc712fcd1465e2ULL, 0xb9cd9c74511b8725ULL, 0x0251f553a457a2f7ULL, 0xb86b68d303bed6d0ULL, 0xbd6f6bd204b5ded6ULL, 0x6429d74dfe8552b3ULL, 0x0d5df050ad4abafdULL, 0x268ae9ac63e009cfULL, 0x830e8a8d84961c09ULL, 0x79c6dcbf1a4d91a5ULL, 0xaddd90704d37a73dULL, 0x0755f652a35caaf1ULL, 0xc852b39ae117a47bULL, 0x612dd44cf98e5ab5ULL, 0x658f23eaac200346ULL, 0xa67362d51184e6c4ULL, 0xf166a497c268cc55ULL, 0xb2636ed10da8c6dcULL, 0xffcc553399d085aaULL, 0x0859f351aa41b2fbULL, 0x2a71ed5b9c0fe2c7ULL, 0x04a2f7a655ae59f3ULL, 0x815f7fde20c1befeULL, 0x753dd848e5a27aadULL, 0x329ae5a87fcc29d7ULL, 0xc75eb699e80abc71ULL, 0x904b70db3be696e0ULL, 0xfac856329edb8dacULL, 0x51e6c4b72215d195ULL, 0x2bd719fcceaab332ULL, 0x48ab38e393734b70ULL, 0xdc42bf9efd3b8463ULL, 0xef7eae91d052fc41ULL, 0xcd56b09be61cac7dULL, 0x4daf3be294784376ULL, 0x6dd6d0bb0661b1bdULL, 0x5819c341daf1329bULL, 0xcba5b26e17e55779ULL, 0x0baef2a55cb341f9ULL, 0xc00b40cb4b561680ULL, 0xdab1bd6b0cc27f67ULL, 0xfb6ea295cc7edc59ULL, 0x1fbefea1409f61e1ULL, 
0x18eb08f3e3c3cb10ULL, 0x4ffeceb1302fe181ULL, 0x0a0806020e16100cULL, 0xdb1749cc5e672e92ULL, 0xf33751c4663f6ea2ULL, 0x6974271d53cfe84eULL, 0x44503c146c9ca078ULL, 0xe82b58c3730e56b0ULL, 0xf291a563349a3f57ULL, 0x954f73da3ced9ee6ULL, 0x3469e75d8e35d2d3ULL, 0x3e61e15f8023c2dfULL, 0x8b5779dc2ed7aef2ULL, 0x94e9877d6e48cf13ULL, 0xde134acd596c2694ULL, 0x9ee1817f605edf1fULL, 0x2f75ee5a9b04eac1ULL, 0xc1adb46c19f34775ULL, 0x316de45c893edad5ULL, 0x0cfb04f7ffefeb08ULL, 0xbe986a26f2472dd4ULL, 0x24db1cffc7b7ab38ULL, 0x7e932aedb9113b54ULL, 0x6f8725e8a236134aULL, 0xd34eba9df4269c69ULL, 0xcea1b16f10ee5f7fULL, 0x8c028f8e8d8b0403ULL, 0x7d642b194fe3c856ULL, 0x1abafda0479469e7ULL, 0x17e70df0eaded31aULL, 0x971e868998ba3c11ULL, 0x333c110f2d697822ULL, 0x1b1c090715313812ULL, 0x2986ecaf6afd11c5ULL, 0x30cb10fbdb9b8b20ULL, 0x2820180838584030ULL, 0x41543f156b97a87eULL, 0x3934170d237f682eULL, 0x14100c041c2c2018ULL, 0x05040301070b0806ULL, 0xe98dac6421ab0745ULL, 0x845b7cdf27cab6f8ULL, 0xb3c59a765f0d9729ULL, 0x80f98b797264ef0bULL, 0x8e537add29dca6f4ULL, 0xc9f4473db3b2f58eULL, 0x4e583a16628ab074ULL, 0xc3fc413fbda4e582ULL, 0xebdc593785fca5b2ULL, 0xc4a9b76d1ef84f73ULL, 0xd8e04838a895dd90ULL, 0x67ded6b90877a1b1ULL, 0xa2d19573442abf37ULL, 0x6a8326e9a53d1b4cULL, 0xe1d45f358beab5beULL, 0x1c49ff55b66d92e3ULL, 0xa8d993714a3caf3bULL, 0x8af18d7b7c72ff07ULL, 0x860a898c839d140fULL, 0xa7d596724321b731ULL, 0x921a85889fb13417ULL, 0x09ff07f6f8e4e30eULL, 0x82a87e2ad6334dfcULL, 0xc6f8423ebaafed84ULL, 0x3b65e25e8728cad9ULL, 0xbb9c6927f54c25d2ULL, 0x4305ca46cfc00a89ULL, 0x3c30140c24746028ULL, 0xec89af6526a00f43ULL, 0xd5bdb86805df676dULL, 0xf899a3613a8c2f5bULL, 0x0f0c0503091d180aULL, 0xe2235ec17d1846bcULL, 0x1641f957b87b82efULL, 0xa97f67d61899feceULL, 0x9a4376d935f086ecULL, 0x257de8589512facdULL, 0x9f4775d832fb8eeaULL, 0xe385aa662fbd1749ULL, 0xac7b64d71f92f6c8ULL, 0xd2e84e3aa683cd9cULL, 0xcf0745c8424b0e8aULL, 0xccf0443cb4b9fd88ULL, 0x35cf13fadc908326ULL, 0xf462a796c563c453ULL, 0x01a6f4a752a551f5ULL, 
0xc25ab598ef01b477ULL, 0x7b9729ecbe1a3352ULL, 0x62dad5b80f7ca9b7ULL, 0xfc3b54c76f2276a8ULL, 0x2c82efae6df619c3ULL, 0xd0b9bb6902d46f6bULL, 0x7a31dd4becbf62a7ULL, 0x3d96e0ab76d131ddULL, 0x379ee6a978c721d1ULL, 0xe681a96728b61f4fULL, 0x22281e0a364e503cULL, 0x4601c947c8cb028fULL, 0x1def0bf2e4c8c316ULL, 0x5beec2b52c03c199ULL, 0xaa886622ee6b0dccULL, 0x56b332e581497b64ULL, 0x719f2feeb00c235eULL, 0x7cc2dfbe1d4699a3ULL, 0x87ac7d2bd13845faULL, 0xbf3e9e81a0e27c21ULL, 0x5a4836127ea6906cULL, 0xb5369883aef46c2dULL, 0x776c2d1b41f5d85aULL, 0x3638120e2a627024ULL, 0xaf8c6523e96005caULL, 0x06f302f5f1f9fb04ULL, 0x4c09cf45c6dd1283ULL, 0xa5846321e77615c6ULL, 0xd11f4fce50713e9eULL, 0x7039db49e2a972abULL, 0x9cb0742cc4097de8ULL, 0x3ac316f9d58d9b2cULL, 0x59bf37e68854636eULL, 0x54e2c7b6251ed993ULL, 0x88a07828d8255df0ULL, 0x4b5c39176581b872ULL, 0xb0329b82a9ff642bULL, 0x72682e1a46fed05cULL, 0x9d16808b96ac2c1dULL, 0x21df1ffec0bca33eULL, 0x9812838a91a7241bULL, 0x2d241b093f534836ULL, 0xca0346c94540068cULL, 0xa1269487b2d84c35ULL, 0x6b25d24ef7984ab9ULL, 0x42a33ee19d655b7cULL, 0x96b8722eca1f6de4ULL, 0x53b731e486427362ULL, 0x47a73de09a6e537aULL, 0x608b20ebab2b0b40ULL, 0xea7aad90d759f447ULL, 0x0eaaf1a45bb849ffULL, 0x6678221e5ad2f044ULL, 0xab2e9285bcce5c39ULL, 0xfd9da0603d87275dULL, 0x0000000000000000ULL, 0xb1946f25fb5a35deULL, 0x03f701f4f6f2f302ULL, 0x12e30ef1edd5db1cULL, 0xfe6aa194cb75d45fULL, 0x272c1d0b3145583aULL, 0x5cbb34e78f5f6b68ULL, 0xbcc99f7556108f23ULL, 0x749b2cefb7072b58ULL, 0xe4d05c348ce1bdb8ULL, 0xf5c4533197c695a6ULL, 0xa37761d4168feec2ULL, 0xb7676dd00aa3cedaULL, 0xa4229786b5d34433ULL, 0x9be5827e6755d719ULL, 0x238eeaad64eb01c9ULL, 0x2ed31afdc9a1bb34ULL, 0x8da47b29df2e55f6ULL, 0xf0c0503090cd9da0ULL, 0xd7ec4d3ba188c59aULL, 0xd946bc9ffa308c65ULL, 0x3fc715f8d286932aULL, 0xf93f57c668297eaeULL, 0x5f4c351379ad986aULL, 0x1e180a06123a3014ULL, 0x11140f051b27281eULL, 0xf63352c5613466a4ULL, 0x5544331177bb8866ULL, 0xb6c1997758069f2fULL, 0x91ed847c6943c715ULL, 0x8ff58e7a7b79f701ULL, 
0x85fd8878756fe70dULL, 0xeed85a3682f7adb4ULL, 0x6c70241c54c4e048ULL, 0xdde44b39af9ed596ULL, 0x2079eb599219f2cbULL, 0x7860281848e8c050ULL, 0x1345fa56bf708ae9ULL, 0x45f6c8b33e39f18dULL, 0x4afacdb03724e987ULL, 0xb4906c24fc513dd8ULL, 0xa0806020e07d1dc0ULL, 0x40f2cbb23932f98bULL, 0xe072ab92d94fe44bULL, 0x15b6f8a34e8971edULL, 0xe7275dc07a134ebaULL, 0x490dcc44c1d61a85ULL, 0xf795a66233913751ULL, 0x5040301070b08060ULL, 0x5eeac1b42b08c99fULL, 0xae2a9184bbc5543fULL, 0x5211c543d4e72297ULL, 0xe576a893de44ec4dULL, 0xed2f5bc274055eb6ULL, 0x7f35de4aebb46aa1ULL, 0x73cedabd145b81a9ULL, 0x89068c8f8a800c05ULL, 0x99b4772dc30275eeULL, 0x76cad9bc135089afULL, 0xd64ab99cf32d946fULL, 0xdfb5be6a0bc97761ULL, 0x5d1dc040ddfa3a9dULL, 0xd41b4ccf577a3698ULL, 0x10b2fba2498279ebULL, 0xba3a9d80a7e97427ULL, 0x6e21d14ff09342bfULL, 0x637c211f5dd9f842ULL, 0xc50f43ca4c5d1e86ULL, 0x3892e3aa71da39dbULL, 0x5715c642d3ec2a91ULL }; static const u64 T4[256] = { 0xbbb96a01bad3d268ULL, 0xe59a66b154fc4d19ULL, 0xe26514cd2f71bc93ULL, 0x25871b51749ccdb9ULL, 0xf7a257a453f55102ULL, 0xd0d6be03d3686bb8ULL, 0xd6deb504d26b6fbdULL, 0xb35285fe4dd72964ULL, 0xfdba4aad50f05d0dULL, 0xcf09e063ace98a26ULL, 0x091c96848d8a0e83ULL, 0xa5914d1abfdcc679ULL, 0x3da7374d7090ddadULL, 0xf1aa5ca352f65507ULL, 0x7ba417e19ab352c8ULL, 0xb55a8ef94cd42d61ULL, 0x460320acea238f65ULL, 0xc4e68411d56273a6ULL, 0x55cc68c297a466f1ULL, 0xdcc6a80dd16e63b2ULL, 0xaa85d0993355ccffULL, 0xfbb241aa51f35908ULL, 0xc7e20f9c5bed712aULL, 0xf359ae55a6f7a204ULL, 0xfebec120de7f5f81ULL, 0xad7aa2e548d83d75ULL, 0xd729cc7fa8e59a32ULL, 0x71bc0ae899b65ec7ULL, 0xe096e63bdb704b90ULL, 0xac8ddb9e3256c8faULL, 0x95d11522b7c4e651ULL, 0x32b3aacefc19d72bULL, 0x704b7393e338ab48ULL, 0x63843bfd9ebf42dcULL, 0x41fc52d091ae7eefULL, 0x7dac1ce69bb056cdULL, 0x76437894e23baf4dULL, 0xbdb16106bbd0d66dULL, 0x9b32f1da41c31958ULL, 0x7957e5176eb2a5cbULL, 0xf941b35ca5f2ae0bULL, 0x8016564bcb400bc0ULL, 0x677fc20c6bbdb1daULL, 0x59dc7ecc95a26efbULL, 0xe1619f40a1febe1fULL, 0x10cbc3e3f308eb18ULL, 
0x81e12f30b1cefe4fULL, 0x0c10160e0206080aULL, 0x922e675ecc4917dbULL, 0xa26e3f66c45137f3ULL, 0x4ee8cf531d277469ULL, 0x78a09c6c143c5044ULL, 0xb0560e73c3582be8ULL, 0x573f9a3463a591f2ULL, 0xe69eed3cda734f95ULL, 0xd3d2358e5de76934ULL, 0xdfc223805fe1613eULL, 0xf2aed72edc79578bULL, 0x13cf486e7d87e994ULL, 0x94266c59cd4a13deULL, 0x1fdf5e607f81e19eULL, 0xc1ea049b5aee752fULL, 0x7547f3196cb4adc1ULL, 0xd5da3e895ce46d31ULL, 0x08ebeffff704fb0cULL, 0xd42d47f2266a98beULL, 0x38abb7c7ff1cdb24ULL, 0x543b11b9ed2a937eULL, 0x4a1336a2e825876fULL, 0x699c26f49dba4ed3ULL, 0x7f5fee106fb1a1ceULL, 0x03048b8d8e8f028cULL, 0x56c8e34f192b647dULL, 0xe7699447a0fdba1aULL, 0x1ad3deeaf00de717ULL, 0x113cba9889861e97ULL, 0x2278692d0f113c33ULL, 0x1238311507091c1bULL, 0xc511fd6aafec8629ULL, 0x208b9bdbfb10cb30ULL, 0x3040583808182028ULL, 0x7ea8976b153f5441ULL, 0x2e687f230d173439ULL, 0x18202c1c040c1014ULL, 0x06080b0701030405ULL, 0x4507ab2164ac8de9ULL, 0xf8b6ca27df7c5b84ULL, 0x29970d5f769ac5b3ULL, 0x0bef6472798bf980ULL, 0xf4a6dc29dd7a538eULL, 0x8ef5b2b33d47f4c9ULL, 0x74b08a62163a584eULL, 0x82e5a4bd3f41fcc3ULL, 0xb2a5fc853759dcebULL, 0x734ff81e6db7a9c4ULL, 0x90dd95a83848e0d8ULL, 0xb1a17708b9d6de67ULL, 0x37bf2a447395d1a2ULL, 0x4c1b3da5e926836aULL, 0xbeb5ea8b355fd4e1ULL, 0xe3926db655ff491cULL, 0x3baf3c4a7193d9a8ULL, 0x07ff727c7b8df18aULL, 0x0f149d838c890a86ULL, 0x31b721437296d5a7ULL, 0x1734b19f88851a92ULL, 0x0ee3e4f8f607ff09ULL, 0xfc4d33d62a7ea882ULL, 0x84edafba3e42f8c6ULL, 0xd9ca28875ee2653bULL, 0xd2254cf527699cbbULL, 0x890ac0cf46ca0543ULL, 0x286074240c14303cULL, 0x430fa02665af89ecULL, 0x6d67df0568b8bdd5ULL, 0x5b2f8c3a61a399f8ULL, 0x0a181d0903050c0fULL, 0xbc46187dc15e23e2ULL, 0xef827bb857f94116ULL, 0xcefe9918d6677fa9ULL, 0xec86f035d976439aULL, 0xcdfa129558e87d25ULL, 0xea8efb32d875479fULL, 0x4917bd2f66aa85e3ULL, 0xc8f6921fd7647bacULL, 0x9ccd83a63a4ee8d2ULL, 0x8a0e4b42c84507cfULL, 0x88fdb9b43c44f0ccULL, 0x268390dcfa13cf35ULL, 0x53c463c596a762f4ULL, 0xf551a552a7f4a601ULL, 0x77b401ef98b55ac2ULL, 
0x52331abeec29977bULL, 0xb7a97c0fb8d5da62ULL, 0xa876226fc7543bfcULL, 0xc319f66daeef822cULL, 0x6b6fd40269bbb9d0ULL, 0xa762bfec4bdd317aULL, 0xdd31d176abe0963dULL, 0xd121c778a9e69e37ULL, 0x4f1fb62867a981e6ULL, 0x3c504e360a1e2822ULL, 0x8f02cbc847c90146ULL, 0x16c3c8e4f20bef1dULL, 0x99c1032cb5c2ee5bULL, 0xcc0d6bee226688aaULL, 0x647b4981e532b356ULL, 0x5e230cb0ee2f9f71ULL, 0xa399461dbedfc27cULL, 0xfa4538d12b7dac87ULL, 0x217ce2a0819e3ebfULL, 0x6c90a67e1236485aULL, 0x2d6cf4ae839836b5ULL, 0x5ad8f5411b2d6c77ULL, 0x2470622a0e123836ULL, 0xca0560e923658cafULL, 0x04fbf9f1f502f306ULL, 0x8312ddc645cf094cULL, 0xc61576e7216384a5ULL, 0x9e3e7150ce4f1fd1ULL, 0xab72a9e249db3970ULL, 0xe87d09c42c74b09cULL, 0x2c9b8dd5f916c33aULL, 0x6e635488e637bf59ULL, 0x93d91e25b6c7e254ULL, 0xf05d25d82878a088ULL, 0x72b8816517395c4bULL, 0x2b64ffa9829b32b0ULL, 0x5cd0fe461a2e6872ULL, 0x1d2cac968b80169dULL, 0x3ea3bcc0fe1fdf21ULL, 0x1b24a7918a831298ULL, 0x3648533f091b242dULL, 0x8c064045c94603caULL, 0x354cd8b2879426a1ULL, 0xb94a98f74ed2256bULL, 0x7c5b659de13ea342ULL, 0xe46d1fca2e72b896ULL, 0x62734286e431b753ULL, 0x7a536e9ae03da747ULL, 0x400b2babeb208b60ULL, 0x47f459d790ad7aeaULL, 0xff49b85ba4f1aa0eULL, 0x44f0d25a1e227866ULL, 0x395ccebc85922eabULL, 0x5d27873d60a09dfdULL, 0x0000000000000000ULL, 0xde355afb256f94b1ULL, 0x02f3f2f6f401f703ULL, 0x1cdbd5edf10ee312ULL, 0x5fd475cb94a16afeULL, 0x3a5845310b1d2c27ULL, 0x686b5f8fe734bb5cULL, 0x238f1056759fc9bcULL, 0x582b07b7ef2c9b74ULL, 0xb8bde18c345cd0e4ULL, 0xa695c6973153c4f5ULL, 0xc2ee8f16d46177a3ULL, 0xdacea30ad06d67b7ULL, 0x3344d3b5869722a4ULL, 0x19d755677e82e59bULL, 0xc901eb64adea8e23ULL, 0x34bba1c9fd1ad32eULL, 0xf6552edf297ba48dULL, 0xa09dcd903050c0f0ULL, 0x9ac588a13b4decd7ULL, 0x658c30fa9fbc46d9ULL, 0x2a9386d2f815c73fULL, 0xae7e2968c6573ff9ULL, 0x6a98ad7913354c5fULL, 0x14303a12060a181eULL, 0x1e28271b050f1411ULL, 0xa4663461c55233f6ULL, 0x6688bb7711334455ULL, 0x2f9f06587799c1b6ULL, 0x15c743697c84ed91ULL, 0x01f7797b7a8ef58fULL, 0x0de76f757888fd85ULL, 
0xb4adf782365ad8eeULL, 0x48e0c4541c24706cULL, 0x96d59eaf394be4ddULL, 0xcbf2199259eb7920ULL, 0x50c0e84818286078ULL, 0xe98a70bf56fa4513ULL, 0x8df1393eb3c8f645ULL, 0x87e92437b0cdfa4aULL, 0xd83d51fc246c90b4ULL, 0xc01d7de0206080a0ULL, 0x8bf93239b2cbf240ULL, 0x4be44fd992ab72e0ULL, 0xed71894ea3f8b615ULL, 0xba4e137ac05d27e7ULL, 0x851ad6c144cc0d49ULL, 0x5137913362a695f7ULL, 0x6080b07010304050ULL, 0x9fc9082bb4c1ea5eULL, 0x3f54c5bb84912aaeULL, 0x9722e7d443c51152ULL, 0x4dec44de93a876e5ULL, 0xb65e0574c25b2fedULL, 0xa16ab4eb4ade357fULL, 0xa9815b14bddace73ULL, 0x050c808a8f8c0689ULL, 0xee7502c32d77b499ULL, 0xaf895013bcd9ca76ULL, 0x6f942df39cb94ad6ULL, 0x6177c90b6abeb5dfULL, 0x9d3afadd40c01d5dULL, 0x98367a57cf4c1bd4ULL, 0xeb798249a2fbb210ULL, 0x2774e9a7809d3abaULL, 0xbf4293f04fd1216eULL, 0x42f8d95d1f217c63ULL, 0x861e5d4cca430fc5ULL, 0xdb39da71aae39238ULL, 0x912aecd342c61557ULL }; static const u64 T5[256] = { 0xb9bb016ad3ba68d2ULL, 0x9ae5b166fc54194dULL, 0x65e2cd14712f93bcULL, 0x8725511b9c74b9cdULL, 0xa2f7a457f5530251ULL, 0xd6d003be68d3b86bULL, 0xded604b56bd2bd6fULL, 0x52b3fe85d74d6429ULL, 0xbafdad4af0500d5dULL, 0x09cf63e0e9ac268aULL, 0x1c0984968a8d830eULL, 0x91a51a4ddcbf79c6ULL, 0xa73d4d379070adddULL, 0xaaf1a35cf6520755ULL, 0xa47be117b39ac852ULL, 0x5ab5f98ed44c612dULL, 0x0346ac2023ea658fULL, 0xe6c4118462d5a673ULL, 0xcc55c268a497f166ULL, 0xc6dc0da86ed1b263ULL, 0x85aa99d05533ffccULL, 0xb2fbaa41f3510859ULL, 0xe2c79c0fed5b2a71ULL, 0x59f355aef7a604a2ULL, 0xbefe20c17fde815fULL, 0x7aade5a2d848753dULL, 0x29d77fcce5a8329aULL, 0xbc71e80ab699c75eULL, 0x96e03be670db904bULL, 0x8dac9edb5632fac8ULL, 0xd1952215c4b751e6ULL, 0xb332ceaa19fc2bd7ULL, 0x4b70937338e348abULL, 0x8463fd3bbf9edc42ULL, 0xfc41d052ae91ef7eULL, 0xac7de61cb09bcd56ULL, 0x437694783be24dafULL, 0xb1bd0661d0bb6dd6ULL, 0x329bdaf1c3415819ULL, 0x577917e5b26ecba5ULL, 0x41f95cb3f2a50baeULL, 0x16804b5640cbc00bULL, 0x7f670cc2bd6bdab1ULL, 0xdc59cc7ea295fb6eULL, 0x61e1409ffea11fbeULL, 0xcb10e3c308f318ebULL, 0xe181302fceb14ffeULL, 
0x100c0e1606020a08ULL, 0x2e925e6749ccdb17ULL, 0x6ea2663f51c4f337ULL, 0xe84e53cf271d6974ULL, 0xa0786c9c3c144450ULL, 0x56b0730e58c3e82bULL, 0x3f57349aa563f291ULL, 0x9ee63ced73da954fULL, 0xd2d38e35e75d3469ULL, 0xc2df8023e15f3e61ULL, 0xaef22ed779dc8b57ULL, 0xcf136e48877d94e9ULL, 0x2694596c4acdde13ULL, 0xdf1f605e817f9ee1ULL, 0xeac19b04ee5a2f75ULL, 0x477519f3b46cc1adULL, 0xdad5893ee45c316dULL, 0xeb08ffef04f70cfbULL, 0x2dd4f2476a26be98ULL, 0xab38c7b71cff24dbULL, 0x3b54b9112aed7e93ULL, 0x134aa23625e86f87ULL, 0x9c69f426ba9dd34eULL, 0x5f7f10eeb16fcea1ULL, 0x04038d8b8f8e8c02ULL, 0xc8564fe32b197d64ULL, 0x69e74794fda01abaULL, 0xd31aeade0df017e7ULL, 0x3c1198ba8689971eULL, 0x78222d69110f333cULL, 0x3812153109071b1cULL, 0x11c56afdecaf2986ULL, 0x8b20db9b10fb30cbULL, 0x4030385818082820ULL, 0xa87e6b973f154154ULL, 0x682e237f170d3934ULL, 0x20181c2c0c041410ULL, 0x0806070b03010504ULL, 0x074521abac64e98dULL, 0xb6f827ca7cdf845bULL, 0x97295f0d9a76b3c5ULL, 0xef0b72648b7980f9ULL, 0xa6f429dc7add8e53ULL, 0xf58eb3b2473dc9f4ULL, 0xb074628a3a164e58ULL, 0xe582bda4413fc3fcULL, 0xa5b285fc5937ebdcULL, 0x4f731ef8b76dc4a9ULL, 0xdd90a8954838d8e0ULL, 0xa1b10877d6b967deULL, 0xbf37442a9573a2d1ULL, 0x1b4ca53d26e96a83ULL, 0xb5be8bea5f35e1d4ULL, 0x92e3b66dff551c49ULL, 0xaf3b4a3c9371a8d9ULL, 0xff077c728d7b8af1ULL, 0x140f839d898c860aULL, 0xb73143219672a7d5ULL, 0x34179fb18588921aULL, 0xe30ef8e407f609ffULL, 0x4dfcd6337e2a82a8ULL, 0xed84baaf423ec6f8ULL, 0xcad98728e25e3b65ULL, 0x25d2f54c6927bb9cULL, 0x0a89cfc0ca464305ULL, 0x60282474140c3c30ULL, 0x0f4326a0af65ec89ULL, 0x676d05dfb868d5bdULL, 0x2f5b3a8ca361f899ULL, 0x180a091d05030f0cULL, 0x46bc7d185ec1e223ULL, 0x82efb87bf9571641ULL, 0xfece189967d6a97fULL, 0x86ec35f076d99a43ULL, 0xfacd9512e858257dULL, 0x8eea32fb75d89f47ULL, 0x17492fbdaa66e385ULL, 0xf6c81f9264d7ac7bULL, 0xcd9ca6834e3ad2e8ULL, 0x0e8a424b45c8cf07ULL, 0xfd88b4b9443cccf0ULL, 0x8326dc9013fa35cfULL, 0xc453c563a796f462ULL, 0x51f552a5f4a701a6ULL, 0xb477ef01b598c25aULL, 0x3352be1a29ec7b97ULL, 
0xa9b70f7cd5b862daULL, 0x76a86f2254c7fc3bULL, 0x19c36df6efae2c82ULL, 0x6f6b02d4bb69d0b9ULL, 0x62a7ecbfdd4b7a31ULL, 0x31dd76d1e0ab3d96ULL, 0x21d178c7e6a9379eULL, 0x1f4f28b6a967e681ULL, 0x503c364e1e0a2228ULL, 0x028fc8cbc9474601ULL, 0xc316e4c80bf21defULL, 0xc1992c03c2b55beeULL, 0x0dccee6b6622aa88ULL, 0x7b64814932e556b3ULL, 0x235eb00c2fee719fULL, 0x99a31d46dfbe7cc2ULL, 0x45fad1387d2b87acULL, 0x7c21a0e29e81bf3eULL, 0x906c7ea636125a48ULL, 0x6c2daef49883b536ULL, 0xd85a41f52d1b776cULL, 0x70242a62120e3638ULL, 0x05cae9606523af8cULL, 0xfb04f1f902f506f3ULL, 0x1283c6ddcf454c09ULL, 0x15c6e7766321a584ULL, 0x3e9e50714fced11fULL, 0x72abe2a9db497039ULL, 0x7de8c409742c9cb0ULL, 0x9b2cd58d16f93ac3ULL, 0x636e885437e659bfULL, 0xd993251ec7b654e2ULL, 0x5df0d825782888a0ULL, 0xb872658139174b5cULL, 0x642ba9ff9b82b032ULL, 0xd05c46fe2e1a7268ULL, 0x2c1d96ac808b9d16ULL, 0xa33ec0bc1ffe21dfULL, 0x241b91a7838a9812ULL, 0x48363f531b092d24ULL, 0x068c454046c9ca03ULL, 0x4c35b2d89487a126ULL, 0x4ab9f798d24e6b25ULL, 0x5b7c9d653ee142a3ULL, 0x6de4ca1f722e96b8ULL, 0x7362864231e453b7ULL, 0x537a9a6e3de047a7ULL, 0x0b40ab2b20eb608bULL, 0xf447d759ad90ea7aULL, 0x49ff5bb8f1a40eaaULL, 0xf0445ad2221e6678ULL, 0x5c39bcce9285ab2eULL, 0x275d3d87a060fd9dULL, 0x0000000000000000ULL, 0x35defb5a6f25b194ULL, 0xf302f6f201f403f7ULL, 0xdb1cedd50ef112e3ULL, 0xd45fcb75a194fe6aULL, 0x583a31451d0b272cULL, 0x6b688f5f34e75cbbULL, 0x8f2356109f75bcc9ULL, 0x2b58b7072cef749bULL, 0xbdb88ce15c34e4d0ULL, 0x95a697c65331f5c4ULL, 0xeec2168f61d4a377ULL, 0xceda0aa36dd0b767ULL, 0x4433b5d39786a422ULL, 0xd7196755827e9be5ULL, 0x01c964ebeaad238eULL, 0xbb34c9a11afd2ed3ULL, 0x55f6df2e7b298da4ULL, 0x9da090cd5030f0c0ULL, 0xc59aa1884d3bd7ecULL, 0x8c65fa30bc9fd946ULL, 0x932ad28615f83fc7ULL, 0x7eae682957c6f93fULL, 0x986a79ad35135f4cULL, 0x3014123a0a061e18ULL, 0x281e1b270f051114ULL, 0x66a4613452c5f633ULL, 0x886677bb33115544ULL, 0x9f2f58069977b6c1ULL, 0xc7156943847c91edULL, 0xf7017b798e7a8ff5ULL, 0xe70d756f887885fdULL, 0xadb482f75a36eed8ULL, 
0xe04854c4241c6c70ULL, 0xd596af9e4b39dde4ULL, 0xf2cb9219eb592079ULL, 0xc05048e828187860ULL, 0x8ae9bf70fa561345ULL, 0xf18d3e39c8b345f6ULL, 0xe9873724cdb04afaULL, 0x3dd8fc516c24b490ULL, 0x1dc0e07d6020a080ULL, 0xf98b3932cbb240f2ULL, 0xe44bd94fab92e072ULL, 0x71ed4e89f8a315b6ULL, 0x4eba7a135dc0e727ULL, 0x1a85c1d6cc44490dULL, 0x37513391a662f795ULL, 0x806070b030105040ULL, 0xc99f2b08c1b45eeaULL, 0x543fbbc59184ae2aULL, 0x2297d4e7c5435211ULL, 0xec4dde44a893e576ULL, 0x5eb674055bc2ed2fULL, 0x6aa1ebb4de4a7f35ULL, 0x81a9145bdabd73ceULL, 0x0c058a808c8f8906ULL, 0x75eec302772d99b4ULL, 0x89af1350d9bc76caULL, 0x946ff32db99cd64aULL, 0x77610bc9be6adfb5ULL, 0x3a9dddfac0405d1dULL, 0x3698577a4ccfd41bULL, 0x79eb4982fba210b2ULL, 0x7427a7e99d80ba3aULL, 0x42bff093d14f6e21ULL, 0xf8425dd9211f637cULL, 0x1e864c5d43cac50fULL, 0x39db71dae3aa3892ULL, 0x2a91d3ecc6425715ULL }; static const u64 T6[256] = { 0x6a01bbb9d268bad3ULL, 0x66b1e59a4d1954fcULL, 0x14cde265bc932f71ULL, 0x1b512587cdb9749cULL, 0x57a4f7a2510253f5ULL, 0xbe03d0d66bb8d368ULL, 0xb504d6de6fbdd26bULL, 0x85feb35229644dd7ULL, 0x4aadfdba5d0d50f0ULL, 0xe063cf098a26ace9ULL, 0x9684091c0e838d8aULL, 0x4d1aa591c679bfdcULL, 0x374d3da7ddad7090ULL, 0x5ca3f1aa550752f6ULL, 0x17e17ba452c89ab3ULL, 0x8ef9b55a2d614cd4ULL, 0x20ac46038f65ea23ULL, 0x8411c4e673a6d562ULL, 0x68c255cc66f197a4ULL, 0xa80ddcc663b2d16eULL, 0xd099aa85ccff3355ULL, 0x41aafbb2590851f3ULL, 0x0f9cc7e2712a5bedULL, 0xae55f359a204a6f7ULL, 0xc120febe5f81de7fULL, 0xa2e5ad7a3d7548d8ULL, 0xcc7fd7299a32a8e5ULL, 0x0ae871bc5ec799b6ULL, 0xe63be0964b90db70ULL, 0xdb9eac8dc8fa3256ULL, 0x152295d1e651b7c4ULL, 0xaace32b3d72bfc19ULL, 0x7393704bab48e338ULL, 0x3bfd638442dc9ebfULL, 0x52d041fc7eef91aeULL, 0x1ce67dac56cd9bb0ULL, 0x78947643af4de23bULL, 0x6106bdb1d66dbbd0ULL, 0xf1da9b32195841c3ULL, 0xe5177957a5cb6eb2ULL, 0xb35cf941ae0ba5f2ULL, 0x564b80160bc0cb40ULL, 0xc20c677fb1da6bbdULL, 0x7ecc59dc6efb95a2ULL, 0x9f40e161be1fa1feULL, 0xc3e310cbeb18f308ULL, 0x2f3081e1fe4fb1ceULL, 0x160e0c10080a0206ULL, 
0x675e922e17dbcc49ULL, 0x3f66a26e37f3c451ULL, 0xcf534ee874691d27ULL, 0x9c6c78a05044143cULL, 0x0e73b0562be8c358ULL, 0x9a34573f91f263a5ULL, 0xed3ce69e4f95da73ULL, 0x358ed3d269345de7ULL, 0x2380dfc2613e5fe1ULL, 0xd72ef2ae578bdc79ULL, 0x486e13cfe9947d87ULL, 0x6c59942613decd4aULL, 0x5e601fdfe19e7f81ULL, 0x049bc1ea752f5aeeULL, 0xf3197547adc16cb4ULL, 0x3e89d5da6d315ce4ULL, 0xefff08ebfb0cf704ULL, 0x47f2d42d98be266aULL, 0xb7c738abdb24ff1cULL, 0x11b9543b937eed2aULL, 0x36a24a13876fe825ULL, 0x26f4699c4ed39dbaULL, 0xee107f5fa1ce6fb1ULL, 0x8b8d0304028c8e8fULL, 0xe34f56c8647d192bULL, 0x9447e769ba1aa0fdULL, 0xdeea1ad3e717f00dULL, 0xba98113c1e978986ULL, 0x692d22783c330f11ULL, 0x311512381c1b0709ULL, 0xfd6ac5118629afecULL, 0x9bdb208bcb30fb10ULL, 0x5838304020280818ULL, 0x976b7ea85441153fULL, 0x7f232e6834390d17ULL, 0x2c1c18201014040cULL, 0x0b07060804050103ULL, 0xab2145078de964acULL, 0xca27f8b65b84df7cULL, 0x0d5f2997c5b3769aULL, 0x64720beff980798bULL, 0xdc29f4a6538edd7aULL, 0xb2b38ef5f4c93d47ULL, 0x8a6274b0584e163aULL, 0xa4bd82e5fcc33f41ULL, 0xfc85b2a5dceb3759ULL, 0xf81e734fa9c46db7ULL, 0x95a890dde0d83848ULL, 0x7708b1a1de67b9d6ULL, 0x2a4437bfd1a27395ULL, 0x3da54c1b836ae926ULL, 0xea8bbeb5d4e1355fULL, 0x6db6e392491c55ffULL, 0x3c4a3bafd9a87193ULL, 0x727c07fff18a7b8dULL, 0x9d830f140a868c89ULL, 0x214331b7d5a77296ULL, 0xb19f17341a928885ULL, 0xe4f80ee3ff09f607ULL, 0x33d6fc4da8822a7eULL, 0xafba84edf8c63e42ULL, 0x2887d9ca653b5ee2ULL, 0x4cf5d2259cbb2769ULL, 0xc0cf890a054346caULL, 0x74242860303c0c14ULL, 0xa026430f89ec65afULL, 0xdf056d67bdd568b8ULL, 0x8c3a5b2f99f861a3ULL, 0x1d090a180c0f0305ULL, 0x187dbc4623e2c15eULL, 0x7bb8ef82411657f9ULL, 0x9918cefe7fa9d667ULL, 0xf035ec86439ad976ULL, 0x1295cdfa7d2558e8ULL, 0xfb32ea8e479fd875ULL, 0xbd2f491785e366aaULL, 0x921fc8f67bacd764ULL, 0x83a69ccde8d23a4eULL, 0x4b428a0e07cfc845ULL, 0xb9b488fdf0cc3c44ULL, 0x90dc2683cf35fa13ULL, 0x63c553c462f496a7ULL, 0xa552f551a601a7f4ULL, 0x01ef77b45ac298b5ULL, 0x1abe5233977bec29ULL, 0x7c0fb7a9da62b8d5ULL, 
0x226fa8763bfcc754ULL, 0xf66dc319822caeefULL, 0xd4026b6fb9d069bbULL, 0xbfeca762317a4bddULL, 0xd176dd31963dabe0ULL, 0xc778d1219e37a9e6ULL, 0xb6284f1f81e667a9ULL, 0x4e363c5028220a1eULL, 0xcbc88f02014647c9ULL, 0xc8e416c3ef1df20bULL, 0x032c99c1ee5bb5c2ULL, 0x6beecc0d88aa2266ULL, 0x4981647bb356e532ULL, 0x0cb05e239f71ee2fULL, 0x461da399c27cbedfULL, 0x38d1fa45ac872b7dULL, 0xe2a0217c3ebf819eULL, 0xa67e6c90485a1236ULL, 0xf4ae2d6c36b58398ULL, 0xf5415ad86c771b2dULL, 0x622a247038360e12ULL, 0x60e9ca058caf2365ULL, 0xf9f104fbf306f502ULL, 0xddc68312094c45cfULL, 0x76e7c61584a52163ULL, 0x71509e3e1fd1ce4fULL, 0xa9e2ab72397049dbULL, 0x09c4e87db09c2c74ULL, 0x8dd52c9bc33af916ULL, 0x54886e63bf59e637ULL, 0x1e2593d9e254b6c7ULL, 0x25d8f05da0882878ULL, 0x816572b85c4b1739ULL, 0xffa92b6432b0829bULL, 0xfe465cd068721a2eULL, 0xac961d2c169d8b80ULL, 0xbcc03ea3df21fe1fULL, 0xa7911b2412988a83ULL, 0x533f3648242d091bULL, 0x40458c0603cac946ULL, 0xd8b2354c26a18794ULL, 0x98f7b94a256b4ed2ULL, 0x659d7c5ba342e13eULL, 0x1fcae46db8962e72ULL, 0x42866273b753e431ULL, 0x6e9a7a53a747e03dULL, 0x2bab400b8b60eb20ULL, 0x59d747f47aea90adULL, 0xb85bff49aa0ea4f1ULL, 0xd25a44f078661e22ULL, 0xcebc395c2eab8592ULL, 0x873d5d279dfd60a0ULL, 0x0000000000000000ULL, 0x5afbde3594b1256fULL, 0xf2f602f3f703f401ULL, 0xd5ed1cdbe312f10eULL, 0x75cb5fd46afe94a1ULL, 0x45313a582c270b1dULL, 0x5f8f686bbb5ce734ULL, 0x1056238fc9bc759fULL, 0x07b7582b9b74ef2cULL, 0xe18cb8bdd0e4345cULL, 0xc697a695c4f53153ULL, 0x8f16c2ee77a3d461ULL, 0xa30adace67b7d06dULL, 0xd3b5334422a48697ULL, 0x556719d7e59b7e82ULL, 0xeb64c9018e23adeaULL, 0xa1c934bbd32efd1aULL, 0x2edff655a48d297bULL, 0xcd90a09dc0f03050ULL, 0x88a19ac5ecd73b4dULL, 0x30fa658c46d99fbcULL, 0x86d22a93c73ff815ULL, 0x2968ae7e3ff9c657ULL, 0xad796a984c5f1335ULL, 0x3a121430181e060aULL, 0x271b1e281411050fULL, 0x3461a46633f6c552ULL, 0xbb77668844551133ULL, 0x06582f9fc1b67799ULL, 0x436915c7ed917c84ULL, 0x797b01f7f58f7a8eULL, 0x6f750de7fd857888ULL, 0xf782b4add8ee365aULL, 0xc45448e0706c1c24ULL, 
0x9eaf96d5e4dd394bULL, 0x1992cbf2792059ebULL, 0xe84850c060781828ULL, 0x70bfe98a451356faULL, 0x393e8df1f645b3c8ULL, 0x243787e9fa4ab0cdULL, 0x51fcd83d90b4246cULL, 0x7de0c01d80a02060ULL, 0x32398bf9f240b2cbULL, 0x4fd94be472e092abULL, 0x894eed71b615a3f8ULL, 0x137aba4e27e7c05dULL, 0xd6c1851a0d4944ccULL, 0x9133513795f762a6ULL, 0xb070608040501030ULL, 0x082b9fc9ea5eb4c1ULL, 0xc5bb3f542aae8491ULL, 0xe7d49722115243c5ULL, 0x44de4dec76e593a8ULL, 0x0574b65e2fedc25bULL, 0xb4eba16a357f4adeULL, 0x5b14a981ce73bddaULL, 0x808a050c06898f8cULL, 0x02c3ee75b4992d77ULL, 0x5013af89ca76bcd9ULL, 0x2df36f944ad69cb9ULL, 0xc90b6177b5df6abeULL, 0xfadd9d3a1d5d40c0ULL, 0x7a5798361bd4cf4cULL, 0x8249eb79b210a2fbULL, 0xe9a727743aba809dULL, 0x93f0bf42216e4fd1ULL, 0xd95d42f87c631f21ULL, 0x5d4c861e0fc5ca43ULL, 0xda71db399238aae3ULL, 0xecd3912a155742c6ULL }; static const u64 T7[256] = { 0x016ab9bb68d2d3baULL, 0xb1669ae5194dfc54ULL, 0xcd1465e293bc712fULL, 0x511b8725b9cd9c74ULL, 0xa457a2f70251f553ULL, 0x03bed6d0b86b68d3ULL, 0x04b5ded6bd6f6bd2ULL, 0xfe8552b36429d74dULL, 0xad4abafd0d5df050ULL, 0x63e009cf268ae9acULL, 0x84961c09830e8a8dULL, 0x1a4d91a579c6dcbfULL, 0x4d37a73daddd9070ULL, 0xa35caaf10755f652ULL, 0xe117a47bc852b39aULL, 0xf98e5ab5612dd44cULL, 0xac200346658f23eaULL, 0x1184e6c4a67362d5ULL, 0xc268cc55f166a497ULL, 0x0da8c6dcb2636ed1ULL, 0x99d085aaffcc5533ULL, 0xaa41b2fb0859f351ULL, 0x9c0fe2c72a71ed5bULL, 0x55ae59f304a2f7a6ULL, 0x20c1befe815f7fdeULL, 0xe5a27aad753dd848ULL, 0x7fcc29d7329ae5a8ULL, 0xe80abc71c75eb699ULL, 0x3be696e0904b70dbULL, 0x9edb8dacfac85632ULL, 0x2215d19551e6c4b7ULL, 0xceaab3322bd719fcULL, 0x93734b7048ab38e3ULL, 0xfd3b8463dc42bf9eULL, 0xd052fc41ef7eae91ULL, 0xe61cac7dcd56b09bULL, 0x947843764daf3be2ULL, 0x0661b1bd6dd6d0bbULL, 0xdaf1329b5819c341ULL, 0x17e55779cba5b26eULL, 0x5cb341f90baef2a5ULL, 0x4b561680c00b40cbULL, 0x0cc27f67dab1bd6bULL, 0xcc7edc59fb6ea295ULL, 0x409f61e11fbefea1ULL, 0xe3c3cb1018eb08f3ULL, 0x302fe1814ffeceb1ULL, 0x0e16100c0a080602ULL, 0x5e672e92db1749ccULL, 
0x663f6ea2f33751c4ULL, 0x53cfe84e6974271dULL, 0x6c9ca07844503c14ULL, 0x730e56b0e82b58c3ULL, 0x349a3f57f291a563ULL, 0x3ced9ee6954f73daULL, 0x8e35d2d33469e75dULL, 0x8023c2df3e61e15fULL, 0x2ed7aef28b5779dcULL, 0x6e48cf1394e9877dULL, 0x596c2694de134acdULL, 0x605edf1f9ee1817fULL, 0x9b04eac12f75ee5aULL, 0x19f34775c1adb46cULL, 0x893edad5316de45cULL, 0xffefeb080cfb04f7ULL, 0xf2472dd4be986a26ULL, 0xc7b7ab3824db1cffULL, 0xb9113b547e932aedULL, 0xa236134a6f8725e8ULL, 0xf4269c69d34eba9dULL, 0x10ee5f7fcea1b16fULL, 0x8d8b04038c028f8eULL, 0x4fe3c8567d642b19ULL, 0x479469e71abafda0ULL, 0xeaded31a17e70df0ULL, 0x98ba3c11971e8689ULL, 0x2d697822333c110fULL, 0x153138121b1c0907ULL, 0x6afd11c52986ecafULL, 0xdb9b8b2030cb10fbULL, 0x3858403028201808ULL, 0x6b97a87e41543f15ULL, 0x237f682e3934170dULL, 0x1c2c201814100c04ULL, 0x070b080605040301ULL, 0x21ab0745e98dac64ULL, 0x27cab6f8845b7cdfULL, 0x5f0d9729b3c59a76ULL, 0x7264ef0b80f98b79ULL, 0x29dca6f48e537addULL, 0xb3b2f58ec9f4473dULL, 0x628ab0744e583a16ULL, 0xbda4e582c3fc413fULL, 0x85fca5b2ebdc5937ULL, 0x1ef84f73c4a9b76dULL, 0xa895dd90d8e04838ULL, 0x0877a1b167ded6b9ULL, 0x442abf37a2d19573ULL, 0xa53d1b4c6a8326e9ULL, 0x8beab5bee1d45f35ULL, 0xb66d92e31c49ff55ULL, 0x4a3caf3ba8d99371ULL, 0x7c72ff078af18d7bULL, 0x839d140f860a898cULL, 0x4321b731a7d59672ULL, 0x9fb13417921a8588ULL, 0xf8e4e30e09ff07f6ULL, 0xd6334dfc82a87e2aULL, 0xbaafed84c6f8423eULL, 0x8728cad93b65e25eULL, 0xf54c25d2bb9c6927ULL, 0xcfc00a894305ca46ULL, 0x247460283c30140cULL, 0x26a00f43ec89af65ULL, 0x05df676dd5bdb868ULL, 0x3a8c2f5bf899a361ULL, 0x091d180a0f0c0503ULL, 0x7d1846bce2235ec1ULL, 0xb87b82ef1641f957ULL, 0x1899fecea97f67d6ULL, 0x35f086ec9a4376d9ULL, 0x9512facd257de858ULL, 0x32fb8eea9f4775d8ULL, 0x2fbd1749e385aa66ULL, 0x1f92f6c8ac7b64d7ULL, 0xa683cd9cd2e84e3aULL, 0x424b0e8acf0745c8ULL, 0xb4b9fd88ccf0443cULL, 0xdc90832635cf13faULL, 0xc563c453f462a796ULL, 0x52a551f501a6f4a7ULL, 0xef01b477c25ab598ULL, 0xbe1a33527b9729ecULL, 0x0f7ca9b762dad5b8ULL, 0x6f2276a8fc3b54c7ULL, 
0x6df619c32c82efaeULL, 0x02d46f6bd0b9bb69ULL, 0xecbf62a77a31dd4bULL, 0x76d131dd3d96e0abULL, 0x78c721d1379ee6a9ULL, 0x28b61f4fe681a967ULL, 0x364e503c22281e0aULL, 0xc8cb028f4601c947ULL, 0xe4c8c3161def0bf2ULL, 0x2c03c1995beec2b5ULL, 0xee6b0dccaa886622ULL, 0x81497b6456b332e5ULL, 0xb00c235e719f2feeULL, 0x1d4699a37cc2dfbeULL, 0xd13845fa87ac7d2bULL, 0xa0e27c21bf3e9e81ULL, 0x7ea6906c5a483612ULL, 0xaef46c2db5369883ULL, 0x41f5d85a776c2d1bULL, 0x2a6270243638120eULL, 0xe96005caaf8c6523ULL, 0xf1f9fb0406f302f5ULL, 0xc6dd12834c09cf45ULL, 0xe77615c6a5846321ULL, 0x50713e9ed11f4fceULL, 0xe2a972ab7039db49ULL, 0xc4097de89cb0742cULL, 0xd58d9b2c3ac316f9ULL, 0x8854636e59bf37e6ULL, 0x251ed99354e2c7b6ULL, 0xd8255df088a07828ULL, 0x6581b8724b5c3917ULL, 0xa9ff642bb0329b82ULL, 0x46fed05c72682e1aULL, 0x96ac2c1d9d16808bULL, 0xc0bca33e21df1ffeULL, 0x91a7241b9812838aULL, 0x3f5348362d241b09ULL, 0x4540068cca0346c9ULL, 0xb2d84c35a1269487ULL, 0xf7984ab96b25d24eULL, 0x9d655b7c42a33ee1ULL, 0xca1f6de496b8722eULL, 0x8642736253b731e4ULL, 0x9a6e537a47a73de0ULL, 0xab2b0b40608b20ebULL, 0xd759f447ea7aad90ULL, 0x5bb849ff0eaaf1a4ULL, 0x5ad2f0446678221eULL, 0xbcce5c39ab2e9285ULL, 0x3d87275dfd9da060ULL, 0x0000000000000000ULL, 0xfb5a35deb1946f25ULL, 0xf6f2f30203f701f4ULL, 0xedd5db1c12e30ef1ULL, 0xcb75d45ffe6aa194ULL, 0x3145583a272c1d0bULL, 0x8f5f6b685cbb34e7ULL, 0x56108f23bcc99f75ULL, 0xb7072b58749b2cefULL, 0x8ce1bdb8e4d05c34ULL, 0x97c695a6f5c45331ULL, 0x168feec2a37761d4ULL, 0x0aa3cedab7676dd0ULL, 0xb5d34433a4229786ULL, 0x6755d7199be5827eULL, 0x64eb01c9238eeaadULL, 0xc9a1bb342ed31afdULL, 0xdf2e55f68da47b29ULL, 0x90cd9da0f0c05030ULL, 0xa188c59ad7ec4d3bULL, 0xfa308c65d946bc9fULL, 0xd286932a3fc715f8ULL, 0x68297eaef93f57c6ULL, 0x79ad986a5f4c3513ULL, 0x123a30141e180a06ULL, 0x1b27281e11140f05ULL, 0x613466a4f63352c5ULL, 0x77bb886655443311ULL, 0x58069f2fb6c19977ULL, 0x6943c71591ed847cULL, 0x7b79f7018ff58e7aULL, 0x756fe70d85fd8878ULL, 0x82f7adb4eed85a36ULL, 0x54c4e0486c70241cULL, 0xaf9ed596dde44b39ULL, 
0x9219f2cb2079eb59ULL, 0x48e8c05078602818ULL, 0xbf708ae91345fa56ULL, 0x3e39f18d45f6c8b3ULL, 0x3724e9874afacdb0ULL, 0xfc513dd8b4906c24ULL, 0xe07d1dc0a0806020ULL, 0x3932f98b40f2cbb2ULL, 0xd94fe44be072ab92ULL, 0x4e8971ed15b6f8a3ULL, 0x7a134ebae7275dc0ULL, 0xc1d61a85490dcc44ULL, 0x33913751f795a662ULL, 0x70b0806050403010ULL, 0x2b08c99f5eeac1b4ULL, 0xbbc5543fae2a9184ULL, 0xd4e722975211c543ULL, 0xde44ec4de576a893ULL, 0x74055eb6ed2f5bc2ULL, 0xebb46aa17f35de4aULL, 0x145b81a973cedabdULL, 0x8a800c0589068c8fULL, 0xc30275ee99b4772dULL, 0x135089af76cad9bcULL, 0xf32d946fd64ab99cULL, 0x0bc97761dfb5be6aULL, 0xddfa3a9d5d1dc040ULL, 0x577a3698d41b4ccfULL, 0x498279eb10b2fba2ULL, 0xa7e97427ba3a9d80ULL, 0xf09342bf6e21d14fULL, 0x5dd9f842637c211fULL, 0x4c5d1e86c50f43caULL, 0x71da39db3892e3aaULL, 0xd3ec2a915715c642ULL }; static const u64 c[KHAZAD_ROUNDS + 1] = { 0xba542f7453d3d24dULL, 0x50ac8dbf70529a4cULL, 0xead597d133515ba6ULL, 0xde48a899db32b7fcULL, 0xe39e919be2bb416eULL, 0xa5cb6b95a1f3b102ULL, 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL }; static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; int r; const u64 *S = T7; u64 K2, K1; /* key is supposed to be 32-bit aligned */ K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]); /* setup the encrypt key */ for (r = 0; r <= KHAZAD_ROUNDS; r++) { ctx->E[r] = T0[(int)(K1 >> 56) ] ^ T1[(int)(K1 >> 48) & 0xff] ^ T2[(int)(K1 >> 40) & 0xff] ^ T3[(int)(K1 >> 32) & 0xff] ^ T4[(int)(K1 >> 24) & 0xff] ^ T5[(int)(K1 >> 16) & 0xff] ^ T6[(int)(K1 >> 8) & 0xff] ^ T7[(int)(K1 ) & 0xff] ^ c[r] ^ K2; K2 = K1; K1 = ctx->E[r]; } /* Setup the decrypt key */ ctx->D[0] = ctx->E[KHAZAD_ROUNDS]; for (r = 1; r < KHAZAD_ROUNDS; r++) { K1 = ctx->E[KHAZAD_ROUNDS - r]; ctx->D[r] = T0[(int)S[(int)(K1 >> 56) ] & 0xff] ^ T1[(int)S[(int)(K1 >> 48) & 0xff] & 
0xff] ^ T2[(int)S[(int)(K1 >> 40) & 0xff] & 0xff] ^ T3[(int)S[(int)(K1 >> 32) & 0xff] & 0xff] ^ T4[(int)S[(int)(K1 >> 24) & 0xff] & 0xff] ^ T5[(int)S[(int)(K1 >> 16) & 0xff] & 0xff] ^ T6[(int)S[(int)(K1 >> 8) & 0xff] & 0xff] ^ T7[(int)S[(int)(K1 ) & 0xff] & 0xff]; } ctx->D[KHAZAD_ROUNDS] = ctx->E[0]; return 0; } static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], u8 *ciphertext, const u8 *plaintext) { const __be64 *src = (const __be64 *)plaintext; __be64 *dst = (__be64 *)ciphertext; int r; u64 state; state = be64_to_cpu(*src) ^ roundKey[0]; for (r = 1; r < KHAZAD_ROUNDS; r++) { state = T0[(int)(state >> 56) ] ^ T1[(int)(state >> 48) & 0xff] ^ T2[(int)(state >> 40) & 0xff] ^ T3[(int)(state >> 32) & 0xff] ^ T4[(int)(state >> 24) & 0xff] ^ T5[(int)(state >> 16) & 0xff] ^ T6[(int)(state >> 8) & 0xff] ^ T7[(int)(state ) & 0xff] ^ roundKey[r]; } state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^ (T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^ (T2[(int)(state >> 40) & 0xff] & 0x0000ff0000000000ULL) ^ (T3[(int)(state >> 32) & 0xff] & 0x000000ff00000000ULL) ^ (T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^ (T5[(int)(state >> 16) & 0xff] & 0x0000000000ff0000ULL) ^ (T6[(int)(state >> 8) & 0xff] & 0x000000000000ff00ULL) ^ (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ roundKey[KHAZAD_ROUNDS]; *dst = cpu_to_be64(state); } static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); khazad_crypt(ctx->E, dst, src); } static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); khazad_crypt(ctx->D, dst, src); } static struct crypto_alg khazad_alg = { .cra_name = "khazad", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = KHAZAD_BLOCK_SIZE, .cra_ctxsize = sizeof (struct khazad_ctx), .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = KHAZAD_KEY_SIZE, .cia_max_keysize = 
KHAZAD_KEY_SIZE, .cia_setkey = khazad_setkey, .cia_encrypt = khazad_encrypt, .cia_decrypt = khazad_decrypt } } }; static int __init khazad_mod_init(void) { int ret = 0; ret = crypto_register_alg(&khazad_alg); return ret; } static void __exit khazad_mod_fini(void) { crypto_unregister_alg(&khazad_alg); } module_init(khazad_mod_init); module_exit(khazad_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
gpl-2.0
boype/kernel_tuna_jb43
drivers/media/dvb/frontends/zl10036.c
3107
12236
/** * Driver for Zarlink zl10036 DVB-S silicon tuner * * Copyright (C) 2006 Tino Reichardt * Copyright (C) 2007-2009 Matthias Schwarzott <zzam@gentoo.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License Version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ** * The data sheet for this tuner can be found at: * http://www.mcmilk.de/projects/dvb-card/datasheets/ZL10036.pdf * * This one is working: (at my Avermedia DVB-S Pro) * - zl10036 (40pin, FTA) * * A driver for zl10038 should be very similar. */ #include <linux/module.h> #include <linux/dvb/frontend.h> #include <linux/slab.h> #include <linux/types.h> #include "zl10036.h" static int zl10036_debug; #define dprintk(level, args...) \ do { if (zl10036_debug & level) printk(KERN_DEBUG "zl10036: " args); \ } while (0) #define deb_info(args...) dprintk(0x01, args) #define deb_i2c(args...) 
dprintk(0x02, args)

/* Per-tuner instance state; attached to fe->tuner_priv. */
struct zl10036_state {
	struct i2c_adapter *i2c;		/* i2c bus the tuner sits on */
	const struct zl10036_config *config;	/* board-specific config (address, rf loop) */
	u32 frequency;				/* last programmed frequency */
	u8 br, bf;				/* cached bandwidth register values to skip redundant writes */
};

/* This driver assumes the tuner is driven by a 10.111MHz crystal */
#define _XTAL 10111

/* Some of the possible dividers:
 *   64, (write 0x05 to reg), freq step size 158kHz
 *   10, (write 0x0a to reg), freq step size 1.011kHz (used here)
 *    5, (write 0x09 to reg), freq step size 2.022kHz */
#define _RDIV 10
#define _RDIV_REG 0x0a
/* frequency step per divider count */
#define _FR (_XTAL/_RDIV)

#define STATUS_POR 0x80 /* Power on Reset */
#define STATUS_FL  0x40 /* Frequency & Phase Lock */

/* read/write for zl10036 and zl10038 */

/*
 * Read the tuner's status byte over i2c.
 * Returns the raw status value (>= 0) on success, -EIO on bus failure.
 * Note the return mixes status byte and error code; callers are expected
 * to check for a negative value first.
 */
static int zl10036_read_status_reg(struct zl10036_state *state)
{
	u8 status;
	struct i2c_msg msg[1] = {
		{ .addr = state->config->tuner_address, .flags = I2C_M_RD,
		  .buf = &status, .len = sizeof(status) },
	};

	if (i2c_transfer(state->i2c, msg, 1) != 1) {
		printk(KERN_ERR "%s: i2c read failed at addr=%02x\n",
			__func__, state->config->tuner_address);
		return -EIO;
	}

	deb_i2c("R(status): %02x [FL=%d]\n",
		status, (status & STATUS_FL) ? 1 : 0);
	/* POR set means the chip lost its configuration and must be
	 * re-initialized by the caller */
	if (status & STATUS_POR)
		deb_info("%s: Power-On-Reset bit enabled - "
			"need to initialize the tuner\n", __func__);

	return status;
}

/*
 * Write @count raw bytes from @buf to the tuner in a single i2c transfer.
 * The first byte's high bits select the target register pair (see the
 * register map comment below); the debug branch only decodes them for
 * logging. Returns 0 on success, -EIO on bus failure.
 */
static int zl10036_write(struct zl10036_state *state, u8 buf[], u8 count)
{
	struct i2c_msg msg[1] = {
		{ .addr = state->config->tuner_address, .flags = 0,
		  .buf = buf, .len = count },
	};
	u8 reg = 0;
	int ret;

	if (zl10036_debug & 0x02) {
		/* every 8bit-value satisfies this!
		 * so only check for debug log */
		if ((buf[0] & 0x80) == 0x00)
			reg = 2;
		else if ((buf[0] & 0xc0) == 0x80)
			reg = 4;
		else if ((buf[0] & 0xf0) == 0xc0)
			reg = 6;
		else if ((buf[0] & 0xf0) == 0xd0)
			reg = 8;
		else if ((buf[0] & 0xf0) == 0xe0)
			reg = 10;
		else if ((buf[0] & 0xf0) == 0xf0)
			reg = 12;

		deb_i2c("W(%d):", reg);
		{
			int i;
			for (i = 0; i < count; i++)
				printk(KERN_CONT " %02x", buf[i]);
			printk(KERN_CONT "\n");
		}
	}

	ret = i2c_transfer(state->i2c, msg, 1);
	if (ret != 1) {
		printk(KERN_ERR "%s: i2c error, ret=%d\n",
			__func__, ret);
		return -EIO;
	}

	return 0;
}

/* Free the tuner state; called by the dvb frontend core on detach. */
static int zl10036_release(struct dvb_frontend *fe)
{
	struct zl10036_state *state = fe->tuner_priv;

	fe->tuner_priv = NULL;
	kfree(state);

	return 0;
}

/*
 * Put the tuner into power-down by setting the PD bit via registers 12/13.
 * The i2c gate on the demodulator is opened around the write.
 */
static int zl10036_sleep(struct dvb_frontend *fe)
{
	struct zl10036_state *state = fe->tuner_priv;
	u8 buf[] = { 0xf0, 0x80 }; /* regs 12/13 */
	int ret;

	deb_info("%s\n", __func__);

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */

	ret = zl10036_write(state, buf, sizeof(buf));

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */

	return ret;
}

/**
 * register map of the ZL10036/ZL10038
 *
 * reg[default] content
 *  2[0x00]:   0 | N14 | N13 | N12 | N11 | N10 |  N9 |  N8
 *  3[0x00]:  N7 |  N6 |  N5 |  N4 |  N3 |  N2 |  N1 |  N0
 *  4[0x80]:   1 |   0 | RFG | BA1 | BA0 | BG1 | BG0 | LEN
 *  5[0x00]:  P0 |  C1 |  C0 |  R4 |  R3 |  R2 |  R1 |  R0
 *  6[0xc0]:   1 |   1 |   0 |   0 | RSD |   0 |   0 |   0
 *  7[0x20]:  P1 | BF6 | BF5 | BF4 | BF3 | BF2 | BF1 |   0
 *  8[0xdb]:   1 |   1 |   0 |   1 |   0 |  CC |   1 |   1
 *  9[0x30]: VSD |  V2 |  V1 |  V0 |  S3 |  S2 |  S1 |  S0
 * 10[0xe1]:   1 |   1 |   1 |   0 |   0 | LS2 | LS1 | LS0
 * 11[0xf5]:  WS | WH2 | WH1 | WH0 | WL2 | WL1 | WL0 | WRE
 * 12[0xf0]:   1 |   1 |   1 |   1 |   0 |   0 |   0 |   0
 * 13[0x28]:  PD | BR4 | BR3 | BR2 | BR1 | BR0 | CLR |  TL
 */

/*
 * Program the 15-bit synthesizer divider N (regs 2/3) for @frequency.
 * The frequency is rounded to the nearest multiple of the step size _FR
 * (units presumably kHz, matching _XTAL — TODO confirm against callers)
 * and the rounded value is cached in state->frequency.
 */
static int zl10036_set_frequency(struct zl10036_state *state, u32 frequency)
{
	u8 buf[2];
	u32 div, foffset;

	/* round to the nearest divider step */
	div = (frequency + _FR/2) / _FR;
	state->frequency = div * _FR;

	/* residual error after rounding (logged as signed below) */
	foffset = frequency - state->frequency;

	buf[0] = (div >> 8) &
0x7f; buf[1] = (div >> 0) & 0xff; deb_info("%s: ftodo=%u fpriv=%u ferr=%d div=%u\n", __func__, frequency, state->frequency, foffset, div); return zl10036_write(state, buf, sizeof(buf)); } static int zl10036_set_bandwidth(struct zl10036_state *state, u32 fbw) { /* fbw is measured in kHz */ u8 br, bf; int ret; u8 buf_bf[] = { 0xc0, 0x00, /* 6/7: rsd=0 bf=0 */ }; u8 buf_br[] = { 0xf0, 0x00, /* 12/13: br=0xa clr=0 tl=0*/ }; u8 zl10036_rsd_off[] = { 0xc8 }; /* set RSD=1 */ /* ensure correct values */ if (fbw > 35000) fbw = 35000; if (fbw < 8000) fbw = 8000; #define _BR_MAXIMUM (_XTAL/575) /* _XTAL / 575kHz = 17 */ /* <= 28,82 MHz */ if (fbw <= 28820) { br = _BR_MAXIMUM; } else { /** * f(bw)=34,6MHz f(xtal)=10.111MHz * br = (10111/34600) * 63 * 1/K = 14; */ br = ((_XTAL * 21 * 1000) / (fbw * 419)); } /* ensure correct values */ if (br < 4) br = 4; if (br > _BR_MAXIMUM) br = _BR_MAXIMUM; /* * k = 1.257 * bf = fbw/_XTAL * br * k - 1 */ bf = (fbw * br * 1257) / (_XTAL * 1000) - 1; /* ensure correct values */ if (bf > 62) bf = 62; buf_bf[1] = (bf << 1) & 0x7e; buf_br[1] = (br << 2) & 0x7c; deb_info("%s: BW=%d br=%u bf=%u\n", __func__, fbw, br, bf); if (br != state->br) { ret = zl10036_write(state, buf_br, sizeof(buf_br)); if (ret < 0) return ret; } if (bf != state->bf) { ret = zl10036_write(state, buf_bf, sizeof(buf_bf)); if (ret < 0) return ret; /* time = br/(32* fxtal) */ /* minimal sleep time to be calculated * maximum br is 63 -> max time = 2 /10 MHz = 2e-7 */ msleep(1); ret = zl10036_write(state, zl10036_rsd_off, sizeof(zl10036_rsd_off)); if (ret < 0) return ret; } state->br = br; state->bf = bf; return 0; } static int zl10036_set_gain_params(struct zl10036_state *state, int c) { u8 buf[2]; u8 rfg, ba, bg; /* default values */ rfg = 0; /* enable when using an lna */ ba = 1; bg = 1; /* reg 4 */ buf[0] = 0x80 | ((rfg << 5) & 0x20) | ((ba << 3) & 0x18) | ((bg << 1) & 0x06); if (!state->config->rf_loop_enable) buf[0] |= 0x01; /* P0=0 */ buf[1] = _RDIV_REG | ((c << 5) & 
0x60); deb_info("%s: c=%u rfg=%u ba=%u bg=%u\n", __func__, c, rfg, ba, bg); return zl10036_write(state, buf, sizeof(buf)); } static int zl10036_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct zl10036_state *state = fe->tuner_priv; int ret = 0; u32 frequency = params->frequency; u32 fbw; int i; u8 c; /* ensure correct values * maybe redundant as core already checks this */ if ((frequency < fe->ops.info.frequency_min) || (frequency > fe->ops.info.frequency_max)) return -EINVAL; /** * alpha = 1.35 for dvb-s * fBW = (alpha*symbolrate)/(2*0.8) * 1.35 / (2*0.8) = 27 / 32 */ fbw = (27 * params->u.qpsk.symbol_rate) / 32; /* scale to kHz */ fbw /= 1000; /* Add safe margin of 3MHz */ fbw += 3000; /* setting the charge pump - guessed values */ if (frequency < 950000) return -EINVAL; else if (frequency < 1250000) c = 0; else if (frequency < 1750000) c = 1; else if (frequency < 2175000) c = 2; else return -EINVAL; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = zl10036_set_gain_params(state, c); if (ret < 0) goto error; ret = zl10036_set_frequency(state, params->frequency); if (ret < 0) goto error; ret = zl10036_set_bandwidth(state, fbw); if (ret < 0) goto error; /* wait for tuner lock - no idea if this is really needed */ for (i = 0; i < 20; i++) { ret = zl10036_read_status_reg(state); if (ret < 0) goto error; /* check Frequency & Phase Lock Bit */ if (ret & STATUS_FL) break; msleep(10); } error: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return ret; } static int zl10036_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct zl10036_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static int zl10036_init_regs(struct zl10036_state *state) { int ret; int i; /* could also be one block from reg 2 to 13 and additional 10/11 */ u8 zl10036_init_tab[][2] = { { 0x04, 0x00 }, /* 2/3: div=0x400 - arbitrary value */ { 0x8b, _RDIV_REG }, /* 4/5: rfg=0 
ba=1 bg=1 len=? */ /* p0=0 c=0 r=_RDIV_REG */ { 0xc0, 0x20 }, /* 6/7: rsd=0 bf=0x10 */ { 0xd3, 0x40 }, /* 8/9: from datasheet */ { 0xe3, 0x5b }, /* 10/11: lock window level */ { 0xf0, 0x28 }, /* 12/13: br=0xa clr=0 tl=0*/ { 0xe3, 0xf9 }, /* 10/11: unlock window level */ }; /* invalid values to trigger writing */ state->br = 0xff; state->bf = 0xff; if (!state->config->rf_loop_enable) zl10036_init_tab[1][0] |= 0x01; deb_info("%s\n", __func__); for (i = 0; i < ARRAY_SIZE(zl10036_init_tab); i++) { ret = zl10036_write(state, zl10036_init_tab[i], 2); if (ret < 0) return ret; } return 0; } static int zl10036_init(struct dvb_frontend *fe) { struct zl10036_state *state = fe->tuner_priv; int ret = 0; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = zl10036_read_status_reg(state); if (ret < 0) return ret; /* Only init if Power-on-Reset bit is set? */ ret = zl10036_init_regs(state); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return ret; } static struct dvb_tuner_ops zl10036_tuner_ops = { .info = { .name = "Zarlink ZL10036", .frequency_min = 950000, .frequency_max = 2175000 }, .init = zl10036_init, .release = zl10036_release, .sleep = zl10036_sleep, .set_params = zl10036_set_params, .get_frequency = zl10036_get_frequency, }; struct dvb_frontend *zl10036_attach(struct dvb_frontend *fe, const struct zl10036_config *config, struct i2c_adapter *i2c) { struct zl10036_state *state; int ret; if (!config) { printk(KERN_ERR "%s: no config specified", __func__); return NULL; } state = kzalloc(sizeof(struct zl10036_state), GFP_KERNEL); if (!state) return NULL; state->config = config; state->i2c = i2c; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = zl10036_read_status_reg(state); if (ret < 0) { printk(KERN_ERR "%s: No zl10036 found\n", __func__); goto error; } ret = zl10036_init_regs(state); if (ret < 0) { printk(KERN_ERR "%s: tuner initialization failed\n", __func__); goto error; } if 
(fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ fe->tuner_priv = state; memcpy(&fe->ops.tuner_ops, &zl10036_tuner_ops, sizeof(struct dvb_tuner_ops)); printk(KERN_INFO "%s: tuner initialization (%s addr=0x%02x) ok\n", __func__, fe->ops.tuner_ops.info.name, config->tuner_address); return fe; error: kfree(state); return NULL; } EXPORT_SYMBOL(zl10036_attach); module_param_named(debug, zl10036_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("DVB ZL10036 driver"); MODULE_AUTHOR("Tino Reichardt"); MODULE_AUTHOR("Matthias Schwarzott"); MODULE_LICENSE("GPL");
gpl-2.0
y10g/lge-kernel-startablet-l06c
arch/alpha/kernel/pci-noop.c
3875
4172
/*
 *	linux/arch/alpha/kernel/pci-noop.c
 *
 * Stub PCI interfaces for Jensen-specific kernels.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "proto.h"

/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;

/*
 * Allocate one pci_controller from boot memory and append it to the
 * global hose list.  Boot-time only; allocation failure panics inside
 * alloc_bootmem, so the result is never NULL here.
 */
struct pci_controller * __init
alloc_pci_controller(void)
{
	struct pci_controller *hose;

	hose = alloc_bootmem(sizeof(*hose));

	*hose_tail = hose;
	hose_tail = &hose->next;

	return hose;
}

/* Allocate a zeroed struct resource from boot memory. */
struct resource * __init
alloc_resource(void)
{
	struct resource *res;

	res = alloc_bootmem(sizeof(*res));

	return res;
}

/*
 * Return one of the iobase values of a controller, selected by @which.
 * The controller is looked up either by hose index (IOBASE_FROM_HOSE)
 * or - as a special ISA hook - by bus/devfn 0/0.
 */
asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
	struct pci_controller *hose;

	/* from hose or from bus.devfn */
	if (which & IOBASE_FROM_HOSE) {
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == bus)
				break;
		if (!hose)
			return -ENODEV;
	} else {
		/* Special hook for ISA access. */
		if (bus == 0 && dfn == 0)
			hose = pci_isa_hose;
		else
			return -ENODEV;
	}

	switch (which & ~IOBASE_FROM_HOSE) {
	case IOBASE_HOSE:
		return hose->index;
	case IOBASE_SPARSE_MEM:
		return hose->sparse_mem_base;
	case IOBASE_DENSE_MEM:
		return hose->dense_mem_base;
	case IOBASE_SPARSE_IO:
		return hose->sparse_io_base;
	case IOBASE_DENSE_IO:
		return hose->dense_io_base;
	case IOBASE_ROOT_BUS:
		return hose->bus->number;
	}

	return -EOPNOTSUPP;
}

/* No PCI on this platform: config-space reads always fail. */
asmlinkage long
sys_pciconfig_read(unsigned long bus, unsigned long dfn,
		   unsigned long off, unsigned long len, void *buf)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	else
		return -ENODEV;
}

/* No PCI on this platform: config-space writes always fail. */
asmlinkage long
sys_pciconfig_write(unsigned long bus, unsigned long dfn,
		    unsigned long off, unsigned long len, void *buf)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	else
		return -ENODEV;
}

/*
 * "No-op" DMA: memory is directly addressable, so the DMA address of a
 * buffer is simply its physical address.
 */
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* devices that can address all 32 bits don't need GFP_DMA memory */
	if (!dev || *dev->dma_mask >= 0xffffffffUL)
		gfp &= ~GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

static void alpha_noop_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_addr)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/* Map a page for DMA: just physical address of page + offset. */
static dma_addr_t alpha_noop_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	return page_to_pa(page) + offset;
}

/* Map a scatterlist: fill in identity (virt->phys) DMA addresses. */
static int alpha_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
			     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		void *va;

		BUG_ON(!sg_page(sg));
		va = sg_virt(sg);
		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

/* Identity mapping can never fail. */
static int alpha_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

/* Any mask covering at least 24 bits of address space is supported. */
static int alpha_noop_supported(struct device *dev, u64 mask)
{
	return mask < 0x00ffffffUL ? 0 : 1;
}

static int alpha_noop_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

struct dma_map_ops alpha_noop_ops = {
	.alloc_coherent		= alpha_noop_alloc_coherent,
	.free_coherent		= alpha_noop_free_coherent,
	.map_page		= alpha_noop_map_page,
	.map_sg			= alpha_noop_map_sg,
	.mapping_error		= alpha_noop_mapping_error,
	.dma_supported		= alpha_noop_supported,
	.set_dma_mask		= alpha_noop_set_mask,
};

struct dma_map_ops *dma_ops = &alpha_noop_ops;
EXPORT_SYMBOL(dma_ops);

/* No PCI: iomap of a BAR always fails. */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
gpl-2.0
GameTheory-/android_kernel_d505
arch/x86/pci/amd_bus.c
4643
9934
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/range.h>

#include <asm/amd_nb.h>
#include <asm/pci_x86.h>

#include <asm/pci-direct.h>

#include "bus_numa.h"

/*
 * This discovers the pcibus <-> node mapping on AMD K8.
 * also get peer root bus resource for io,mmio
 */

/* Identifies a candidate AMD northbridge by bus/slot/vendor/device. */
struct pci_hostbridge_probe {
	u32 bus;
	u32 slot;
	u32 vendor;
	u32 device;
};

static struct pci_hostbridge_probe pci_probes[] __initdata = {
	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1100 },
	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
	{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
};

#define RANGE_NUM 16

/**
 * early_fill_mp_bus_info()
 * called before pcibios_scan_root and pci_scan_bus
 * fills the mp_bus_to_cpumask array based according to the LDT Bus Number
 * Registers found in the K8 northbridge
 *
 * Walks the northbridge's bus-number, io-port and mmio config registers,
 * records per-node/per-link root-bus resources in pci_root_info[], and
 * subtracts RAM/mmconf regions from the mmio ranges.  Returns 0 (also
 * when no supported northbridge is found) or -1 when early PCI config
 * access is not allowed.
 */
static int __init early_fill_mp_bus_info(void)
{
	int i;
	int j;
	unsigned bus;
	unsigned slot;
	int node;
	int link;
	int def_node;
	int def_link;
	struct pci_root_info *info;
	u32 reg;
	struct resource *res;
	u64 start;
	u64 end;
	struct range range[RANGE_NUM];
	u64 val;
	u32 address;
	bool found;
	struct resource fam10h_mmconf_res, *fam10h_mmconf;
	u64 fam10h_mmconf_start;
	u64 fam10h_mmconf_end;

	if (!early_pci_allowed())
		return -1;

	/* locate a known AMD northbridge via direct config reads */
	found = false;
	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
		u32 id;
		u16 device;
		u16 vendor;

		bus = pci_probes[i].bus;
		slot = pci_probes[i].slot;
		id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);

		vendor = id & 0xffff;
		device = (id>>16) & 0xffff;
		if (pci_probes[i].vendor == vendor &&
		    pci_probes[i].device == device) {
			found = true;
			break;
		}
	}

	if (!found)
		return 0;

	/* LDT bus number registers 0xe0..0xec: one per link */
	pci_root_num = 0;
	for (i = 0; i < 4; i++) {
		int min_bus;
		int max_bus;
		reg = read_pci_config(bus, slot, 1, 0xe0 + (i << 2));

		/* Check if that register is enabled for bus range */
		if ((reg & 7) != 3)
			continue;

		min_bus = (reg >> 16) & 0xff;
		max_bus = (reg >> 24) & 0xff;
		node = (reg >> 4) & 0x07;
#ifdef CONFIG_NUMA
		for (j = min_bus; j <= max_bus; j++)
			set_mp_bus_to_node(j, node);
#endif
		link = (reg >> 8) & 0x03;

		info = &pci_root_info[pci_root_num];
		info->bus_min = min_bus;
		info->bus_max = max_bus;
		info->node = node;
		info->link = link;
		sprintf(info->name, "PCI Bus #%02x", min_bus);
		pci_root_num++;
	}

	/* get the default node and link for left over res */
	reg = read_pci_config(bus, slot, 0, 0x60);
	def_node = (reg >> 8) & 0x07;
	reg = read_pci_config(bus, slot, 0, 0x64);
	def_link = (reg >> 8) & 0x03;

	/* start with the whole 64K io space, subtract claimed windows */
	memset(range, 0, sizeof(range));
	add_range(range, RANGE_NUM, 0, 0, 0xffff + 1);
	/* io port resource */
	for (i = 0; i < 4; i++) {
		reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3));
		if (!(reg & 3))
			continue;

		start = reg & 0xfff000;
		reg = read_pci_config(bus, slot, 1, 0xc4 + (i << 3));
		node = reg & 0x07;
		link = (reg >> 4) & 0x03;
		end = (reg & 0xfff000) | 0xfff;

		/* find the position */
		for (j = 0; j < pci_root_num; j++) {
			info = &pci_root_info[j];
			if (info->node == node && info->link == link)
				break;
		}
		if (j == pci_root_num)
			continue; /* not found */

		info = &pci_root_info[j];
		printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
		       node, link, start, end);

		/* kernel only handle 16 bit only */
		if (end > 0xffff)
			end = 0xffff;
		update_res(info, start, end, IORESOURCE_IO, 1);
		subtract_range(range, RANGE_NUM, start, end + 1);
	}
	/* add left over io port range to def node/link, [0, 0xffff] */
	/* find the position */
	for (j = 0; j < pci_root_num; j++) {
		info = &pci_root_info[j];
		if (info->node == def_node && info->link == def_link)
			break;
	}
	if (j < pci_root_num) {
		info = &pci_root_info[j];
		for (i = 0; i < RANGE_NUM; i++) {
			if (!range[i].end)
				continue;

			update_res(info, range[i].start, range[i].end - 1,
				   IORESOURCE_IO, 1);
		}
	}

	memset(range, 0, sizeof(range));
	/* 0xfd00000000-0xffffffffff for HT */
	end = cap_resource((0xfdULL<<32) - 1);
	end++;
	add_range(range, RANGE_NUM, 0, 0, end);

	/* need to take out [0, TOM) for RAM*/
	address = MSR_K8_TOP_MEM1;
	rdmsrl(address, val);
	end = (val & 0xffffff800000ULL);
	printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20);
	if (end < (1ULL<<32))
		subtract_range(range, RANGE_NUM, 0, end);

	/* get mmconfig */
	fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
	/* need to take out mmconf range */
	if (fam10h_mmconf) {
		printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
		fam10h_mmconf_start = fam10h_mmconf->start;
		fam10h_mmconf_end = fam10h_mmconf->end;
		subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
				 fam10h_mmconf_end + 1);
	} else {
		fam10h_mmconf_start = 0;
		fam10h_mmconf_end = 0;
	}

	/* mmio resource: registers 0x80..0xbc, one base/limit pair per window */
	for (i = 0; i < 8; i++) {
		reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
		if (!(reg & 3))
			continue;

		start = reg & 0xffffff00; /* 39:16 on 31:8*/
		start <<= 8;
		reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
		node = reg & 0x07;
		link = (reg >> 4) & 0x03;
		end = (reg & 0xffffff00);
		end <<= 8;
		end |= 0xffff;

		/* find the position */
		for (j = 0; j < pci_root_num; j++) {
			info = &pci_root_info[j];
			if (info->node == node && info->link == link)
				break;
		}
		if (j == pci_root_num)
			continue; /* not found */

		info = &pci_root_info[j];

		printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
		       node, link, start, end);
		/*
		 * some sick allocation would have range overlap with fam10h
		 * mmconf range, so need to update start and end.
		 */
		if (fam10h_mmconf_end) {
			int changed = 0;
			u64 endx = 0;
			if (start >= fam10h_mmconf_start &&
			    start <= fam10h_mmconf_end) {
				start = fam10h_mmconf_end + 1;
				changed = 1;
			}

			if (end >= fam10h_mmconf_start &&
			    end <= fam10h_mmconf_end) {
				end = fam10h_mmconf_start - 1;
				changed = 1;
			}

			if (start < fam10h_mmconf_start &&
			    end > fam10h_mmconf_end) {
				/* we got a hole */
				endx = fam10h_mmconf_start - 1;
				update_res(info, start, endx, IORESOURCE_MEM, 0);
				subtract_range(range, RANGE_NUM, start,
						 endx + 1);
				printk(KERN_CONT " ==> [%llx, %llx]", start, endx);
				start = fam10h_mmconf_end + 1;
				changed = 1;
			}
			if (changed) {
				if (start <= end) {
					printk(KERN_CONT " %s [%llx, %llx]",
						endx ? "and" : "==>", start, end);
				} else {
					printk(KERN_CONT "%s\n",
						endx?"":" ==> none");
					continue;
				}
			}
		}

		update_res(info, cap_resource(start), cap_resource(end),
			   IORESOURCE_MEM, 1);
		subtract_range(range, RANGE_NUM, start, end + 1);
		printk(KERN_CONT "\n");
	}

	/* need to take out [4G, TOM2) for RAM*/
	/* SYS_CFG */
	address = MSR_K8_SYSCFG;
	rdmsrl(address, val);
	/* TOP_MEM2 is enabled? */
	if (val & (1<<21)) {
		/* TOP_MEM2 */
		address = MSR_K8_TOP_MEM2;
		rdmsrl(address, val);
		end = (val & 0xffffff800000ULL);
		printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20);
		subtract_range(range, RANGE_NUM, 1ULL<<32, end);
	}

	/*
	 * add left over mmio range to def node/link ?
	 * that is tricky, just record range in from start_min to 4G
	 */
	for (j = 0; j < pci_root_num; j++) {
		info = &pci_root_info[j];
		if (info->node == def_node && info->link == def_link)
			break;
	}
	if (j < pci_root_num) {
		info = &pci_root_info[j];
		for (i = 0; i < RANGE_NUM; i++) {
			if (!range[i].end)
				continue;

			update_res(info, cap_resource(range[i].start),
				   cap_resource(range[i].end - 1),
				   IORESOURCE_MEM, 1);
		}
	}

	/* dump what we discovered */
	for (i = 0; i < pci_root_num; i++) {
		int res_num;
		int busnum;

		info = &pci_root_info[i];
		res_num = info->res_num;
		busnum = info->bus_min;
		printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n",
		       info->bus_min, info->bus_max, info->node, info->link);
		for (j = 0; j < res_num; j++) {
			res = &info->res[j];
			printk(KERN_DEBUG "bus: %02x index %x %pR\n",
				       busnum, j, res);
		}
	}

	return 0;
}

#define ENABLE_CF8_EXT_CFG      (1ULL << 46)

/*
 * Enable extended config space access via CF8 on the current CPU
 * by setting the corresponding bit in the NB_CFG MSR.
 * Runs on each CPU via smp_call_function_single().
 */
static void __cpuinit enable_pci_io_ecs(void *unused)
{
	u64 reg;
	rdmsrl(MSR_AMD64_NB_CFG, reg);
	if (!(reg & ENABLE_CF8_EXT_CFG)) {
		reg |= ENABLE_CF8_EXT_CFG;
		wrmsrl(MSR_AMD64_NB_CFG, reg);
	}
}

/* CPU hotplug callback: enable IO ECS on every CPU that comes online. */
static int __cpuinit amd_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata amd_cpu_notifier = {
	.notifier_call	= amd_cpu_notify,
};

/*
 * Enable IO ECS via the northbridge PCI config registers (0x8c)
 * on every detected AMD northbridge function 3 device.
 */
static void __init pci_enable_pci_io_ecs(void)
{
#ifdef CONFIG_AMD_NB
	unsigned int i, n;

	for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) {
		u8 bus = amd_nb_bus_dev_ranges[i].bus;
		u8 slot = amd_nb_bus_dev_ranges[i].dev_base;
		u8 limit = amd_nb_bus_dev_ranges[i].dev_limit;

		for (; slot < limit; ++slot) {
			u32 val = read_pci_config(bus, slot, 3, 0);

			if (!early_is_amd_nb(val))
				continue;

			val = read_pci_config(bus, slot, 3, 0x8c);
			if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) {
				val |= ENABLE_CF8_EXT_CFG >> 32;
				write_pci_config(bus, slot, 3, 0x8c, val);
			}
			++n;
		}
	}
#endif
}

/*
 * Turn on IO ECS for fam10h+ CPUs: try the PCI method, then fall back
 * to the per-CPU MSR method (and keep it enabled across hotplug).
 */
static int __init pci_io_ecs_init(void)
{
	int cpu;

	/* assume all cpus from fam10h have IO ECS */
	if (boot_cpu_data.x86 < 0x10)
		return 0;

	/* Try the PCI method first. */
	if (early_pci_allowed())
		pci_enable_pci_io_ecs();

	register_cpu_notifier(&amd_cpu_notifier);
	for_each_online_cpu(cpu)
		amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
			       (void *)(long)cpu);
	pci_probe |= PCI_HAS_IO_ECS;

	return 0;
}

/* Entry point: only act on AMD CPUs. */
static int __init amd_postcore_init(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return 0;

	early_fill_mp_bus_info();
	pci_io_ecs_init();

	return 0;
}

postcore_initcall(amd_postcore_init);
gpl-2.0
javelinanddart/mako
arch/powerpc/platforms/wsp/smp.c
4643
2011
/*
 *  SMP Support for A2 platforms
 *
 *  Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/smp.h>

#include <asm/dbell.h>
#include <asm/machdep.h>
#include <asm/xics.h>

#include "ics.h"
#include "wsp.h"

/* Per-CPU setup: enable doorbell IPIs; secondaries also set up xics. */
static void __devinit smp_a2_setup_cpu(int cpu)
{
	doorbell_setup_this_cpu();

	if (cpu != boot_cpuid)
		xics_setup_cpu();
}

/*
 * Start secondary CPU @nr.  Looks up the CPU's device-tree node,
 * validates its "enable-method" (only "ibm,a2-scom" is understood)
 * and kicks the spinning processor by setting its paca cpu_start flag.
 * Returns 0 on success, a negative errno (or -1 on SCOM failure) otherwise.
 */
int __devinit smp_a2_kick_cpu(int nr)
{
	const char *enable_method;
	struct device_node *np;
	int thr_idx;

	if (nr < 0 || nr >= NR_CPUS)
		return -ENOENT;

	np = of_get_cpu_node(nr, &thr_idx);
	if (!np)
		return -ENODEV;

	enable_method = of_get_property(np, "enable-method", NULL);
	pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);

	if (!enable_method) {
		printk(KERN_ERR "CPU%d has no enable-method\n", nr);
		return -ENOENT;
	} else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
		if (a2_scom_startup_cpu(nr, thr_idx, np))
			return -1;
	} else {
		printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
		       nr, enable_method);
		return -EINVAL;
	}

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero After we set cpu_start,
	 * the processor will continue on to secondary_start
	 */
	paca[nr].cpu_start = 1;

	return 0;
}

/* Report how many CPUs are possible; nothing to probe at runtime. */
static int __init smp_a2_probe(void)
{
	return num_possible_cpus();
}

static struct smp_ops_t a2_smp_ops = {
	.message_pass	= NULL,	/* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= doorbell_cause_ipi,
	.probe		= smp_a2_probe,
	.kick_cpu	= smp_a2_kick_cpu,
	.setup_cpu	= smp_a2_setup_cpu,
};

/* Install the A2 SMP ops; called from platform setup. */
void __init a2_setup_smp(void)
{
	smp_ops = &a2_smp_ops;
}
gpl-2.0
sohkis/android_kernel_lge_hammerhead
drivers/staging/rtl8192e/rtllib_softmac_wx.c
4899
15092
/* IEEE 802.11 SoftMAC layer * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it> * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. * * Some pieces of code might be stolen from ipw2100 driver * copyright of who own it's copyright ;-) * * PS wx handler mostly stolen from hostap, copyright who * own it's copyright ;-) * * released under the GPL */ #include "rtllib.h" #include "dot11d.h" /* FIXME: add A freqs */ const long rtllib_wlan_frequencies[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; EXPORT_SYMBOL(rtllib_wlan_frequencies); int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct iw_freq *fwrq = &wrqu->freq; down(&ieee->wx_sem); if (ieee->iw_mode == IW_MODE_INFRA) { ret = 0; goto out; } /* if setting by freq convert to channel */ if (fwrq->e == 1) { if ((fwrq->m >= (int) 2.412e8 && fwrq->m <= (int) 2.487e8)) { int f = fwrq->m / 100000; int c = 0; while ((c < 14) && (f != rtllib_wlan_frequencies[c])) c++; /* hack to fall through */ fwrq->e = 0; fwrq->m = c + 1; } } if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1) { ret = -EOPNOTSUPP; goto out; } else { /* Set the channel */ if (ieee->active_channel_map[fwrq->m] != 1) { ret = -EINVAL; goto out; } ieee->current_network.channel = fwrq->m; ieee->set_chan(ieee->dev, ieee->current_network.channel); if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) if (ieee->state == RTLLIB_LINKED) { rtllib_stop_send_beacons(ieee); rtllib_start_send_beacons(ieee); } } ret = 0; out: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(rtllib_wx_set_freq); int rtllib_wx_get_freq(struct rtllib_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct iw_freq *fwrq = &wrqu->freq; if (ieee->current_network.channel == 0) return -1; fwrq->m = rtllib_wlan_frequencies[ieee->current_network.channel-1] * 100000; fwrq->e = 1; return 0; } 
EXPORT_SYMBOL(rtllib_wx_get_freq); int rtllib_wx_get_wap(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { unsigned long flags; wrqu->ap_addr.sa_family = ARPHRD_ETHER; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->state != RTLLIB_LINKED && ieee->state != RTLLIB_LINKED_SCANNING && ieee->wap_set == 0) memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); else memcpy(wrqu->ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN); spin_unlock_irqrestore(&ieee->lock, flags); return 0; } EXPORT_SYMBOL(rtllib_wx_get_wap); int rtllib_wx_set_wap(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { int ret = 0; u8 zero[] = {0, 0, 0, 0, 0, 0}; unsigned long flags; short ifup = ieee->proto_started; struct sockaddr *temp = (struct sockaddr *)awrq; rtllib_stop_scan_syncro(ieee); down(&ieee->wx_sem); /* use ifconfig hw ether */ if (ieee->iw_mode == IW_MODE_MASTER) { ret = -1; goto out; } if (temp->sa_family != ARPHRD_ETHER) { ret = -EINVAL; goto out; } if (memcmp(temp->sa_data, zero, ETH_ALEN) == 0) { spin_lock_irqsave(&ieee->lock, flags); memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN); ieee->wap_set = 0; spin_unlock_irqrestore(&ieee->lock, flags); ret = -1; goto out; } if (ifup) rtllib_stop_protocol(ieee, true); /* just to avoid to give inconsistent infos in the * get wx method. 
not really needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); ieee->cannot_notify = false; memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN); ieee->wap_set = (memcmp(temp->sa_data, zero, ETH_ALEN) != 0); spin_unlock_irqrestore(&ieee->lock, flags); if (ifup) rtllib_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(rtllib_wx_set_wap); int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int len, ret = 0; unsigned long flags; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->current_network.ssid[0] == '\0' || ieee->current_network.ssid_len == 0) { ret = -1; goto out; } if (ieee->state != RTLLIB_LINKED && ieee->state != RTLLIB_LINKED_SCANNING && ieee->ssid_set == 0) { ret = -1; goto out; } len = ieee->current_network.ssid_len; wrqu->essid.length = len; strncpy(b, ieee->current_network.ssid, len); wrqu->essid.flags = 1; out: spin_unlock_irqrestore(&ieee->lock, flags); return ret; } EXPORT_SYMBOL(rtllib_wx_get_essid); int rtllib_wx_set_rate(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 target_rate = wrqu->bitrate.value; ieee->rate = target_rate/100000; return 0; } EXPORT_SYMBOL(rtllib_wx_set_rate); int rtllib_wx_get_rate(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 tmp_rate = 0; tmp_rate = TxCountToDataRate(ieee, ieee->softmac_stats.CurrentShowTxate); wrqu->bitrate.value = tmp_rate * 500000; return 0; } EXPORT_SYMBOL(rtllib_wx_get_rate); int rtllib_wx_set_rts(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { if (wrqu->rts.disabled || !wrqu->rts.fixed) ieee->rts = DEFAULT_RTS_THRESHOLD; else { if (wrqu->rts.value < MIN_RTS_THRESHOLD || wrqu->rts.value > MAX_RTS_THRESHOLD) return -EINVAL; ieee->rts = 
wrqu->rts.value; } return 0; } EXPORT_SYMBOL(rtllib_wx_set_rts); int rtllib_wx_get_rts(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { wrqu->rts.value = ieee->rts; wrqu->rts.fixed = 0; /* no auto select */ wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); return 0; } EXPORT_SYMBOL(rtllib_wx_get_rts); int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int set_mode_status = 0; rtllib_stop_scan_syncro(ieee); down(&ieee->wx_sem); switch (wrqu->mode) { case IW_MODE_MONITOR: case IW_MODE_ADHOC: case IW_MODE_INFRA: break; case IW_MODE_AUTO: wrqu->mode = IW_MODE_INFRA; break; default: set_mode_status = -EINVAL; goto out; } if (wrqu->mode == ieee->iw_mode) goto out; if (wrqu->mode == IW_MODE_MONITOR) { ieee->dev->type = ARPHRD_IEEE80211; rtllib_EnableNetMonitorMode(ieee->dev, false); } else { ieee->dev->type = ARPHRD_ETHER; if (ieee->iw_mode == IW_MODE_MONITOR) rtllib_DisableNetMonitorMode(ieee->dev, false); } if (!ieee->proto_started) { ieee->iw_mode = wrqu->mode; } else { rtllib_stop_protocol(ieee, true); ieee->iw_mode = wrqu->mode; rtllib_start_protocol(ieee); } out: up(&ieee->wx_sem); return set_mode_status; } EXPORT_SYMBOL(rtllib_wx_set_mode); void rtllib_wx_sync_scan_wq(void *data) { struct rtllib_device *ieee = container_of_work_rsl(data, struct rtllib_device, wx_sync_scan_wq); short chan; enum ht_extchnl_offset chan_offset = 0; enum ht_channel_width bandwidth = 0; int b40M = 0; static int count; if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) { rtllib_start_scan_syncro(ieee, 0); goto out; } chan = ieee->current_network.channel; if (ieee->LeisurePSLeave) ieee->LeisurePSLeave(ieee->dev); /* notify AP to be in PS mode */ rtllib_sta_ps_send_null_frame(ieee, 1); rtllib_sta_ps_send_null_frame(ieee, 1); rtllib_stop_all_queues(ieee); if (ieee->data_hard_stop) ieee->data_hard_stop(ieee->dev); rtllib_stop_send_beacons(ieee); ieee->state = 
RTLLIB_LINKED_SCANNING; ieee->link_change(ieee->dev); /* wait for ps packet to be kicked out successfully */ msleep(50); if (ieee->ScanOperationBackupHandler) ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP); if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT && ieee->pHTInfo->bCurBW40MHz) { b40M = 1; chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset; bandwidth = (enum ht_channel_width)ieee->pHTInfo->bCurBW40MHz; RT_TRACE(COMP_DBG, "Scan in 40M, force to 20M first:%d, %d\n", chan_offset, bandwidth); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } rtllib_start_scan_syncro(ieee, 0); if (b40M) { RT_TRACE(COMP_DBG, "Scan in 20M, back to 40M\n"); if (chan_offset == HT_EXTCHNL_OFFSET_UPPER) ieee->set_chan(ieee->dev, chan + 2); else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER) ieee->set_chan(ieee->dev, chan - 2); else ieee->set_chan(ieee->dev, chan); ieee->SetBWModeHandler(ieee->dev, bandwidth, chan_offset); } else { ieee->set_chan(ieee->dev, chan); } if (ieee->ScanOperationBackupHandler) ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE); ieee->state = RTLLIB_LINKED; ieee->link_change(ieee->dev); /* Notify AP that I wake up again */ rtllib_sta_ps_send_null_frame(ieee, 0); if (ieee->LinkDetectInfo.NumRecvBcnInPeriod == 0 || ieee->LinkDetectInfo.NumRecvDataInPeriod == 0) { ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1; ieee->LinkDetectInfo.NumRecvDataInPeriod = 1; } if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) rtllib_start_send_beacons(ieee); rtllib_wake_all_queues(ieee); count = 0; out: up(&ieee->wx_sem); } int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret = 0; down(&ieee->wx_sem); if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) { ret = -1; goto out; } if (ieee->state == RTLLIB_LINKED) { queue_work_rsl(ieee->wq, 
&ieee->wx_sync_scan_wq); /* intentionally forget to up sem */ return 0; } out: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(rtllib_wx_set_scan); int rtllib_wx_set_essid(struct rtllib_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { int ret = 0, len, i; short proto_started; unsigned long flags; rtllib_stop_scan_syncro(ieee); down(&ieee->wx_sem); proto_started = ieee->proto_started; len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? wrqu->essid.length : IW_ESSID_MAX_SIZE; if (len > IW_ESSID_MAX_SIZE) { ret = -E2BIG; goto out; } if (ieee->iw_mode == IW_MODE_MONITOR) { ret = -1; goto out; } for (i = 0; i < len; i++) { if (extra[i] < 0) { ret = -1; goto out; } } if (proto_started) rtllib_stop_protocol(ieee, true); /* this is just to be sure that the GET wx callback * has consisten infos. not needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); if (wrqu->essid.flags && wrqu->essid.length) { strncpy(ieee->current_network.ssid, extra, len); ieee->current_network.ssid_len = len; ieee->cannot_notify = false; ieee->ssid_set = 1; } else { ieee->ssid_set = 0; ieee->current_network.ssid[0] = '\0'; ieee->current_network.ssid_len = 0; } spin_unlock_irqrestore(&ieee->lock, flags); if (proto_started) rtllib_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(rtllib_wx_set_essid); int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { wrqu->mode = ieee->iw_mode; return 0; } EXPORT_SYMBOL(rtllib_wx_get_mode); int rtllib_wx_set_rawtx(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int *parms = (int *)extra; int enable = (parms[0] > 0); short prev = ieee->raw_tx; down(&ieee->wx_sem); if (enable) ieee->raw_tx = 1; else ieee->raw_tx = 0; printk(KERN_INFO"raw TX is %s\n", ieee->raw_tx ? 
"enabled" : "disabled"); if (ieee->iw_mode == IW_MODE_MONITOR) { if (prev == 0 && ieee->raw_tx) { if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); netif_carrier_on(ieee->dev); } if (prev && ieee->raw_tx == 1) netif_carrier_off(ieee->dev); } up(&ieee->wx_sem); return 0; } EXPORT_SYMBOL(rtllib_wx_set_rawtx); int rtllib_wx_get_name(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { strcpy(wrqu->name, "802.11"); if (ieee->modulation & RTLLIB_CCK_MODULATION) strcat(wrqu->name, "b"); if (ieee->modulation & RTLLIB_OFDM_MODULATION) strcat(wrqu->name, "g"); if (ieee->mode & (IEEE_N_24G | IEEE_N_5G)) strcat(wrqu->name, "n"); return 0; } EXPORT_SYMBOL(rtllib_wx_get_name); /* this is mostly stolen from hostap */ int rtllib_wx_set_power(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; if ((!ieee->sta_wake_up) || (!ieee->enter_sleep_state) || (!ieee->ps_is_queue_empty)) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "%s(): PS mode is tryied to be use " "but driver missed a callback\n\n", __func__); return -1; } down(&ieee->wx_sem); if (wrqu->power.disabled) { RT_TRACE(COMP_DBG, "===>%s(): power disable\n", __func__); ieee->ps = RTLLIB_PS_DISABLED; goto exit; } if (wrqu->power.flags & IW_POWER_TIMEOUT) { ieee->ps_timeout = wrqu->power.value / 1000; RT_TRACE(COMP_DBG, "===>%s():ps_timeout is %d\n", __func__, ieee->ps_timeout); } if (wrqu->power.flags & IW_POWER_PERIOD) ieee->ps_period = wrqu->power.value / 1000; switch (wrqu->power.flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: ieee->ps = RTLLIB_PS_UNICAST; break; case IW_POWER_MULTICAST_R: ieee->ps = RTLLIB_PS_MBCAST; break; case IW_POWER_ALL_R: ieee->ps = RTLLIB_PS_UNICAST | RTLLIB_PS_MBCAST; break; case IW_POWER_ON: break; default: ret = -EINVAL; goto exit; } exit: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(rtllib_wx_set_power); /* this is stolen from hostap */ int rtllib_wx_get_power(struct rtllib_device *ieee, struct 
iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; down(&ieee->wx_sem); if (ieee->ps == RTLLIB_PS_DISABLED) { wrqu->power.disabled = 1; goto exit; } wrqu->power.disabled = 0; if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { wrqu->power.flags = IW_POWER_TIMEOUT; wrqu->power.value = ieee->ps_timeout * 1000; } else { wrqu->power.flags = IW_POWER_PERIOD; wrqu->power.value = ieee->ps_period * 1000; } if ((ieee->ps & (RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST)) == (RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST)) wrqu->power.flags |= IW_POWER_ALL_R; else if (ieee->ps & RTLLIB_PS_MBCAST) wrqu->power.flags |= IW_POWER_MULTICAST_R; else wrqu->power.flags |= IW_POWER_UNICAST_R; exit: up(&ieee->wx_sem); return ret; } EXPORT_SYMBOL(rtllib_wx_get_power);
gpl-2.0
bilalliberty/android_kernel_htc_villec2-caf-display
sound/soc/jz4740/jz4740-i2s.c
4899
12934
/* * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include "jz4740-i2s.h" #include "jz4740-pcm.h" #define JZ_REG_AIC_CONF 0x00 #define JZ_REG_AIC_CTRL 0x04 #define JZ_REG_AIC_I2S_FMT 0x10 #define JZ_REG_AIC_FIFO_STATUS 0x14 #define JZ_REG_AIC_I2S_STATUS 0x1c #define JZ_REG_AIC_CLK_DIV 0x30 #define JZ_REG_AIC_FIFO 0x34 #define JZ_AIC_CONF_FIFO_RX_THRESHOLD_MASK (0xf << 12) #define JZ_AIC_CONF_FIFO_TX_THRESHOLD_MASK (0xf << 8) #define JZ_AIC_CONF_OVERFLOW_PLAY_LAST BIT(6) #define JZ_AIC_CONF_INTERNAL_CODEC BIT(5) #define JZ_AIC_CONF_I2S BIT(4) #define JZ_AIC_CONF_RESET BIT(3) #define JZ_AIC_CONF_BIT_CLK_MASTER BIT(2) #define JZ_AIC_CONF_SYNC_CLK_MASTER BIT(1) #define JZ_AIC_CONF_ENABLE BIT(0) #define JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 12 #define JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 8 #define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK (0x7 << 19) #define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK (0x7 << 16) #define JZ_AIC_CTRL_ENABLE_RX_DMA BIT(15) #define JZ_AIC_CTRL_ENABLE_TX_DMA BIT(14) #define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11) #define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10) #define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9) #define JZ_AIC_CTRL_FLUSH BIT(8) #define 
JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6) #define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5) #define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4) #define JZ_AIC_CTRL_ENABLE_TFS_INT BIT(3) #define JZ_AIC_CTRL_ENABLE_LOOPBACK BIT(2) #define JZ_AIC_CTRL_ENABLE_PLAYBACK BIT(1) #define JZ_AIC_CTRL_ENABLE_CAPTURE BIT(0) #define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET 19 #define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET 16 #define JZ_AIC_I2S_FMT_DISABLE_BIT_CLK BIT(12) #define JZ_AIC_I2S_FMT_ENABLE_SYS_CLK BIT(4) #define JZ_AIC_I2S_FMT_MSB BIT(0) #define JZ_AIC_I2S_STATUS_BUSY BIT(2) #define JZ_AIC_CLK_DIV_MASK 0xf struct jz4740_i2s { struct resource *mem; void __iomem *base; dma_addr_t phys_base; struct clk *clk_aic; struct clk *clk_i2s; struct jz4740_pcm_config pcm_config_playback; struct jz4740_pcm_config pcm_config_capture; }; static inline uint32_t jz4740_i2s_read(const struct jz4740_i2s *i2s, unsigned int reg) { return readl(i2s->base + reg); } static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s, unsigned int reg, uint32_t value) { writel(value, i2s->base + reg); } static int jz4740_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf, ctrl; if (dai->active) return 0; ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); ctrl |= JZ_AIC_CTRL_FLUSH; jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); clk_enable(i2s->clk_i2s); conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf |= JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); return 0; } static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; if (dai->active) return; conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); clk_disable(i2s->clk_i2s); } static int jz4740_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s 
= snd_soc_dai_get_drvdata(dai); uint32_t ctrl; uint32_t mask; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) mask = JZ_AIC_CTRL_ENABLE_PLAYBACK | JZ_AIC_CTRL_ENABLE_TX_DMA; else mask = JZ_AIC_CTRL_ENABLE_CAPTURE | JZ_AIC_CTRL_ENABLE_RX_DMA; ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ctrl |= mask; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ctrl &= ~mask; break; default: return -EINVAL; } jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); return 0; } static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t format = 0; uint32_t conf; conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~(JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: conf |= JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER; format |= JZ_AIC_I2S_FMT_ENABLE_SYS_CLK; break; case SND_SOC_DAIFMT_CBM_CFS: conf |= JZ_AIC_CONF_SYNC_CLK_MASTER; break; case SND_SOC_DAIFMT_CBS_CFM: conf |= JZ_AIC_CONF_BIT_CLK_MASTER; break; case SND_SOC_DAIFMT_CBM_CFM: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_MSB: format |= JZ_AIC_I2S_FMT_MSB; break; case SND_SOC_DAIFMT_I2S: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; default: return -EINVAL; } jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); jz4740_i2s_write(i2s, JZ_REG_AIC_I2S_FMT, format); return 0; } static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); enum jz4740_dma_width dma_width; struct jz4740_pcm_config *pcm_config; unsigned int sample_size; uint32_t ctrl; ctrl = 
jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: sample_size = 0; dma_width = JZ4740_DMA_WIDTH_8BIT; break; case SNDRV_PCM_FORMAT_S16: sample_size = 1; dma_width = JZ4740_DMA_WIDTH_16BIT; break; default: return -EINVAL; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ctrl &= ~JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK; ctrl |= sample_size << JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET; if (params_channels(params) == 1) ctrl |= JZ_AIC_CTRL_MONO_TO_STEREO; else ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO; pcm_config = &i2s->pcm_config_playback; pcm_config->dma_config.dst_width = dma_width; } else { ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK; ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET; pcm_config = &i2s->pcm_config_capture; pcm_config->dma_config.src_width = dma_width; } jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); snd_soc_dai_set_dma_data(dai, substream, pcm_config); return 0; } static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); struct clk *parent; int ret = 0; switch (clk_id) { case JZ4740_I2S_CLKSRC_EXT: parent = clk_get(NULL, "ext"); clk_set_parent(i2s->clk_i2s, parent); break; case JZ4740_I2S_CLKSRC_PLL: parent = clk_get(NULL, "pll half"); clk_set_parent(i2s->clk_i2s, parent); ret = clk_set_rate(i2s->clk_i2s, freq); break; default: return -EINVAL; } clk_put(parent); return ret; } static int jz4740_i2s_suspend(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; if (dai->active) { conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); clk_disable(i2s->clk_i2s); } clk_disable(i2s->clk_aic); return 0; } static int jz4740_i2s_resume(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; clk_enable(i2s->clk_aic); if (dai->active) { clk_enable(i2s->clk_i2s); conf 
= jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf |= JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); } return 0; } static void jz4740_i2c_init_pcm_config(struct jz4740_i2s *i2s) { struct jz4740_dma_config *dma_config; /* Playback */ dma_config = &i2s->pcm_config_playback.dma_config; dma_config->src_width = JZ4740_DMA_WIDTH_32BIT, dma_config->transfer_size = JZ4740_DMA_TRANSFER_SIZE_16BYTE; dma_config->request_type = JZ4740_DMA_TYPE_AIC_TRANSMIT; dma_config->flags = JZ4740_DMA_SRC_AUTOINC; dma_config->mode = JZ4740_DMA_MODE_SINGLE; i2s->pcm_config_playback.fifo_addr = i2s->phys_base + JZ_REG_AIC_FIFO; /* Capture */ dma_config = &i2s->pcm_config_capture.dma_config; dma_config->dst_width = JZ4740_DMA_WIDTH_32BIT, dma_config->transfer_size = JZ4740_DMA_TRANSFER_SIZE_16BYTE; dma_config->request_type = JZ4740_DMA_TYPE_AIC_RECEIVE; dma_config->flags = JZ4740_DMA_DST_AUTOINC; dma_config->mode = JZ4740_DMA_MODE_SINGLE; i2s->pcm_config_capture.fifo_addr = i2s->phys_base + JZ_REG_AIC_FIFO; } static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; clk_enable(i2s->clk_aic); jz4740_i2c_init_pcm_config(i2s); conf = (7 << JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) | (8 << JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) | JZ_AIC_CONF_OVERFLOW_PLAY_LAST | JZ_AIC_CONF_I2S | JZ_AIC_CONF_INTERNAL_CODEC; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, JZ_AIC_CONF_RESET); jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); return 0; } static int jz4740_i2s_dai_remove(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); clk_disable(i2s->clk_aic); return 0; } static const struct snd_soc_dai_ops jz4740_i2s_dai_ops = { .startup = jz4740_i2s_startup, .shutdown = jz4740_i2s_shutdown, .trigger = jz4740_i2s_trigger, .hw_params = jz4740_i2s_hw_params, .set_fmt = jz4740_i2s_set_fmt, .set_sysclk = jz4740_i2s_set_sysclk, }; #define JZ4740_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE) static struct 
snd_soc_dai_driver jz4740_i2s_dai = { .probe = jz4740_i2s_dai_probe, .remove = jz4740_i2s_dai_remove, .playback = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = JZ4740_I2S_FMTS, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = JZ4740_I2S_FMTS, }, .symmetric_rates = 1, .ops = &jz4740_i2s_dai_ops, .suspend = jz4740_i2s_suspend, .resume = jz4740_i2s_resume, }; static int __devinit jz4740_i2s_dev_probe(struct platform_device *pdev) { struct jz4740_i2s *i2s; int ret; i2s = kzalloc(sizeof(*i2s), GFP_KERNEL); if (!i2s) return -ENOMEM; i2s->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!i2s->mem) { ret = -ENOENT; goto err_free; } i2s->mem = request_mem_region(i2s->mem->start, resource_size(i2s->mem), pdev->name); if (!i2s->mem) { ret = -EBUSY; goto err_free; } i2s->base = ioremap_nocache(i2s->mem->start, resource_size(i2s->mem)); if (!i2s->base) { ret = -EBUSY; goto err_release_mem_region; } i2s->phys_base = i2s->mem->start; i2s->clk_aic = clk_get(&pdev->dev, "aic"); if (IS_ERR(i2s->clk_aic)) { ret = PTR_ERR(i2s->clk_aic); goto err_iounmap; } i2s->clk_i2s = clk_get(&pdev->dev, "i2s"); if (IS_ERR(i2s->clk_i2s)) { ret = PTR_ERR(i2s->clk_i2s); goto err_clk_put_aic; } platform_set_drvdata(pdev, i2s); ret = snd_soc_register_dai(&pdev->dev, &jz4740_i2s_dai); if (ret) { dev_err(&pdev->dev, "Failed to register DAI\n"); goto err_clk_put_i2s; } return 0; err_clk_put_i2s: clk_put(i2s->clk_i2s); err_clk_put_aic: clk_put(i2s->clk_aic); err_iounmap: iounmap(i2s->base); err_release_mem_region: release_mem_region(i2s->mem->start, resource_size(i2s->mem)); err_free: kfree(i2s); return ret; } static int __devexit jz4740_i2s_dev_remove(struct platform_device *pdev) { struct jz4740_i2s *i2s = platform_get_drvdata(pdev); snd_soc_unregister_dai(&pdev->dev); clk_put(i2s->clk_i2s); clk_put(i2s->clk_aic); iounmap(i2s->base); release_mem_region(i2s->mem->start, resource_size(i2s->mem)); 
platform_set_drvdata(pdev, NULL); kfree(i2s); return 0; } static struct platform_driver jz4740_i2s_driver = { .probe = jz4740_i2s_dev_probe, .remove = __devexit_p(jz4740_i2s_dev_remove), .driver = { .name = "jz4740-i2s", .owner = THIS_MODULE, }, }; module_platform_driver(jz4740_i2s_driver); MODULE_AUTHOR("Lars-Peter Clausen, <lars@metafoo.de>"); MODULE_DESCRIPTION("Ingenic JZ4740 SoC I2S driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:jz4740-i2s");
gpl-2.0
yajnab/android_kernel_samsung_delos3geur
fs/yaffs2/yaffs_mtdif.c
5155
1224
/* * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. * * Copyright (C) 2002-2010 Aleph One Ltd. * for Toby Churchill Ltd and Brightstar Engineering * * Created by Charles Manning <charles@aleph1.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "yportenv.h" #include "yaffs_mtdif.h" #include "linux/mtd/mtd.h" #include "linux/types.h" #include "linux/time.h" #include "linux/mtd/nand.h" #include "yaffs_linux.h" int nandmtd_erase_block(struct yaffs_dev *dev, int block_no) { struct mtd_info *mtd = yaffs_dev_to_mtd(dev); u32 addr = ((loff_t) block_no) * dev->param.total_bytes_per_chunk * dev->param.chunks_per_block; struct erase_info ei; int retval = 0; ei.mtd = mtd; ei.addr = addr; ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block; ei.time = 1000; ei.retries = 2; ei.callback = NULL; ei.priv = (u_long) dev; retval = mtd_erase(mtd, &ei); if (retval == 0) return YAFFS_OK; else return YAFFS_FAIL; } int nandmtd_initialise(struct yaffs_dev *dev) { return YAFFS_OK; }
gpl-2.0
c8813q-dev/android_kernel_huawei_c8813q
net/rfkill/input.c
5155
9048
/* * Input layer to RF Kill interface connector * * Copyright (c) 2007 Dmitry Torokhov * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * If you ever run into a situation in which you have a SW_ type rfkill * input device, then you can revive code that was removed in the patch * "rfkill-input: remove unused code". */ #include <linux/input.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <linux/workqueue.h> #include <linux/init.h> #include <linux/rfkill.h> #include <linux/sched.h> #include "rfkill.h" enum rfkill_input_master_mode { RFKILL_INPUT_MASTER_UNLOCK = 0, RFKILL_INPUT_MASTER_RESTORE = 1, RFKILL_INPUT_MASTER_UNBLOCKALL = 2, NUM_RFKILL_INPUT_MASTER_MODES }; /* Delay (in ms) between consecutive switch ops */ #define RFKILL_OPS_DELAY 200 static enum rfkill_input_master_mode rfkill_master_switch_mode = RFKILL_INPUT_MASTER_UNBLOCKALL; module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); MODULE_PARM_DESC(master_switch_mode, "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all"); static spinlock_t rfkill_op_lock; static bool rfkill_op_pending; static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; enum rfkill_sched_op { RFKILL_GLOBAL_OP_EPO = 0, RFKILL_GLOBAL_OP_RESTORE, RFKILL_GLOBAL_OP_UNLOCK, RFKILL_GLOBAL_OP_UNBLOCK, }; static enum rfkill_sched_op rfkill_master_switch_op; static enum rfkill_sched_op rfkill_op; static void __rfkill_handle_global_op(enum rfkill_sched_op op) { unsigned int i; switch (op) { case RFKILL_GLOBAL_OP_EPO: rfkill_epo(); break; case RFKILL_GLOBAL_OP_RESTORE: rfkill_restore_states(); break; case RFKILL_GLOBAL_OP_UNLOCK: rfkill_remove_epo_lock(); break; case RFKILL_GLOBAL_OP_UNBLOCK: 
rfkill_remove_epo_lock(); for (i = 0; i < NUM_RFKILL_TYPES; i++) rfkill_switch_all(i, false); break; default: /* memory corruption or bug, fail safely */ rfkill_epo(); WARN(1, "Unknown requested operation %d! " "rfkill Emergency Power Off activated\n", op); } } static void __rfkill_handle_normal_op(const enum rfkill_type type, const bool complement) { bool blocked; blocked = rfkill_get_global_sw_state(type); if (complement) blocked = !blocked; rfkill_switch_all(type, blocked); } static void rfkill_op_handler(struct work_struct *work) { unsigned int i; bool c; spin_lock_irq(&rfkill_op_lock); do { if (rfkill_op_pending) { enum rfkill_sched_op op = rfkill_op; rfkill_op_pending = false; memset(rfkill_sw_pending, 0, sizeof(rfkill_sw_pending)); spin_unlock_irq(&rfkill_op_lock); __rfkill_handle_global_op(op); spin_lock_irq(&rfkill_op_lock); /* * handle global ops first -- during unlocked period * we might have gotten a new global op. */ if (rfkill_op_pending) continue; } if (rfkill_is_epo_lock_active()) continue; for (i = 0; i < NUM_RFKILL_TYPES; i++) { if (__test_and_clear_bit(i, rfkill_sw_pending)) { c = __test_and_clear_bit(i, rfkill_sw_state); spin_unlock_irq(&rfkill_op_lock); __rfkill_handle_normal_op(i, c); spin_lock_irq(&rfkill_op_lock); } } } while (rfkill_op_pending); spin_unlock_irq(&rfkill_op_lock); } static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler); static unsigned long rfkill_last_scheduled; static unsigned long rfkill_ratelimit(const unsigned long last) { const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); return time_after(jiffies, last + delay) ? 
0 : delay; } static void rfkill_schedule_ratelimited(void) { if (delayed_work_pending(&rfkill_op_work)) return; schedule_delayed_work(&rfkill_op_work, rfkill_ratelimit(rfkill_last_scheduled)); rfkill_last_scheduled = jiffies; } static void rfkill_schedule_global_op(enum rfkill_sched_op op) { unsigned long flags; spin_lock_irqsave(&rfkill_op_lock, flags); rfkill_op = op; rfkill_op_pending = true; if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { /* bypass the limiter for EPO */ cancel_delayed_work(&rfkill_op_work); schedule_delayed_work(&rfkill_op_work, 0); rfkill_last_scheduled = jiffies; } else rfkill_schedule_ratelimited(); spin_unlock_irqrestore(&rfkill_op_lock, flags); } static void rfkill_schedule_toggle(enum rfkill_type type) { unsigned long flags; if (rfkill_is_epo_lock_active()) return; spin_lock_irqsave(&rfkill_op_lock, flags); if (!rfkill_op_pending) { __set_bit(type, rfkill_sw_pending); __change_bit(type, rfkill_sw_state); rfkill_schedule_ratelimited(); } spin_unlock_irqrestore(&rfkill_op_lock, flags); } static void rfkill_schedule_evsw_rfkillall(int state) { if (state) rfkill_schedule_global_op(rfkill_master_switch_op); else rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); } static void rfkill_event(struct input_handle *handle, unsigned int type, unsigned int code, int data) { if (type == EV_KEY && data == 1) { switch (code) { case KEY_WLAN: rfkill_schedule_toggle(RFKILL_TYPE_WLAN); break; case KEY_BLUETOOTH: rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH); break; case KEY_UWB: rfkill_schedule_toggle(RFKILL_TYPE_UWB); break; case KEY_WIMAX: rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); break; case KEY_RFKILL: rfkill_schedule_toggle(RFKILL_TYPE_ALL); break; } } else if (type == EV_SW && code == SW_RFKILL_ALL) rfkill_schedule_evsw_rfkillall(data); } static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct input_handle *handle; int error; handle = kzalloc(sizeof(struct 
input_handle), GFP_KERNEL); if (!handle) return -ENOMEM; handle->dev = dev; handle->handler = handler; handle->name = "rfkill"; /* causes rfkill_start() to be called */ error = input_register_handle(handle); if (error) goto err_free_handle; error = input_open_device(handle); if (error) goto err_unregister_handle; return 0; err_unregister_handle: input_unregister_handle(handle); err_free_handle: kfree(handle); return error; } static void rfkill_start(struct input_handle *handle) { /* * Take event_lock to guard against configuration changes, we * should be able to deal with concurrency with rfkill_event() * just fine (which event_lock will also avoid). */ spin_lock_irq(&handle->dev->event_lock); if (test_bit(EV_SW, handle->dev->evbit) && test_bit(SW_RFKILL_ALL, handle->dev->swbit)) rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, handle->dev->sw)); spin_unlock_irq(&handle->dev->event_lock); } static void rfkill_disconnect(struct input_handle *handle) { input_close_device(handle); input_unregister_handle(handle); kfree(handle); } static const struct input_device_id rfkill_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, .evbit = { BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, 
.evbit = { BIT(EV_SW) }, .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, }, { } }; static struct input_handler rfkill_handler = { .name = "rfkill", .event = rfkill_event, .connect = rfkill_connect, .start = rfkill_start, .disconnect = rfkill_disconnect, .id_table = rfkill_ids, }; int __init rfkill_handler_init(void) { switch (rfkill_master_switch_mode) { case RFKILL_INPUT_MASTER_UNBLOCKALL: rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK; break; case RFKILL_INPUT_MASTER_RESTORE: rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE; break; case RFKILL_INPUT_MASTER_UNLOCK: rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK; break; default: return -EINVAL; } spin_lock_init(&rfkill_op_lock); /* Avoid delay at first schedule */ rfkill_last_scheduled = jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; return input_register_handler(&rfkill_handler); } void __exit rfkill_handler_exit(void) { input_unregister_handler(&rfkill_handler); cancel_delayed_work_sync(&rfkill_op_work); }
gpl-2.0
OneEducation/kernel-rk310-lollipop-firefly
drivers/char/agp/i460-agp.c
5411
18895
/* * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of * the "Intel 460GTX Chipset Software Developer's Manual": * http://www.intel.com/design/archives/itanium/downloads/248704.htm */ /* * 460GX support by Chris Ahna <christopher.j.ahna@intel.com> * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com> */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/agp_backend.h> #include <linux/log2.h> #include "agp.h" #define INTEL_I460_BAPBASE 0x98 #define INTEL_I460_GXBCTL 0xa0 #define INTEL_I460_AGPSIZ 0xa2 #define INTEL_I460_ATTBASE 0xfe200000 #define INTEL_I460_GATT_VALID (1UL << 24) #define INTEL_I460_GATT_COHERENT (1UL << 25) /* * The i460 can operate with large (4MB) pages, but there is no sane way to support this * within the current kernel/DRM environment, so we disable the relevant code for now. * See also comments in ia64_alloc_page()... */ #define I460_LARGE_IO_PAGES 0 #if I460_LARGE_IO_PAGES # define I460_IO_PAGE_SHIFT i460.io_page_shift #else # define I460_IO_PAGE_SHIFT 12 #endif #define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT) #define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT)) #define I460_SRAM_IO_DISABLE (1 << 4) #define I460_BAPBASE_ENABLE (1 << 3) #define I460_AGPSIZ_MASK 0x7 #define I460_4M_PS (1 << 1) /* Control bits for Out-Of-GART coherency and Burst Write Combining */ #define I460_GXBCTL_OOG (1UL << 0) #define I460_GXBCTL_BWC (1UL << 2) /* * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the * gatt_table and gatt_table_real pointers a "void *"... 
*/
/* Read/write a single 32-bit GATT entry through the ioremap'd SRAM window. */
#define RD_GATT(index)		readl((u32 *) i460.gatt + (index))
#define WR_GATT(index, val)	writel((val), (u32 *) i460.gatt + (index))
/*
 * The 460 spec says we have to read the last location written to make sure that all
 * writes have taken effect
 */
#define WR_FLUSH_GATT(index)	RD_GATT(index)

static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
				       dma_addr_t addr, int type);

/* Driver-private state; there is only ever one 460GX bridge per system. */
static struct {
	void *gatt;				/* ioremap'd GATT area */

	/* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
	u8 io_page_shift;

	/* BIOS configures chipset to one of 2 possible apbase values: */
	u8 dynamic_apbase;

	/* structure for tracking partial use of 4MB GART pages: */
	struct lp_desc {
		unsigned long *alloced_map;	/* bitmap of kernel-pages in use */
		int refcount;		/* number of kernel pages using the large page */
		u64 paddr;		/* physical address of large page */
		struct page *page;	/* page pointer */
	} *lp_desc;
} i460;

static const struct aper_size_info_8 i460_sizes[3] =
{
	/*
	 * The 32GB aperture is only available with a 4M GART page size.  Due to the
	 * dynamic GART page size, we can't figure out page_order or num_entries until
	 * runtime.
	 */
	{32768, 0, 0, 4},
	{1024, 0, 0, 2},
	{256, 0, 0, 1}
};

static struct gatt_mask i460_masks[] =
{
	{
		.mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
		.type = 0
	}
};

/*
 * Read the BIOS-configured GART page size and aperture size, reject
 * configurations the driver can't handle, and fill in the runtime
 * num_entries/page_order fields of the aperture-size table.
 * Returns the aperture size on success, 0 on error.
 */
static int i460_fetch_size (void)
{
	int i;
	u8 temp;
	struct aper_size_info_8 *values;

	/* Determine the GART page size */
	pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
	i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
	pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);

	if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
		printk(KERN_ERR PFX
			"I/O (GART) page-size %luKB doesn't match expected "
				"size %luKB\n",
			1UL << (i460.io_page_shift - 10),
			1UL << (I460_IO_PAGE_SHIFT));
		return 0;
	}

	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);

	pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);

	/* Exit now if the IO drivers for the GART SRAMS are turned off */
	if (temp & I460_SRAM_IO_DISABLE) {
		printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
		printk(KERN_ERR PFX "AGPGART operation not possible\n");
		return 0;
	}

	/* Make sure we don't try to create an 2 ^ 23 entry GATT */
	if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
		printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
		return 0;
	}

	/* Determine the proper APBASE register */
	if (temp & I460_BAPBASE_ENABLE)
		i460.dynamic_apbase = INTEL_I460_BAPBASE;
	else
		i460.dynamic_apbase = AGP_APBASE;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		/*
		 * Dynamically calculate the proper num_entries and page_order values for
		 * the define aperture sizes. Take care not to shift off the end of
		 * values[i].size.
		 */
		values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
		values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
	}

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		/* Neglect control bits when matching up size_value */
		if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);
			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

/* There isn't anything to do here since 460 has no GART TLB.  */
static void i460_tlb_flush (struct agp_memory *mem)
{
	return;
}

/*
 * This utility function is needed to prevent corruption of the control bits
 * which are stored along with the aperture size in 460's AGPSIZ register
 */
static void i460_write_agpsiz (u8 size_value)
{
	u8 temp;

	pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
	pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
			      ((temp & ~I460_AGPSIZ_MASK) | size_value));
}

/* Restore the BIOS-programmed aperture size and drop the large-page tracker. */
static void i460_cleanup (void)
{
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	i460_write_agpsiz(previous_size->size_value);
	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
		kfree(i460.lp_desc);
}

/*
 * Program the chipset for the selected aperture size, record the (possibly
 * >4GB) aperture bus address, and allocate the partial-allocation trackers
 * when GART pages are larger than kernel pages.  Returns 0 or -ENOMEM.
 */
static int i460_configure (void)
{
	union {
		u32 small[2];
		u64 large;
	} temp;
	size_t size;
	u8 scratch;
	struct aper_size_info_8 *current_size;

	temp.large = 0;

	current_size = A_SIZE_8(agp_bridge->current_size);
	i460_write_agpsiz(current_size->size_value);

	/*
	 * Do the necessary rigmarole to read all eight bytes of APBASE.
	 * This has to be done since the AGP aperture can be above 4GB on
	 * 460 based systems.
	 */
	pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
	pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));

	/* Clear BAR control bits */
	agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);

	pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
	pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
			      (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);

	/*
	 * Initialize partial allocation trackers if a GART page is bigger than a kernel
	 * page.
	 */
	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
		size = current_size->num_entries * sizeof(i460.lp_desc[0]);
		i460.lp_desc = kzalloc(size, GFP_KERNEL);
		if (!i460.lp_desc)
			return -ENOMEM;
	}
	return 0;
}

/* Map the fixed GATT SRAM window and clear every entry.  Returns 0/-ENOMEM. */
static int i460_create_gatt_table (struct agp_bridge_data *bridge)
{
	int page_order, num_entries, i;
	void *temp;

	/*
	 * Load up the fixed address of the GART SRAMS which hold our GATT table.
	 */
	temp = agp_bridge->current_size;
	page_order = A_SIZE_8(temp)->page_order;
	num_entries = A_SIZE_8(temp)->num_entries;

	i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
	if (!i460.gatt) {
		printk(KERN_ERR PFX "ioremap failed\n");
		return -ENOMEM;
	}

	/* These are no good, they should be removed from the agp_bridge structure... */
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	for (i = 0; i < num_entries; ++i)
		WR_GATT(i, 0);
	WR_FLUSH_GATT(i - 1);
	return 0;
}

/* Clear the GATT and unmap the SRAM window.  Always succeeds. */
static int i460_free_gatt_table (struct agp_bridge_data *bridge)
{
	int num_entries, i;
	void *temp;

	temp = agp_bridge->current_size;

	num_entries = A_SIZE_8(temp)->num_entries;

	for (i = 0; i < num_entries; ++i)
		WR_GATT(i, 0);
	WR_FLUSH_GATT(num_entries - 1);

	iounmap(i460.gatt);
	return 0;
}

/*
 * The following functions are called when the I/O (GART) page size is smaller than
 * PAGE_SIZE.
 */

static int i460_insert_memory_small_io_page (struct agp_memory *mem,
				off_t pg_start, int type)
{
	unsigned long paddr, io_pg_start, io_page_size;
	int i, j, k, num_entries;
	void *temp;

	pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
		 mem, pg_start, type, page_to_phys(mem->pages[0]));

	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
		return -EINVAL;

	io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
		return -EINVAL;
	}

	/* Reject the request if any target GATT entry is already in use. */
	j = io_pg_start;
	while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
			pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
				 j, RD_GATT(j));
			return -EBUSY;
		}
		j++;
	}

	/* Each kernel page maps to I460_IOPAGES_PER_KPAGE consecutive GATT entries. */
	io_page_size = 1UL << I460_IO_PAGE_SHIFT;
	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
		paddr = page_to_phys(mem->pages[i]);
		for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
			WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
	}
	WR_FLUSH_GATT(j - 1);
	return 0;
}

static int i460_remove_memory_small_io_page(struct agp_memory *mem,
				off_t pg_start, int type)
{
	int i;

	pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
		 mem, pg_start, type);

	pg_start = I460_IOPAGES_PER_KPAGE * pg_start;

	for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
		WR_GATT(i, 0);
	WR_FLUSH_GATT(i - 1);
	return 0;
}

#if I460_LARGE_IO_PAGES

/*
 * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
 *
 * This situation is interesting since AGP memory allocations that are smaller than a
 * single GART page are possible.  The i460.lp_desc array tracks partial allocation of the
 * large GART pages to work around this issue.
 *
 * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
 * pg_num.  i460.lp_desc[pg_num].paddr is the physical address of the large page and
 * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
 */

/* Allocate one large (4M) GART page plus its kernel-page usage bitmap. */
static int i460_alloc_large_page (struct lp_desc *lp)
{
	unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
	size_t map_size;

	lp->page = alloc_pages(GFP_KERNEL, order);
	if (!lp->page) {
		printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
		return -ENOMEM;
	}

	/* Round the bitmap up to a whole number of longs before sizing it in bytes. */
	map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
	lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
	if (!lp->alloced_map) {
		__free_pages(lp->page, order);
		printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
		return -ENOMEM;
	}

	lp->paddr = page_to_phys(lp->page);
	lp->refcount = 0;
	atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
	return 0;
}

static void i460_free_large_page (struct lp_desc *lp)
{
	kfree(lp->alloced_map);
	lp->alloced_map = NULL;

	__free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
	atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}

static int i460_insert_memory_large_io_page (struct agp_memory *mem,
				off_t pg_start, int type)
{
	int i, start_offset, end_offset, idx, pg, num_entries;
	struct lp_desc *start, *end, *lp;
	void *temp;

	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
		return -EINVAL;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	/* Figure out what pg_start means in terms of our large GART pages */
	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;

	if (end > i460.lp_desc + num_entries) {
		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
		return -EINVAL;
	}

	/* Check if the requested region of the aperture is free */
	for (lp = start; lp <= end; ++lp) {
		if (!lp->alloced_map)
			continue;	/* OK, the entire large page is available... */

		for (idx = ((lp == start) ? start_offset : 0);
		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
		     idx++)
		{
			if (test_bit(idx, lp->alloced_map))
				return -EBUSY;
		}
	}

	for (lp = start, i = 0; lp <= end; ++lp) {
		if (!lp->alloced_map) {
			/* Allocate new GART pages... */
			if (i460_alloc_large_page(lp) < 0)
				return -ENOMEM;
			pg = lp - i460.lp_desc;
			WR_GATT(pg, i460_mask_memory(agp_bridge,
						     lp->paddr, 0));
			WR_FLUSH_GATT(pg);
		}

		for (idx = ((lp == start) ? start_offset : 0);
		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
		     idx++, i++)
		{
			mem->pages[i] = lp->page;
			__set_bit(idx, lp->alloced_map);
			++lp->refcount;
		}
	}
	return 0;
}

static int i460_remove_memory_large_io_page (struct agp_memory *mem,
				off_t pg_start, int type)
{
	int i, pg, start_offset, end_offset, idx, num_entries;
	struct lp_desc *start, *end, *lp;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	/* Figure out what pg_start means in terms of our large GART pages */
	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;

	for (i = 0, lp = start; lp <= end; ++lp) {
		for (idx = ((lp == start) ? start_offset : 0);
		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
		     idx++, i++)
		{
			mem->pages[i] = NULL;
			__clear_bit(idx, lp->alloced_map);
			--lp->refcount;
		}

		/* Free GART pages if they are unused */
		if (lp->refcount == 0) {
			pg = lp - i460.lp_desc;
			WR_GATT(pg, 0);
			WR_FLUSH_GATT(pg);
			i460_free_large_page(lp);
		}
	}
	return 0;
}

/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */

static int i460_insert_memory (struct agp_memory *mem,
				off_t pg_start, int type)
{
	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
		return i460_insert_memory_small_io_page(mem, pg_start, type);
	else
		return i460_insert_memory_large_io_page(mem, pg_start, type);
}

static int i460_remove_memory (struct agp_memory *mem,
				off_t pg_start, int type)
{
	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
		return i460_remove_memory_small_io_page(mem, pg_start, type);
	else
		return i460_remove_memory_large_io_page(mem, pg_start, type);
}

/*
 * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
 * allocate memory until we know where it is to be bound in the aperture (a
 * multi-kernel-page alloc might fit inside of an already allocated GART page).
 *
 * Let's just hope nobody counts on the allocated AGP memory being there before bind time
 * (I don't think current drivers do)...
 */
static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
{
	void *page;

	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
		page = agp_generic_alloc_page(agp_bridge);
	} else
		/* Returning NULL would cause problems */
		/* AK: really dubious code. */
		page = (void *)~0UL;
	return page;
}

static void i460_destroy_page (struct page *page, int flags)
{
	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
		agp_generic_destroy_page(page, flags);
	}
}

#endif /* I460_LARGE_IO_PAGES */

static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
				       dma_addr_t addr, int type)
{
	/* Make sure the returned address is a valid GATT entry */
	return bridge->driver->masks[0].mask
		| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
}

const struct agp_bridge_driver intel_i460_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= i460_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 3,
	.configure		= i460_configure,
	.fetch_size		= i460_fetch_size,
	.cleanup		= i460_cleanup,
	.tlb_flush		= i460_tlb_flush,
	.mask_memory		= i460_mask_memory,
	.masks			= i460_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= i460_create_gatt_table,
	.free_gatt_table	= i460_free_gatt_table,
#if I460_LARGE_IO_PAGES
	.insert_memory		= i460_insert_memory,
	.remove_memory		= i460_remove_memory,
	.agp_alloc_page		= i460_alloc_page,
	.agp_destroy_page	= i460_destroy_page,
#else
	.insert_memory		= i460_insert_memory_small_io_page,
	.remove_memory		= i460_remove_memory_small_io_page,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
#endif
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
	.cant_use_aperture	= true,
};

static int agp_intel_i460_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &intel_i460_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

static void agp_intel_i460_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

static struct pci_device_id agp_intel_i460_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_INTEL,
	.device		= PCI_DEVICE_ID_INTEL_84460GX,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);

static struct pci_driver agp_intel_i460_pci_driver = {
	.name		= "agpgart-intel-i460",
	.id_table	= agp_intel_i460_pci_table,
	.probe		= agp_intel_i460_probe,
	.remove		= agp_intel_i460_remove,
};

static int __init agp_intel_i460_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_intel_i460_pci_driver);
}

static void __exit agp_intel_i460_cleanup(void)
{
	pci_unregister_driver(&agp_intel_i460_pci_driver);
}

module_init(agp_intel_i460_init);
module_exit(agp_intel_i460_cleanup);

MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
MODULE_LICENSE("GPL and additional rights");
gpl-2.0
arj1231/kernel_lge_msm8226
drivers/video/geode/gx1fb_core.c
7971
12565
/* * drivers/video/geode/gx1fb_core.c * -- Geode GX1 framebuffer driver * * Copyright (C) 2005 Arcom Control Systems Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/pci.h> #include "geodefb.h" #include "display_gx1.h" #include "video_cs5530.h" static char mode_option[32] = "640x480-16@60"; static int crt_option = 1; static char panel_option[32] = ""; /* Modes relevant to the GX1 (taken from modedb.c) */ static const struct fb_videomode __devinitdata gx1_modedb[] = { /* 640x480-60 VESA */ { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 640x480-75 VESA */ { NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 640x480-85 VESA */ { NULL, 85, 640, 480, 27777, 80, 56, 25, 01, 56, 3, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 800x600-60 VESA */ { NULL, 60, 800, 600, 25000, 88, 40, 23, 01, 128, 4, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 800x600-75 VESA */ { NULL, 75, 800, 600, 20202, 160, 16, 21, 01, 80, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 800x600-85 VESA */ { NULL, 85, 800, 600, 17761, 152, 32, 27, 01, 64, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 1024x768-60 VESA */ { NULL, 60, 1024, 768, 15384, 160, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 1024x768-75 VESA */ { NULL, 75, 1024, 768, 12690, 176, 16, 28, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, 
FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 1024x768-85 VESA */ { NULL, 85, 1024, 768, 10582, 208, 48, 36, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 1280x960-60 VESA */ { NULL, 60, 1280, 960, 9259, 312, 96, 36, 1, 112, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 1280x960-85 VESA */ { NULL, 85, 1280, 960, 6734, 224, 64, 47, 1, 160, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 1280x1024-60 VESA */ { NULL, 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 1280x1024-75 VESA */ { NULL, 75, 1280, 1024, 7407, 248, 16, 38, 1, 144, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 1280x1024-85 VESA */ { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, }; static int gx1_line_delta(int xres, int bpp) { int line_delta = xres * (bpp >> 3); if (line_delta > 2048) line_delta = 4096; else if (line_delta > 1024) line_delta = 2048; else line_delta = 1024; return line_delta; } static int gx1fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct geodefb_par *par = info->par; /* Maximum resolution is 1280x1024. */ if (var->xres > 1280 || var->yres > 1024) return -EINVAL; if (par->panel_x && (var->xres > par->panel_x || var->yres > par->panel_y)) return -EINVAL; /* Only 16 bpp and 8 bpp is supported by the hardware. 
*/ if (var->bits_per_pixel == 16) { var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; } else if (var->bits_per_pixel == 8) { var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; } else return -EINVAL; /* Enough video memory? */ if (gx1_line_delta(var->xres, var->bits_per_pixel) * var->yres > info->fix.smem_len) return -EINVAL; /* FIXME: Check timing parameters here? */ return 0; } static int gx1fb_set_par(struct fb_info *info) { struct geodefb_par *par = info->par; if (info->var.bits_per_pixel == 16) info->fix.visual = FB_VISUAL_TRUECOLOR; else info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = gx1_line_delta(info->var.xres, info->var.bits_per_pixel); par->dc_ops->set_mode(info); return 0; } static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } static int gx1fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct geodefb_par *par = info->par; if (info->var.grayscale) { /* grayscale = 0.30*R + 0.59*G + 0.11*B */ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; } /* Truecolor has hardware independent palette */ if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 *pal = info->pseudo_palette; u32 v; if (regno >= 16) return -EINVAL; v = chan_to_field(red, &info->var.red); v |= chan_to_field(green, &info->var.green); v |= chan_to_field(blue, &info->var.blue); pal[regno] = v; } else { if (regno >= 256) return -EINVAL; par->dc_ops->set_palette_reg(info, regno, red, green, blue); } return 0; } static int gx1fb_blank(int blank_mode, struct fb_info *info) { struct geodefb_par *par = info->par; return par->vid_ops->blank_display(info, 
blank_mode); } static int __devinit gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev) { struct geodefb_par *par = info->par; unsigned gx_base; int fb_len; int ret; gx_base = gx1_gx_base(); if (!gx_base) return -ENODEV; ret = pci_enable_device(dev); if (ret < 0) return ret; ret = pci_request_region(dev, 0, "gx1fb (video)"); if (ret < 0) return ret; par->vid_regs = pci_ioremap_bar(dev, 0); if (!par->vid_regs) return -ENOMEM; if (!request_mem_region(gx_base + 0x8300, 0x100, "gx1fb (display controller)")) return -EBUSY; par->dc_regs = ioremap(gx_base + 0x8300, 0x100); if (!par->dc_regs) return -ENOMEM; if ((fb_len = gx1_frame_buffer_size()) < 0) return -ENOMEM; info->fix.smem_start = gx_base + 0x800000; info->fix.smem_len = fb_len; info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); if (!info->screen_base) return -ENOMEM; dev_info(&dev->dev, "%d Kibyte of video memory at 0x%lx\n", info->fix.smem_len / 1024, info->fix.smem_start); return 0; } static int parse_panel_option(struct fb_info *info) { struct geodefb_par *par = info->par; if (strcmp(panel_option, "") != 0) { int x, y; char *s; x = simple_strtol(panel_option, &s, 10); if (!x) return -EINVAL; y = simple_strtol(s + 1, NULL, 10); if (!y) return -EINVAL; par->panel_x = x; par->panel_y = y; } return 0; } static struct fb_ops gx1fb_ops = { .owner = THIS_MODULE, .fb_check_var = gx1fb_check_var, .fb_set_par = gx1fb_set_par, .fb_setcolreg = gx1fb_setcolreg, .fb_blank = gx1fb_blank, /* No HW acceleration for now. */ .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev) { struct geodefb_par *par; struct fb_info *info; /* Alloc enough space for the pseudo palette. 
*/ info = framebuffer_alloc(sizeof(struct geodefb_par) + sizeof(u32) * 16, dev); if (!info) return NULL; par = info->par; strcpy(info->fix.id, "GX1"); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.xpanstep = 0; info->fix.ypanstep = 0; info->fix.ywrapstep = 0; info->fix.accel = FB_ACCEL_NONE; info->var.nonstd = 0; info->var.activate = FB_ACTIVATE_NOW; info->var.height = -1; info->var.width = -1; info->var.accel_flags = 0; info->var.vmode = FB_VMODE_NONINTERLACED; info->fbops = &gx1fb_ops; info->flags = FBINFO_DEFAULT; info->node = -1; info->pseudo_palette = (void *)par + sizeof(struct geodefb_par); info->var.grayscale = 0; /* CRT and panel options */ par->enable_crt = crt_option; if (parse_panel_option(info) < 0) printk(KERN_WARNING "gx1fb: invalid 'panel' option -- disabling flat panel\n"); if (!par->panel_x) par->enable_crt = 1; /* fall back to CRT if no panel is specified */ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { framebuffer_release(info); return NULL; } return info; } static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct geodefb_par *par; struct fb_info *info; int ret; info = gx1fb_init_fbinfo(&pdev->dev); if (!info) return -ENOMEM; par = info->par; /* GX1 display controller and CS5530 video device */ par->dc_ops = &gx1_dc_ops; par->vid_ops = &cs5530_vid_ops; if ((ret = gx1fb_map_video_memory(info, pdev)) < 0) { dev_err(&pdev->dev, "failed to map frame buffer or controller registers\n"); goto err; } ret = fb_find_mode(&info->var, info, mode_option, gx1_modedb, ARRAY_SIZE(gx1_modedb), NULL, 16); if (ret == 0 || ret == 4) { dev_err(&pdev->dev, "could not find valid video mode\n"); ret = -EINVAL; goto err; } /* Clear the frame buffer of garbage. 
*/ memset_io(info->screen_base, 0, info->fix.smem_len); gx1fb_check_var(&info->var, info); gx1fb_set_par(info); if (register_framebuffer(info) < 0) { ret = -EINVAL; goto err; } pci_set_drvdata(pdev, info); printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); return 0; err: if (info->screen_base) { iounmap(info->screen_base); pci_release_region(pdev, 0); } if (par->vid_regs) { iounmap(par->vid_regs); pci_release_region(pdev, 1); } if (par->dc_regs) { iounmap(par->dc_regs); release_mem_region(gx1_gx_base() + 0x8300, 0x100); } if (info) { fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } return ret; } static void __devexit gx1fb_remove(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct geodefb_par *par = info->par; unregister_framebuffer(info); iounmap((void __iomem *)info->screen_base); pci_release_region(pdev, 0); iounmap(par->vid_regs); pci_release_region(pdev, 1); iounmap(par->dc_regs); release_mem_region(gx1_gx_base() + 0x8300, 0x100); fb_dealloc_cmap(&info->cmap); pci_set_drvdata(pdev, NULL); framebuffer_release(info); } #ifndef MODULE static void __init gx1fb_setup(char *options) { char *this_opt; if (!options || !*options) return; while ((this_opt = strsep(&options, ","))) { if (!*this_opt) continue; if (!strncmp(this_opt, "mode:", 5)) strlcpy(mode_option, this_opt + 5, sizeof(mode_option)); else if (!strncmp(this_opt, "crt:", 4)) crt_option = !!simple_strtoul(this_opt + 4, NULL, 0); else if (!strncmp(this_opt, "panel:", 6)) strlcpy(panel_option, this_opt + 6, sizeof(panel_option)); else strlcpy(mode_option, this_opt, sizeof(mode_option)); } } #endif static struct pci_device_id gx1fb_id_table[] = { { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_VIDEO, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, gx1fb_id_table); static struct pci_driver gx1fb_driver = { .name = "gx1fb", .id_table = gx1fb_id_table, .probe = gx1fb_probe, .remove = 
__devexit_p(gx1fb_remove), }; static int __init gx1fb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("gx1fb", &option)) return -ENODEV; gx1fb_setup(option); #endif return pci_register_driver(&gx1fb_driver); } static void __devexit gx1fb_cleanup(void) { pci_unregister_driver(&gx1fb_driver); } module_init(gx1fb_init); module_exit(gx1fb_cleanup); module_param_string(mode, mode_option, sizeof(mode_option), 0444); MODULE_PARM_DESC(mode, "video mode (<x>x<y>[-<bpp>][@<refr>])"); module_param_named(crt, crt_option, int, 0444); MODULE_PARM_DESC(crt, "enable CRT output. 0 = off, 1 = on (default)"); module_param_string(panel, panel_option, sizeof(panel_option), 0444); MODULE_PARM_DESC(panel, "size of attached flat panel (<x>x<y>)"); MODULE_DESCRIPTION("framebuffer driver for the AMD Geode GX1"); MODULE_LICENSE("GPL");
gpl-2.0
krystianpe/massive-ninja
arch/mips/pci/ops-rc32434.c
9507
5308
/* * BRIEF MODULE DESCRIPTION * pci_ops for IDT EB434 board * * Copyright 2004 IDT Inc. (rischelp@idt.com) * Copyright 2006 Felix Fietkau <nbd@openwrt.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/pci.h> #include <linux/types.h> #include <asm/cpu.h> #include <asm/mach-rc32434/rc32434.h> #include <asm/mach-rc32434/pci.h> #define PCI_ACCESS_READ 0 #define PCI_ACCESS_WRITE 1 #define PCI_CFG_SET(bus, slot, func, off) \ (rc32434_pci->pcicfga = (0x80000000 | \ ((bus) << 16) | ((slot)<<11) | \ ((func)<<8) | (off))) static inline int config_access(unsigned char access_type, struct pci_bus *bus, unsigned int devfn, unsigned char where, u32 *data) { unsigned int slot = PCI_SLOT(devfn); u8 func = PCI_FUNC(devfn); /* Setup address */ PCI_CFG_SET(bus->number, slot, func, where); rc32434_sync(); if (access_type == PCI_ACCESS_WRITE) rc32434_pci->pcicfgd = *data; else *data = rc32434_pci->pcicfgd; rc32434_sync(); return 0; } /* * We can't address 8 and 16 bit words directly. Instead we have to * read/write a 32bit word and mask/modify the data we actually want. */ static int read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 *val) { u32 data; int ret; ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); *val = (data >> ((where & 3) << 3)) & 0xff; return ret; } static int read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 *val) { u32 data; int ret; ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); *val = (data >> ((where & 3) << 3)) & 0xffff; return ret; } static int read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 *val) { int ret; int delay = 1; /* * Don't scan too far, else there will be errors with plugged in * daughterboard (rb564). 
*/ if (bus->number == 0 && PCI_SLOT(devfn) > 21) return 0; retry: ret = config_access(PCI_ACCESS_READ, bus, devfn, where, val); /* * Certain devices react delayed at device scan time, this * gives them time to settle */ if (where == PCI_VENDOR_ID) { if (ret == 0xffffffff || ret == 0x00000000 || ret == 0x0000ffff || ret == 0xffff0000) { if (delay > 4) return 0; delay *= 2; msleep(delay); goto retry; } } return ret; } static int write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val) { u32 data = 0; if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) return -1; data = (data & ~(0xff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) return -1; return PCIBIOS_SUCCESSFUL; } static int write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val) { u32 data = 0; if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) return -1; data = (data & ~(0xffff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) return -1; return PCIBIOS_SUCCESSFUL; } static int write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val) { if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val)) return -1; return PCIBIOS_SUCCESSFUL; } static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { switch (size) { case 1: return read_config_byte(bus, devfn, where, (u8 *) val); case 2: return read_config_word(bus, devfn, where, (u16 *) val); default: return read_config_dword(bus, devfn, where, val); } } static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { switch (size) { case 1: return write_config_byte(bus, devfn, where, (u8) val); case 2: return write_config_word(bus, devfn, where, (u16) val); default: return write_config_dword(bus, devfn, where, val); } } struct pci_ops rc32434_pci_ops = { .read = 
pci_config_read, .write = pci_config_write, };
gpl-2.0
dianlujitao/android_kernel_huawei_c8813q
drivers/usb/misc/diag_bridge.c
36
12435
/* * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* add additional information to our printk's */ #define pr_fmt(fmt) "%s: " fmt "\n", __func__ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/debugfs.h> #include <mach/diag_bridge.h> #define DRIVER_DESC "USB host diag bridge driver" #define DRIVER_VERSION "1.0" struct diag_bridge { struct usb_device *udev; struct usb_interface *ifc; struct usb_anchor submitted; __u8 in_epAddr; __u8 out_epAddr; int err; struct kref kref; struct mutex ifc_mutex; struct diag_bridge_ops *ops; struct platform_device *pdev; /* debugging counters */ unsigned long bytes_to_host; unsigned long bytes_to_mdm; unsigned pending_reads; unsigned pending_writes; }; struct diag_bridge *__dev; int diag_bridge_open(struct diag_bridge_ops *ops) { struct diag_bridge *dev = __dev; if (!dev) { pr_err("dev is null"); return -ENODEV; } dev->ops = ops; dev->err = 0; kref_get(&dev->kref); return 0; } EXPORT_SYMBOL(diag_bridge_open); static void diag_bridge_delete(struct kref *kref) { struct diag_bridge *dev = container_of(kref, struct diag_bridge, kref); usb_put_dev(dev->udev); __dev = 0; kfree(dev); } void diag_bridge_close(void) { struct diag_bridge *dev = __dev; dev_dbg(&dev->ifc->dev, "%s:\n", __func__); 
usb_kill_anchored_urbs(&dev->submitted); dev->ops = 0; kref_put(&dev->kref, diag_bridge_delete); } EXPORT_SYMBOL(diag_bridge_close); static void diag_bridge_read_cb(struct urb *urb) { struct diag_bridge *dev = urb->context; struct diag_bridge_ops *cbs = dev->ops; dev_dbg(&dev->ifc->dev, "%s: status:%d actual:%d\n", __func__, urb->status, urb->actual_length); if (urb->status == -EPROTO) { dev_err(&dev->ifc->dev, "%s: proto error\n", __func__); /* save error so that subsequent read/write returns ENODEV */ dev->err = urb->status; kref_put(&dev->kref, diag_bridge_delete); return; } if (cbs && cbs->read_complete_cb) cbs->read_complete_cb(cbs->ctxt, urb->transfer_buffer, urb->transfer_buffer_length, urb->status < 0 ? urb->status : urb->actual_length); dev->bytes_to_host += urb->actual_length; dev->pending_reads--; kref_put(&dev->kref, diag_bridge_delete); } int diag_bridge_read(char *data, int size) { struct urb *urb = NULL; unsigned int pipe; struct diag_bridge *dev = __dev; int ret; pr_debug("reading %d bytes", size); if (!dev) { pr_err("device is disconnected"); return -ENODEV; } mutex_lock(&dev->ifc_mutex); if (!dev->ifc) { ret = -ENODEV; goto error; } if (!dev->ops) { pr_err("bridge is not open"); ret = -ENODEV; goto error; } if (!size) { dev_err(&dev->ifc->dev, "invalid size:%d\n", size); ret = -EINVAL; goto error; } /* if there was a previous unrecoverable error, just quit */ if (dev->err) { ret = -ENODEV; goto error; } kref_get(&dev->kref); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { dev_err(&dev->ifc->dev, "unable to allocate urb\n"); ret = -ENOMEM; goto put_error; } ret = usb_autopm_get_interface(dev->ifc); if (ret < 0 && ret != -EAGAIN && ret != -EACCES) { pr_err_ratelimited("read: autopm_get failed:%d", ret); goto free_error; } pipe = usb_rcvbulkpipe(dev->udev, dev->in_epAddr); usb_fill_bulk_urb(urb, dev->udev, pipe, data, size, diag_bridge_read_cb, dev); usb_anchor_urb(urb, &dev->submitted); dev->pending_reads++; ret = usb_submit_urb(urb, GFP_KERNEL); if 
(ret) { pr_err_ratelimited("submitting urb failed err:%d", ret); dev->pending_reads--; usb_unanchor_urb(urb); } usb_autopm_put_interface(dev->ifc); free_error: usb_free_urb(urb); put_error: if (ret) /* otherwise this is done in the completion handler */ kref_put(&dev->kref, diag_bridge_delete); error: mutex_unlock(&dev->ifc_mutex); return ret; } EXPORT_SYMBOL(diag_bridge_read); static void diag_bridge_write_cb(struct urb *urb) { struct diag_bridge *dev = urb->context; struct diag_bridge_ops *cbs = dev->ops; dev_dbg(&dev->ifc->dev, "%s:\n", __func__); usb_autopm_put_interface_async(dev->ifc); if (urb->status == -EPROTO) { dev_err(&dev->ifc->dev, "%s: proto error\n", __func__); /* save error so that subsequent read/write returns ENODEV */ dev->err = urb->status; kref_put(&dev->kref, diag_bridge_delete); return; } if (cbs && cbs->write_complete_cb) cbs->write_complete_cb(cbs->ctxt, urb->transfer_buffer, urb->transfer_buffer_length, urb->status < 0 ? urb->status : urb->actual_length); dev->bytes_to_mdm += urb->actual_length; dev->pending_writes--; kref_put(&dev->kref, diag_bridge_delete); } int diag_bridge_write(char *data, int size) { struct urb *urb = NULL; unsigned int pipe; struct diag_bridge *dev = __dev; int ret; pr_debug("writing %d bytes", size); if (!dev) { pr_err("device is disconnected"); return -ENODEV; } mutex_lock(&dev->ifc_mutex); if (!dev->ifc) { ret = -ENODEV; goto error; } if (!dev->ops) { pr_err("bridge is not open"); ret = -ENODEV; goto error; } if (!size) { dev_err(&dev->ifc->dev, "invalid size:%d\n", size); ret = -EINVAL; goto error; } /* if there was a previous unrecoverable error, just quit */ if (dev->err) { ret = -ENODEV; goto error; } kref_get(&dev->kref); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { dev_err(&dev->ifc->dev, "unable to allocate urb\n"); ret = -ENOMEM; goto put_error; } ret = usb_autopm_get_interface(dev->ifc); if (ret < 0 && ret != -EAGAIN && ret != -EACCES) { pr_err_ratelimited("write: autopm_get failed:%d", ret); goto 
free_error; } pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr); usb_fill_bulk_urb(urb, dev->udev, pipe, data, size, diag_bridge_write_cb, dev); urb->transfer_flags |= URB_ZERO_PACKET; usb_anchor_urb(urb, &dev->submitted); dev->pending_writes++; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { pr_err_ratelimited("submitting urb failed err:%d", ret); dev->pending_writes--; usb_unanchor_urb(urb); usb_autopm_put_interface(dev->ifc); goto free_error; } free_error: usb_free_urb(urb); put_error: if (ret) /* otherwise this is done in the completion handler */ kref_put(&dev->kref, diag_bridge_delete); error: mutex_unlock(&dev->ifc_mutex); return ret; } EXPORT_SYMBOL(diag_bridge_write); #if defined(CONFIG_DEBUG_FS) #define DEBUG_BUF_SIZE 512 static ssize_t diag_read_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct diag_bridge *dev = __dev; char *buf; int ret; if (!dev) return -ENODEV; buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; ret = scnprintf(buf, DEBUG_BUF_SIZE, "epin:%d, epout:%d\n" "bytes to host: %lu\n" "bytes to mdm: %lu\n" "pending reads: %u\n" "pending writes: %u\n" "last error: %d\n", dev->in_epAddr, dev->out_epAddr, dev->bytes_to_host, dev->bytes_to_mdm, dev->pending_reads, dev->pending_writes, dev->err); ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret); kfree(buf); return ret; } static ssize_t diag_reset_stats(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct diag_bridge *dev = __dev; if (dev) { dev->bytes_to_host = dev->bytes_to_mdm = 0; dev->pending_reads = dev->pending_writes = 0; } return count; } const struct file_operations diag_stats_ops = { .read = diag_read_stats, .write = diag_reset_stats, }; static struct dentry *dent; static void diag_bridge_debugfs_init(void) { struct dentry *dfile; dent = debugfs_create_dir("diag_bridge", 0); if (IS_ERR(dent)) return; dfile = debugfs_create_file("status", 0444, dent, 0, &diag_stats_ops); if (!dfile || 
IS_ERR(dfile)) debugfs_remove(dent); } static void diag_bridge_debugfs_cleanup(void) { if (dent) { debugfs_remove_recursive(dent); dent = NULL; } } #else static inline void diag_bridge_debugfs_init(void) { } static inline void diag_bridge_debugfs_cleanup(void) { } #endif static int diag_bridge_probe(struct usb_interface *ifc, const struct usb_device_id *id) { struct diag_bridge *dev; struct usb_host_interface *ifc_desc; struct usb_endpoint_descriptor *ep_desc; int i; int ret = -ENOMEM; __u8 ifc_num; pr_debug("id:%lu", id->driver_info); ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber; /* is this interface supported ? */ if (ifc_num != id->driver_info) return -ENODEV; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { pr_err("unable to allocate dev"); return -ENOMEM; } dev->pdev = platform_device_alloc("diag_bridge", -1); if (!dev->pdev) { pr_err("unable to allocate platform device"); kfree(dev); return -ENOMEM; } __dev = dev; dev->udev = usb_get_dev(interface_to_usbdev(ifc)); dev->ifc = ifc; kref_init(&dev->kref); mutex_init(&dev->ifc_mutex); init_usb_anchor(&dev->submitted); ifc_desc = ifc->cur_altsetting; for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) { ep_desc = &ifc_desc->endpoint[i].desc; if (!dev->in_epAddr && usb_endpoint_is_bulk_in(ep_desc)) dev->in_epAddr = ep_desc->bEndpointAddress; if (!dev->out_epAddr && usb_endpoint_is_bulk_out(ep_desc)) dev->out_epAddr = ep_desc->bEndpointAddress; } if (!(dev->in_epAddr && dev->out_epAddr)) { pr_err("could not find bulk in and bulk out endpoints"); ret = -ENODEV; goto error; } usb_set_intfdata(ifc, dev); diag_bridge_debugfs_init(); platform_device_add(dev->pdev); dev_dbg(&dev->ifc->dev, "%s: complete\n", __func__); return 0; error: if (dev) kref_put(&dev->kref, diag_bridge_delete); return ret; } static void diag_bridge_disconnect(struct usb_interface *ifc) { struct diag_bridge *dev = usb_get_intfdata(ifc); dev_dbg(&dev->ifc->dev, "%s:\n", __func__); platform_device_unregister(dev->pdev); 
mutex_lock(&dev->ifc_mutex); dev->ifc = NULL; mutex_unlock(&dev->ifc_mutex); diag_bridge_debugfs_cleanup(); kref_put(&dev->kref, diag_bridge_delete); usb_set_intfdata(ifc, NULL); } static int diag_bridge_suspend(struct usb_interface *ifc, pm_message_t message) { struct diag_bridge *dev = usb_get_intfdata(ifc); struct diag_bridge_ops *cbs = dev->ops; int ret = 0; if (cbs && cbs->suspend) { ret = cbs->suspend(cbs->ctxt); if (ret) { dev_dbg(&dev->ifc->dev, "%s: diag veto'd suspend\n", __func__); return ret; } usb_kill_anchored_urbs(&dev->submitted); } return ret; } static int diag_bridge_resume(struct usb_interface *ifc) { struct diag_bridge *dev = usb_get_intfdata(ifc); struct diag_bridge_ops *cbs = dev->ops; if (cbs && cbs->resume) cbs->resume(cbs->ctxt); return 0; } #define VALID_INTERFACE_NUM 0 static const struct usb_device_id diag_bridge_ids[] = { { USB_DEVICE(0x5c6, 0x9001), .driver_info = VALID_INTERFACE_NUM, }, { USB_DEVICE(0x5c6, 0x9034), .driver_info = VALID_INTERFACE_NUM, }, { USB_DEVICE(0x5c6, 0x9048), .driver_info = VALID_INTERFACE_NUM, }, { USB_DEVICE(0x5c6, 0x904C), .driver_info = VALID_INTERFACE_NUM, }, {} /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, diag_bridge_ids); static struct usb_driver diag_bridge_driver = { .name = "diag_bridge", .probe = diag_bridge_probe, .disconnect = diag_bridge_disconnect, .suspend = diag_bridge_suspend, .resume = diag_bridge_resume, .id_table = diag_bridge_ids, .supports_autosuspend = 1, }; static int __init diag_bridge_init(void) { int ret; ret = usb_register(&diag_bridge_driver); if (ret) { pr_err("unable to register diag driver"); return ret; } return 0; } static void __exit diag_bridge_exit(void) { usb_deregister(&diag_bridge_driver); } module_init(diag_bridge_init); module_exit(diag_bridge_exit); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2");
gpl-2.0
BTDC/coreboot
src/vendorcode/amd/agesa/f15/Proc/CPU/Family/0x10/F10InitEarlyTable.c
36
5628
/* $NoKeywords:$ */
/**
 * @file
 *
 * Initialize the Family 10h specific way of running early initialization.
 *
 * Returns the table of initialization steps to perform at
 * AmdInitEarly.
 *
 * @xrefitem bom "File Content Label" "Release Content"
 * @e project:      AGESA
 * @e sub-project:  CPU/FAMILY/0x10
 * @e \$Revision: 56279 $   @e \$Date: 2011-07-11 13:11:28 -0600 (Mon, 11 Jul 2011) $
 *
 */
/*
 ******************************************************************************
 *
 * Copyright (C) 2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the names of
 *       its contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************
 */

/*----------------------------------------------------------------------------------------
 *                             M O D U L E S    U S E D
 *----------------------------------------------------------------------------------------
 */
#include "AGESA.h"
#include "cpuFamilyTranslation.h"
#include "Filecode.h"
#include "cpuEarlyInit.h"
CODE_GROUP (G1_PEICC)
RDATA_GROUP (G2_PEI)

#define FILECODE PROC_CPU_FAMILY_0X10_F10INITEARLYTABLE_FILECODE

/*----------------------------------------------------------------------------------------
 *                   D E F I N I T I O N S    A N D    M A C R O S
 *----------------------------------------------------------------------------------------
 */

/*----------------------------------------------------------------------------------------
 *                  T Y P E D E F S     A N D     S T R U C T U R E S
 *----------------------------------------------------------------------------------------
 */

/*----------------------------------------------------------------------------------------
 *           P R O T O T Y P E S     O F     L O C A L     F U N C T I O N S
 *----------------------------------------------------------------------------------------
 */
VOID
GetF10EarlyInitOnCoreTable (
  IN       CPU_SPECIFIC_SERVICES                *FamilyServices,
     OUT   CONST S_PERFORM_EARLY_INIT_ON_CORE  **Table,
  IN       AMD_CPU_EARLY_PARAMS                 *EarlyParams,
  IN       AMD_CONFIG_PARAMS                    *StdHeader
  );

/*----------------------------------------------------------------------------------------
 *                          E X P O R T E D    F U N C T I O N S
 *----------------------------------------------------------------------------------------
 */
extern F_PERFORM_EARLY_INIT_ON_CORE McaInitializationAtEarly;
extern F_PERFORM_EARLY_INIT_ON_CORE SetRegistersFromTablesAtEarly;
extern F_PERFORM_EARLY_INIT_ON_CORE SetBrandIdRegistersAtEarly;
extern F_PERFORM_EARLY_INIT_ON_CORE LocalApicInitializationAtEarly;
extern F_PERFORM_EARLY_INIT_ON_CORE LoadMicrocodePatchAtEarly;

/*
 * Ordered list of early-init steps for Family 10h cores.  Each entry
 * pairs a step routine with the condition under which it runs; the
 * list is NULL-terminated.  Microcode loading runs only on warm reset.
 */
CONST S_PERFORM_EARLY_INIT_ON_CORE ROMDATA F10EarlyInitOnCoreTable[] =
{
  {McaInitializationAtEarly,        PERFORM_EARLY_ANY_CONDITION},
  {SetRegistersFromTablesAtEarly,   PERFORM_EARLY_ANY_CONDITION},
  {SetBrandIdRegistersAtEarly,      PERFORM_EARLY_ANY_CONDITION},
  {LocalApicInitializationAtEarly,  PERFORM_EARLY_ANY_CONDITION},
  {LoadMicrocodePatchAtEarly,       PERFORM_EARLY_WARM_RESET},
  {NULL, 0}
};

/*------------------------------------------------------------------------------------*/
/**
 * Initializer routine that may be invoked at AmdCpuEarly to return the steps that a
 * processor that uses the standard initialization steps should take.
 *
 *  @CpuServiceMethod{::F_GET_EARLY_INIT_TABLE}.
 *
 * @param[in]       FamilyServices    The current Family Specific Services.
 * @param[out]      Table             Table of appropriate init steps for the executing core.
 * @param[in]       EarlyParams       Service Interface structure to initialize.
 * @param[in]       StdHeader         Opaque handle to standard config header.
 *
 */
VOID
GetF10EarlyInitOnCoreTable (
  IN       CPU_SPECIFIC_SERVICES                *FamilyServices,
     OUT   CONST S_PERFORM_EARLY_INIT_ON_CORE  **Table,
  IN       AMD_CPU_EARLY_PARAMS                 *EarlyParams,
  IN       AMD_CONFIG_PARAMS                    *StdHeader
  )
{
  /* All Family 10h processors share the same fixed step table. */
  *Table = F10EarlyInitOnCoreTable;
}
gpl-2.0
abhijeet-dev/linux-samsung
drivers/net/can/sja1000/peak_pci.c
36
19914
/* * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> * * Derived from the PCAN project file driver/src/pcan_pci.c: * * Copyright (C) 2001-2006 PEAK System-Technik GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/can.h> #include <linux/can/dev.h> #include "sja1000.h" MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards"); MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards"); MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards"); MODULE_LICENSE("GPL v2"); #define DRV_NAME "peak_pci" struct peak_pciec_card; struct peak_pci_chan { void __iomem *cfg_base; /* Common for all channels */ struct net_device *prev_dev; /* Chain of network devices */ u16 icr_mask; /* Interrupt mask for fast ack */ struct peak_pciec_card *pciec_card; /* only for PCIeC LEDs */ }; #define PEAK_PCI_CAN_CLOCK (16000000 / 2) #define PEAK_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) #define PEAK_PCI_OCR OCR_TX0_PUSHPULL /* * Important PITA registers */ #define PITA_ICR 0x00 /* Interrupt control register */ #define PITA_GPIOICR 0x18 /* GPIO interface control register */ #define PITA_MISC 0x1C /* Miscellaneous register */ #define PEAK_PCI_CFG_SIZE 0x1000 /* Size of the config 
PCI bar */ #define PEAK_PCI_CHAN_SIZE 0x0400 /* Size used by the channel */ #define PEAK_PCI_VENDOR_ID 0x001C /* The PCI device and vendor IDs */ #define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */ #define PEAK_PCIEC_DEVICE_ID 0x0002 /* for ExpressCard slot cards */ #define PEAK_PCIE_DEVICE_ID 0x0003 /* for nextgen PCIe slot cards */ #define PEAK_CPCI_DEVICE_ID 0x0004 /* for nextgen cPCI slot cards */ #define PEAK_MPCI_DEVICE_ID 0x0005 /* for nextgen miniPCI slot cards */ #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ #define PEAK_PCI_CHAN_MAX 4 static const u16 peak_pci_icr_masks[PEAK_PCI_CHAN_MAX] = { 0x02, 0x01, 0x40, 0x80 }; static const struct pci_device_id peak_pci_tbl[] = { {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, #ifdef CONFIG_CAN_PEAK_PCIEC {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, #endif {0,} }; MODULE_DEVICE_TABLE(pci, peak_pci_tbl); #ifdef CONFIG_CAN_PEAK_PCIEC /* * PCAN-ExpressCard needs I2C bit-banging configuration option. 
*/ /* GPIOICR byte access offsets */ #define PITA_GPOUT 0x18 /* GPx output value */ #define PITA_GPIN 0x19 /* GPx input value */ #define PITA_GPOEN 0x1A /* configure GPx as ouput pin */ /* I2C GP bits */ #define PITA_GPIN_SCL 0x01 /* Serial Clock Line */ #define PITA_GPIN_SDA 0x04 /* Serial DAta line */ #define PCA9553_1_SLAVEADDR (0xC4 >> 1) /* PCA9553 LS0 fields values */ enum { PCA9553_LOW, PCA9553_HIGHZ, PCA9553_PWM0, PCA9553_PWM1 }; /* LEDs control */ #define PCA9553_ON PCA9553_LOW #define PCA9553_OFF PCA9553_HIGHZ #define PCA9553_SLOW PCA9553_PWM0 #define PCA9553_FAST PCA9553_PWM1 #define PCA9553_LED(c) (1 << (c)) #define PCA9553_LED_STATE(s, c) ((s) << ((c) << 1)) #define PCA9553_LED_ON(c) PCA9553_LED_STATE(PCA9553_ON, c) #define PCA9553_LED_OFF(c) PCA9553_LED_STATE(PCA9553_OFF, c) #define PCA9553_LED_SLOW(c) PCA9553_LED_STATE(PCA9553_SLOW, c) #define PCA9553_LED_FAST(c) PCA9553_LED_STATE(PCA9553_FAST, c) #define PCA9553_LED_MASK(c) PCA9553_LED_STATE(0x03, c) #define PCA9553_LED_OFF_ALL (PCA9553_LED_OFF(0) | PCA9553_LED_OFF(1)) #define PCA9553_LS0_INIT 0x40 /* initial value (!= from 0x00) */ struct peak_pciec_chan { struct net_device *netdev; unsigned long prev_rx_bytes; unsigned long prev_tx_bytes; }; struct peak_pciec_card { void __iomem *cfg_base; /* Common for all channels */ void __iomem *reg_base; /* first channel base address */ u8 led_cache; /* leds state cache */ /* PCIExpressCard i2c data */ struct i2c_algo_bit_data i2c_bit; struct i2c_adapter led_chip; struct delayed_work led_work; /* led delayed work */ int chan_count; struct peak_pciec_chan channel[PEAK_PCI_CHAN_MAX]; }; /* "normal" pci register write callback is overloaded for leds control */ static void peak_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val); static inline void pita_set_scl_highz(struct peak_pciec_card *card) { u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SCL; writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static inline void 
pita_set_sda_highz(struct peak_pciec_card *card) { u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SDA; writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static void peak_pciec_init_pita_gpio(struct peak_pciec_card *card) { /* raise SCL & SDA GPIOs to high-Z */ pita_set_scl_highz(card); pita_set_sda_highz(card); } static void pita_setsda(void *data, int state) { struct peak_pciec_card *card = (struct peak_pciec_card *)data; u8 gp_out, gp_outen; /* set output sda always to 0 */ gp_out = readb(card->cfg_base + PITA_GPOUT) & ~PITA_GPIN_SDA; writeb(gp_out, card->cfg_base + PITA_GPOUT); /* control output sda with GPOEN */ gp_outen = readb(card->cfg_base + PITA_GPOEN); if (state) gp_outen &= ~PITA_GPIN_SDA; else gp_outen |= PITA_GPIN_SDA; writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static void pita_setscl(void *data, int state) { struct peak_pciec_card *card = (struct peak_pciec_card *)data; u8 gp_out, gp_outen; /* set output scl always to 0 */ gp_out = readb(card->cfg_base + PITA_GPOUT) & ~PITA_GPIN_SCL; writeb(gp_out, card->cfg_base + PITA_GPOUT); /* control output scl with GPOEN */ gp_outen = readb(card->cfg_base + PITA_GPOEN); if (state) gp_outen &= ~PITA_GPIN_SCL; else gp_outen |= PITA_GPIN_SCL; writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static int pita_getsda(void *data) { struct peak_pciec_card *card = (struct peak_pciec_card *)data; /* set tristate */ pita_set_sda_highz(card); return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SDA) ? 1 : 0; } static int pita_getscl(void *data) { struct peak_pciec_card *card = (struct peak_pciec_card *)data; /* set tristate */ pita_set_scl_highz(card); return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SCL) ? 
1 : 0; } /* * write commands to the LED chip though the I2C-bus of the PCAN-PCIeC */ static int peak_pciec_write_pca9553(struct peak_pciec_card *card, u8 offset, u8 data) { u8 buffer[2] = { offset, data }; struct i2c_msg msg = { .addr = PCA9553_1_SLAVEADDR, .len = 2, .buf = buffer, }; int ret; /* cache led mask */ if ((offset == 5) && (data == card->led_cache)) return 0; ret = i2c_transfer(&card->led_chip, &msg, 1); if (ret < 0) return ret; if (offset == 5) card->led_cache = data; return 0; } /* * delayed work callback used to control the LEDs */ static void peak_pciec_led_work(struct work_struct *work) { struct peak_pciec_card *card = container_of(work, struct peak_pciec_card, led_work.work); struct net_device *netdev; u8 new_led = card->led_cache; int i, up_count = 0; /* first check what is to do */ for (i = 0; i < card->chan_count; i++) { /* default is: not configured */ new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_ON(i); netdev = card->channel[i].netdev; if (!netdev || !(netdev->flags & IFF_UP)) continue; up_count++; /* no activity (but configured) */ new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_SLOW(i); /* if bytes counters changed, set fast blinking led */ if (netdev->stats.rx_bytes != card->channel[i].prev_rx_bytes) { card->channel[i].prev_rx_bytes = netdev->stats.rx_bytes; new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_FAST(i); } if (netdev->stats.tx_bytes != card->channel[i].prev_tx_bytes) { card->channel[i].prev_tx_bytes = netdev->stats.tx_bytes; new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_FAST(i); } } /* check if LS0 settings changed, only update i2c if so */ peak_pciec_write_pca9553(card, 5, new_led); /* restart timer (except if no more configured channels) */ if (up_count) schedule_delayed_work(&card->led_work, HZ); } /* * set LEDs blinking state */ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s) { u8 new_led = card->led_cache; int i; /* first check what is to do */ for (i = 0; 
i < card->chan_count; i++) if (led_mask & PCA9553_LED(i)) { new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_STATE(s, i); } /* check if LS0 settings changed, only update i2c if so */ peak_pciec_write_pca9553(card, 5, new_led); } /* * start one second delayed work to control LEDs */ static void peak_pciec_start_led_work(struct peak_pciec_card *card) { schedule_delayed_work(&card->led_work, HZ); } /* * stop LEDs delayed work */ static void peak_pciec_stop_led_work(struct peak_pciec_card *card) { cancel_delayed_work_sync(&card->led_work); } /* * initialize the PCA9553 4-bit I2C-bus LED chip */ static int peak_pciec_init_leds(struct peak_pciec_card *card) { int err; /* prescaler for frequency 0: "SLOW" = 1 Hz = "44" */ err = peak_pciec_write_pca9553(card, 1, 44 / 1); if (err) return err; /* duty cycle 0: 50% */ err = peak_pciec_write_pca9553(card, 2, 0x80); if (err) return err; /* prescaler for frequency 1: "FAST" = 5 Hz */ err = peak_pciec_write_pca9553(card, 3, 44 / 5); if (err) return err; /* duty cycle 1: 50% */ err = peak_pciec_write_pca9553(card, 4, 0x80); if (err) return err; /* switch LEDs to initial state */ return peak_pciec_write_pca9553(card, 5, PCA9553_LS0_INIT); } /* * restore LEDs state to off peak_pciec_leds_exit */ static void peak_pciec_leds_exit(struct peak_pciec_card *card) { /* switch LEDs to off */ peak_pciec_write_pca9553(card, 5, PCA9553_LED_OFF_ALL); } /* * normal write sja1000 register method overloaded to catch when controller * is started or stopped, to control leds */ static void peak_pciec_write_reg(const struct sja1000_priv *priv, int port, u8 val) { struct peak_pci_chan *chan = priv->priv; struct peak_pciec_card *card = chan->pciec_card; int c = (priv->reg_base - card->reg_base) / PEAK_PCI_CHAN_SIZE; /* sja1000 register changes control the leds state */ if (port == SJA1000_MOD) switch (val) { case MOD_RM: /* Reset Mode: set led on */ peak_pciec_set_leds(card, PCA9553_LED(c), PCA9553_ON); break; case 0x00: /* Normal Mode: led slow 
blinking and start led timer */ peak_pciec_set_leds(card, PCA9553_LED(c), PCA9553_SLOW); peak_pciec_start_led_work(card); break; default: break; } /* call base function */ peak_pci_write_reg(priv, port, val); } static struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = { .setsda = pita_setsda, .setscl = pita_setscl, .getsda = pita_getsda, .getscl = pita_getscl, .udelay = 10, .timeout = HZ, }; static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); struct peak_pci_chan *chan = priv->priv; struct peak_pciec_card *card; int err; /* copy i2c object address from 1st channel */ if (chan->prev_dev) { struct sja1000_priv *prev_priv = netdev_priv(chan->prev_dev); struct peak_pci_chan *prev_chan = prev_priv->priv; card = prev_chan->pciec_card; if (!card) return -ENODEV; /* channel is the first one: do the init part */ } else { /* create the bit banging I2C adapter structure */ card = kzalloc(sizeof(struct peak_pciec_card), GFP_KERNEL); if (!card) return -ENOMEM; card->cfg_base = chan->cfg_base; card->reg_base = priv->reg_base; card->led_chip.owner = THIS_MODULE; card->led_chip.dev.parent = &pdev->dev; card->led_chip.algo_data = &card->i2c_bit; strncpy(card->led_chip.name, "peak_i2c", sizeof(card->led_chip.name)); card->i2c_bit = peak_pciec_i2c_bit_ops; card->i2c_bit.udelay = 10; card->i2c_bit.timeout = HZ; card->i2c_bit.data = card; peak_pciec_init_pita_gpio(card); err = i2c_bit_add_bus(&card->led_chip); if (err) { dev_err(&pdev->dev, "i2c init failed\n"); goto pciec_init_err_1; } err = peak_pciec_init_leds(card); if (err) { dev_err(&pdev->dev, "leds hardware init failed\n"); goto pciec_init_err_2; } INIT_DELAYED_WORK(&card->led_work, peak_pciec_led_work); /* PCAN-ExpressCard needs its own callback for leds */ priv->write_reg = peak_pciec_write_reg; } chan->pciec_card = card; card->channel[card->chan_count++].netdev = dev; return 0; pciec_init_err_2: i2c_del_adapter(&card->led_chip); pciec_init_err_1: 
peak_pciec_init_pita_gpio(card); kfree(card); return err; } static void peak_pciec_remove(struct peak_pciec_card *card) { peak_pciec_stop_led_work(card); peak_pciec_leds_exit(card); i2c_del_adapter(&card->led_chip); peak_pciec_init_pita_gpio(card); kfree(card); } #else /* CONFIG_CAN_PEAK_PCIEC */ /* * Placebo functions when PCAN-ExpressCard support is not selected */ static inline int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) { return -ENODEV; } static inline void peak_pciec_remove(struct peak_pciec_card *card) { } #endif /* CONFIG_CAN_PEAK_PCIEC */ static u8 peak_pci_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + (port << 2)); } static void peak_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + (port << 2)); } static void peak_pci_post_irq(const struct sja1000_priv *priv) { struct peak_pci_chan *chan = priv->priv; u16 icr; /* Select and clear in PITA stored interrupt */ icr = readw(chan->cfg_base + PITA_ICR); if (icr & chan->icr_mask) writew(chan->icr_mask, chan->cfg_base + PITA_ICR); } static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct peak_pci_chan *chan; struct net_device *dev, *prev_dev; void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, DRV_NAME); if (err) goto failure_disable_pci; err = pci_read_config_word(pdev, 0x2e, &sub_sys_id); if (err) goto failure_release_regions; dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n", pdev->vendor, pdev->device, sub_sys_id); err = pci_write_config_word(pdev, 0x44, 0); if (err) goto failure_release_regions; if (sub_sys_id >= 12) channels = 4; else if (sub_sys_id >= 10) channels = 3; else if (sub_sys_id >= 4) channels = 2; else channels = 1; cfg_base = pci_iomap(pdev, 0, PEAK_PCI_CFG_SIZE); if (!cfg_base) { dev_err(&pdev->dev, "failed to map 
PCI resource #0\n"); err = -ENOMEM; goto failure_release_regions; } reg_base = pci_iomap(pdev, 1, PEAK_PCI_CHAN_SIZE * channels); if (!reg_base) { dev_err(&pdev->dev, "failed to map PCI resource #1\n"); err = -ENOMEM; goto failure_unmap_cfg_base; } /* Set GPIO control register */ writew(0x0005, cfg_base + PITA_GPIOICR + 2); /* Enable all channels of this card */ writeb(0x00, cfg_base + PITA_GPIOICR); /* Toggle reset */ writeb(0x05, cfg_base + PITA_MISC + 3); mdelay(5); /* Leave parport mux mode */ writeb(0x04, cfg_base + PITA_MISC + 3); icr = readw(cfg_base + PITA_ICR + 2); for (i = 0; i < channels; i++) { dev = alloc_sja1000dev(sizeof(struct peak_pci_chan)); if (!dev) { err = -ENOMEM; goto failure_remove_channels; } priv = netdev_priv(dev); chan = priv->priv; chan->cfg_base = cfg_base; priv->reg_base = reg_base + i * PEAK_PCI_CHAN_SIZE; priv->read_reg = peak_pci_read_reg; priv->write_reg = peak_pci_write_reg; priv->post_irq = peak_pci_post_irq; priv->can.clock.freq = PEAK_PCI_CAN_CLOCK; priv->ocr = PEAK_PCI_OCR; priv->cdr = PEAK_PCI_CDR; /* Neither a slave nor a single device distributes the clock */ if (channels == 1 || i > 0) priv->cdr |= CDR_CLK_OFF; /* Setup interrupt handling */ priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; chan->icr_mask = peak_pci_icr_masks[i]; icr |= chan->icr_mask; SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = i; /* Create chain of SJA1000 devices */ chan->prev_dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, dev); /* * PCAN-ExpressCard needs some additional i2c init. 
* This must be done *before* register_sja1000dev() but * *after* devices linkage */ if (pdev->device == PEAK_PCIEC_DEVICE_ID) { err = peak_pciec_probe(pdev, dev); if (err) { dev_err(&pdev->dev, "failed to probe device (err %d)\n", err); goto failure_free_dev; } } err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "failed to register device\n"); goto failure_free_dev; } dev_info(&pdev->dev, "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n", dev->name, priv->reg_base, chan->cfg_base, dev->irq); } /* Enable interrupts */ writew(icr, cfg_base + PITA_ICR + 2); return 0; failure_free_dev: pci_set_drvdata(pdev, chan->prev_dev); free_sja1000dev(dev); failure_remove_channels: /* Disable interrupts */ writew(0x0, cfg_base + PITA_ICR + 2); chan = NULL; for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { priv = netdev_priv(dev); chan = priv->priv; prev_dev = chan->prev_dev; unregister_sja1000dev(dev); free_sja1000dev(dev); } /* free any PCIeC resources too */ if (chan && chan->pciec_card) peak_pciec_remove(chan->pciec_card); pci_iounmap(pdev, reg_base); failure_unmap_cfg_base: pci_iounmap(pdev, cfg_base); failure_release_regions: pci_release_regions(pdev); failure_disable_pci: pci_disable_device(pdev); return err; } static void peak_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); /* Last device */ struct sja1000_priv *priv = netdev_priv(dev); struct peak_pci_chan *chan = priv->priv; void __iomem *cfg_base = chan->cfg_base; void __iomem *reg_base = priv->reg_base; /* Disable interrupts */ writew(0x0, cfg_base + PITA_ICR + 2); /* Loop over all registered devices */ while (1) { struct net_device *prev_dev = chan->prev_dev; dev_info(&pdev->dev, "removing device %s\n", dev->name); unregister_sja1000dev(dev); free_sja1000dev(dev); dev = prev_dev; if (!dev) { /* do that only for first channel */ if (chan->pciec_card) peak_pciec_remove(chan->pciec_card); break; } priv = netdev_priv(dev); chan = priv->priv; } pci_iounmap(pdev, reg_base); 
pci_iounmap(pdev, cfg_base); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver peak_pci_driver = { .name = DRV_NAME, .id_table = peak_pci_tbl, .probe = peak_pci_probe, .remove = peak_pci_remove, }; module_pci_driver(peak_pci_driver);
gpl-2.0
varun10221/linux
drivers/pci/host/pcie-iproc-bcma.c
548
2990
/* * Copyright (C) 2015 Broadcom Corporation * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/phy/phy.h> #include <linux/bcma/bcma.h> #include <linux/ioport.h> #include "pcie-iproc.h" /* NS: CLASS field is R/O, and set to wrong 0x200 value */ static void bcma_pcie2_fixup_class(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct pci_sys_data *sys = dev->sysdata; struct iproc_pcie *pcie = sys->private_data; struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); return bcma_core_irq(bdev, 5); } static int iproc_pcie_bcma_probe(struct bcma_device *bdev) { struct iproc_pcie *pcie; LIST_HEAD(res); struct resource res_mem; int ret; pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pcie->dev = &bdev->dev; bcma_set_drvdata(bdev, pcie); pcie->base = bdev->io_addr; res_mem.start = bdev->addr_s[0]; res_mem.end = bdev->addr_s[0] + SZ_128M - 1; res_mem.name = "PCIe MEM space"; res_mem.flags = IORESOURCE_MEM; pci_add_resource(&res, &res_mem); pcie->map_irq = iproc_pcie_bcma_map_irq; ret = iproc_pcie_setup(pcie, &res); if (ret) dev_err(pcie->dev, "PCIe controller setup failed\n"); pci_free_resource_list(&res); 
return ret; } static void iproc_pcie_bcma_remove(struct bcma_device *bdev) { struct iproc_pcie *pcie = bcma_get_drvdata(bdev); iproc_pcie_remove(pcie); } static const struct bcma_device_id iproc_pcie_bcma_table[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), {}, }; MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); static struct bcma_driver iproc_pcie_bcma_driver = { .name = KBUILD_MODNAME, .id_table = iproc_pcie_bcma_table, .probe = iproc_pcie_bcma_probe, .remove = iproc_pcie_bcma_remove, }; static int __init iproc_pcie_bcma_init(void) { return bcma_driver_register(&iproc_pcie_bcma_driver); } module_init(iproc_pcie_bcma_init); static void __exit iproc_pcie_bcma_exit(void) { bcma_driver_unregister(&iproc_pcie_bcma_driver); } module_exit(iproc_pcie_bcma_exit); MODULE_AUTHOR("Hauke Mehrtens"); MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
gunine/htc-rider-aosp-kernel
drivers/watchdog/sb_wdog.c
804
8683
/* * Watchdog driver for SiByte SB1 SoCs * * Copyright (C) 2007 OnStor, Inc. * Andrew Sharp <andy.sharp@lsi.com> * * This driver is intended to make the second of two hardware watchdogs * on the Sibyte 12XX and 11XX SoCs available to the user. There are two * such devices available on the SoC, but it seems that there isn't an * enumeration class for watchdogs in Linux like there is for RTCs. * The second is used rather than the first because it uses IRQ 1, * thereby avoiding all that IRQ 0 problematic nonsense. * * I have not tried this driver on a 1480 processor; it might work * just well enough to really screw things up. * * It is a simple timer, and there is an interrupt that is raised the * first time the timer expires. The second time it expires, the chip * is reset and there is no way to redirect that NMI. Which could * be problematic in some cases where this chip is sitting on the HT * bus and has just taken responsibility for providing a cache block. * Since the reset can't be redirected to the external reset pin, it is * possible that other HT connected processors might hang and not reset. * For Linux, a soft reset would probably be even worse than a hard reset. * There you have it. * * The timer takes 23 bits of a 64 bit register (?) as a count value, * and decrements the count every microsecond, for a max value of * 0x7fffff usec or about 8.3ish seconds. * * This watchdog borrows some user semantics from the softdog driver, * in that if you close the fd, it leaves the watchdog running, unless * you previously wrote a 'V' to the fd, in which case it disables * the watchdog when you close the fd like some other drivers. * * Based on various other watchdog drivers, which are probably all * loosely based on something Alan Cox wrote years ago. * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 1 or 2 as published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/reboot.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/interrupt.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_int.h> #include <asm/sibyte/sb1250_scd.h> static DEFINE_SPINLOCK(sbwd_lock); /* * set the initial count value of a timer * * wdog is the iomem address of the cfg register */ void sbwdog_set(char __iomem *wdog, unsigned long t) { spin_lock(&sbwd_lock); __raw_writeb(0, wdog); __raw_writeq(t & 0x7fffffUL, wdog - 0x10); spin_unlock(&sbwd_lock); } /* * cause the timer to [re]load it's initial count and start counting * all over again * * wdog is the iomem address of the cfg register */ void sbwdog_pet(char __iomem *wdog) { spin_lock(&sbwd_lock); __raw_writeb(__raw_readb(wdog) | 1, wdog); spin_unlock(&sbwd_lock); } static unsigned long sbwdog_gate; /* keeps it to one thread only */ static char __iomem *kern_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_0)); static char __iomem *user_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_1)); static unsigned long timeout = 0x7fffffUL; /* useconds: 8.3ish secs. */ static int expect_close; static const struct watchdog_info ident = { .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "SiByte Watchdog", }; /* * Allow only a single thread to walk the dog */ static int sbwdog_open(struct inode *inode, struct file *file) { nonseekable_open(inode, file); if (test_and_set_bit(0, &sbwdog_gate)) return -EBUSY; __module_get(THIS_MODULE); /* * Activate the timer */ sbwdog_set(user_dog, timeout); __raw_writeb(1, user_dog); return 0; } /* * Put the dog back in the kennel. 
*/ static int sbwdog_release(struct inode *inode, struct file *file) { if (expect_close == 42) { __raw_writeb(0, user_dog); module_put(THIS_MODULE); } else { printk(KERN_CRIT "%s: Unexpected close, not stopping watchdog!\n", ident.identity); sbwdog_pet(user_dog); } clear_bit(0, &sbwdog_gate); expect_close = 0; return 0; } /* * 42 - the answer */ static ssize_t sbwdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { int i; if (len) { /* * restart the timer */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } sbwdog_pet(user_dog); } return len; } static long sbwdog_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; unsigned long time; void __user *argp = (void __user *)arg; int __user *p = argp; switch (cmd) { case WDIOC_GETSUPPORT: ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: ret = put_user(0, p); break; case WDIOC_KEEPALIVE: sbwdog_pet(user_dog); ret = 0; break; case WDIOC_SETTIMEOUT: ret = get_user(time, p); if (ret) break; time *= 1000000; if (time > 0x7fffffUL) { ret = -EINVAL; break; } timeout = time; sbwdog_set(user_dog, timeout); sbwdog_pet(user_dog); case WDIOC_GETTIMEOUT: /* * get the remaining count from the ... 
count register * which is 1*8 before the config register */ ret = put_user(__raw_readq(user_dog - 8) / 1000000, p); break; } return ret; } /* * Notifier for system down */ static int sbwdog_notify_sys(struct notifier_block *this, unsigned long code, void *erf) { if (code == SYS_DOWN || code == SYS_HALT) { /* * sit and sit */ __raw_writeb(0, user_dog); __raw_writeb(0, kern_dog); } return NOTIFY_DONE; } static const struct file_operations sbwdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = sbwdog_write, .unlocked_ioctl = sbwdog_ioctl, .open = sbwdog_open, .release = sbwdog_release, }; static struct miscdevice sbwdog_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &sbwdog_fops, }; static struct notifier_block sbwdog_notifier = { .notifier_call = sbwdog_notify_sys, }; /* * interrupt handler * * doesn't do a whole lot for user, but oh so cleverly written so kernel * code can use it to re-up the watchdog, thereby saving the kernel from * having to create and maintain a timer, just to tickle another timer, * which is just so wrong. 
*/ irqreturn_t sbwdog_interrupt(int irq, void *addr) { unsigned long wd_init; char *wd_cfg_reg = (char *)addr; u8 cfg; cfg = __raw_readb(wd_cfg_reg); wd_init = __raw_readq(wd_cfg_reg - 8) & 0x7fffff; /* * if it's the second watchdog timer, it's for those users */ if (wd_cfg_reg == user_dog) printk(KERN_CRIT "%s in danger of initiating system reset " "in %ld.%01ld seconds\n", ident.identity, wd_init / 1000000, (wd_init / 100000) % 10); else cfg |= 1; __raw_writeb(cfg, wd_cfg_reg); return IRQ_HANDLED; } static int __init sbwdog_init(void) { int ret; /* * register a reboot notifier */ ret = register_reboot_notifier(&sbwdog_notifier); if (ret) { printk(KERN_ERR "%s: cannot register reboot notifier (err=%d)\n", ident.identity, ret); return ret; } /* * get the resources */ ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED, ident.identity, (void *)user_dog); if (ret) { printk(KERN_ERR "%s: failed to request irq 1 - %d\n", ident.identity, ret); return ret; } ret = misc_register(&sbwdog_miscdev); if (ret == 0) { printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", ident.identity, timeout / 1000000, (timeout / 100000) % 10); } else free_irq(1, (void *)user_dog); return ret; } static void __exit sbwdog_exit(void) { misc_deregister(&sbwdog_miscdev); } module_init(sbwdog_init); module_exit(sbwdog_exit); MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>"); MODULE_DESCRIPTION("SiByte Watchdog"); module_param(timeout, ulong, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); /* * example code that can be put in a platform code area to utilize the * first watchdog timer for the kernels own purpose. void platform_wd_setup(void) { int ret; ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED, "Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0)); if (ret) { printk(KERN_CRIT "Watchdog IRQ zero(0) failed to be requested - %d\n", ret); } } */
gpl-2.0
Radium-Devices/Radium_jflte
drivers/hid/hid-lg4ff.c
1316
15134
/* * Force feedback support for Logitech Speed Force Wireless * * http://wiibrew.org/wiki/Logitech_USB_steering_wheel * * Copyright (c) 2010 Simon Wood <simon@mungewell.org> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/input.h> #include <linux/usb.h> #include <linux/hid.h> #include "usbhid/usbhid.h" #include "hid-lg.h" #include "hid-ids.h" #define DFGT_REV_MAJ 0x13 #define DFGT_REV_MIN 0x22 #define DFP_REV_MAJ 0x11 #define DFP_REV_MIN 0x06 #define FFEX_REV_MAJ 0x21 #define FFEX_REV_MIN 0x00 #define G25_REV_MAJ 0x12 #define G25_REV_MIN 0x22 #define G27_REV_MAJ 0x12 #define G27_REV_MIN 0x38 #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range); static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range); static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store); static bool list_inited; struct lg4ff_device_entry { char *device_id; /* Use name in respective kobject structure's address as the ID */ __u16 range; __u16 min_range; __u16 max_range; __u8 leds; struct 
list_head list; void (*set_range)(struct hid_device *hid, u16 range); }; static struct lg4ff_device_entry device_list; static const signed short lg4ff_wheel_effects[] = { FF_CONSTANT, FF_AUTOCENTER, -1 }; struct lg4ff_wheel { const __u32 product_id; const signed short *ff_effects; const __u16 min_range; const __u16 max_range; void (*set_range)(struct hid_device *hid, u16 range); }; static const struct lg4ff_wheel lg4ff_devices[] = { {USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}, {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}, {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp}, {USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, {USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL}, {USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL} }; struct lg4ff_native_cmd { const __u8 cmd_num; /* Number of commands to send */ const __u8 cmd[]; }; struct lg4ff_usb_revision { const __u16 rev_maj; const __u16 rev_min; const struct lg4ff_native_cmd *command; }; static const struct lg4ff_native_cmd native_dfp = { 1, {0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct lg4ff_native_cmd native_dfgt = { 2, {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */ 0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00} /* 2nd command */ }; static const struct lg4ff_native_cmd native_g25 = { 1, {0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00} }; static const struct lg4ff_native_cmd native_g27 = { 2, {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */ 0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00} /* 2nd command */ }; static const struct lg4ff_usb_revision lg4ff_revs[] = { {DFGT_REV_MAJ, DFGT_REV_MIN, &native_dfgt}, /* Driving Force GT */ 
{DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */ {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */ {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ }; static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); int x; #define CLAMP(x) if (x < 0) x = 0; if (x > 0xff) x = 0xff switch (effect->type) { case FF_CONSTANT: x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */ CLAMP(x); report->field[0]->value[0] = 0x11; /* Slot 1 */ report->field[0]->value[1] = 0x08; report->field[0]->value[2] = x; report->field[0]->value[3] = 0x80; report->field[0]->value[4] = 0x00; report->field[0]->value[5] = 0x00; report->field[0]->value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); break; } return 0; } /* Sends default autocentering command compatible with * all wheels except Formula Force EX */ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); report->field[0]->value[0] = 0xfe; report->field[0]->value[1] = 0x0d; report->field[0]->value[2] = magnitude >> 13; report->field[0]->value[3] = magnitude >> 13; report->field[0]->value[4] = magnitude >> 8; report->field[0]->value[5] = 0x00; report->field[0]->value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); } /* Sends autocentering command compatible with Formula Force EX */ static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report 
*report = list_entry(report_list->next, struct hid_report, list); magnitude = magnitude * 90 / 65535; report->field[0]->value[0] = 0xfe; report->field[0]->value[1] = 0x03; report->field[0]->value[2] = magnitude >> 14; report->field[0]->value[3] = magnitude >> 14; report->field[0]->value[4] = magnitude; report->field[0]->value[5] = 0x00; report->field[0]->value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); } /* Sends command to set range compatible with G25/G27/Driving Force GT */ static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range) { struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); dbg_hid("G25/G27/DFGT: setting range to %u\n", range); report->field[0]->value[0] = 0xf8; report->field[0]->value[1] = 0x81; report->field[0]->value[2] = range & 0x00ff; report->field[0]->value[3] = (range & 0xff00) >> 8; report->field[0]->value[4] = 0x00; report->field[0]->value[5] = 0x00; report->field[0]->value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); } /* Sends commands to set range compatible with Driving Force Pro wheel */ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) { struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); int start_left, start_right, full_range; dbg_hid("Driving Force Pro: setting range to %u\n", range); /* Prepare "coarse" limit command */ report->field[0]->value[0] = 0xf8; report->field[0]->value[1] = 0x00; /* Set later */ report->field[0]->value[2] = 0x00; report->field[0]->value[3] = 0x00; report->field[0]->value[4] = 0x00; report->field[0]->value[5] = 0x00; report->field[0]->value[6] = 0x00; if (range > 200) { report->field[0]->value[1] = 0x03; full_range = 900; } else { report->field[0]->value[1] = 0x02; full_range = 200; } usbhid_submit_report(hid, report, 
USB_DIR_OUT); /* Prepare "fine" limit command */ report->field[0]->value[0] = 0x81; report->field[0]->value[1] = 0x0b; report->field[0]->value[2] = 0x00; report->field[0]->value[3] = 0x00; report->field[0]->value[4] = 0x00; report->field[0]->value[5] = 0x00; report->field[0]->value[6] = 0x00; if (range == 200 || range == 900) { /* Do not apply any fine limit */ usbhid_submit_report(hid, report, USB_DIR_OUT); return; } /* Construct fine limit command */ start_left = (((full_range - range + 1) * 2047) / full_range); start_right = 0xfff - start_left; report->field[0]->value[2] = start_left >> 4; report->field[0]->value[3] = start_right >> 4; report->field[0]->value[4] = 0xff; report->field[0]->value[5] = (start_right & 0xe) << 4 | (start_left & 0xe); report->field[0]->value[6] = 0xff; usbhid_submit_report(hid, report, USB_DIR_OUT); } static void hid_lg4ff_switch_native(struct hid_device *hid, const struct lg4ff_native_cmd *cmd) { struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); __u8 i, j; j = 0; while (j < 7*cmd->cmd_num) { for (i = 0; i < 7; i++) report->field[0]->value[i] = cmd->cmd[j++]; usbhid_submit_report(hid, report, USB_DIR_OUT); } } /* Read current range and display it in terminal */ static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf) { struct lg4ff_device_entry *uninitialized_var(entry); struct list_head *h; struct hid_device *hid = to_hid_device(dev); size_t count; list_for_each(h, &device_list.list) { entry = list_entry(h, struct lg4ff_device_entry, list); if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) break; } if (h == &device_list.list) { dbg_hid("Device not found!"); return 0; } count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range); return count; } /* Set range to user specified value, call appropriate function * according to the type of the wheel */ static ssize_t 
lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct lg4ff_device_entry *uninitialized_var(entry); struct list_head *h; struct hid_device *hid = to_hid_device(dev); __u16 range = simple_strtoul(buf, NULL, 10); list_for_each(h, &device_list.list) { entry = list_entry(h, struct lg4ff_device_entry, list); if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) break; } if (h == &device_list.list) { dbg_hid("Device not found!"); return count; } if (range == 0) range = entry->max_range; /* Check if the wheel supports range setting * and that the range is within limits for the wheel */ if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) { entry->set_range(hid, range); entry->range = range; } return count; } int lg4ff_init(struct hid_device *hid) { struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); struct input_dev *dev = hidinput->input; struct lg4ff_device_entry *entry; struct usb_device_descriptor *udesc; int error, i, j; __u16 bcdDevice, rev_maj, rev_min; /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) return -1; /* Check what wheel has been connected */ for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) { if (hid->product == lg4ff_devices[i].product_id) { dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id); break; } } if (i == ARRAY_SIZE(lg4ff_devices)) { hid_err(hid, "Device is not supported by lg4ff driver. 
If you think it should be, consider reporting a bug to" "LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n"); return -1; } /* Attempt to switch wheel to native mode when applicable */ udesc = &(hid_to_usb_dev(hid)->descriptor); if (!udesc) { hid_err(hid, "NULL USB device descriptor\n"); return -1; } bcdDevice = le16_to_cpu(udesc->bcdDevice); rev_maj = bcdDevice >> 8; rev_min = bcdDevice & 0xff; if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_WHEEL) { dbg_hid("Generic wheel detected, can it do native?\n"); dbg_hid("USB revision: %2x.%02x\n", rev_maj, rev_min); for (j = 0; j < ARRAY_SIZE(lg4ff_revs); j++) { if (lg4ff_revs[j].rev_maj == rev_maj && lg4ff_revs[j].rev_min == rev_min) { hid_lg4ff_switch_native(hid, lg4ff_revs[j].command); hid_info(hid, "Switched to native mode\n"); } } } /* Set supported force feedback capabilities */ for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++) set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit); error = input_ff_create_memless(dev, NULL, hid_lg4ff_play); if (error) return error; /* Check if autocentering is available and * set the centering force to zero by default */ if (test_bit(FF_AUTOCENTER, dev->ffbit)) { if(rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex; else dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default; dev->ff->set_autocenter(dev, 0); } /* Initialize device_list if this is the first device to handle by lg4ff */ if (!list_inited) { INIT_LIST_HEAD(&device_list.list); list_inited = 1; } /* Add the device to device_list */ entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL); if (!entry) { hid_err(hid, "Cannot add device, insufficient memory.\n"); return -ENOMEM; } entry->device_id = kstrdup((&hid->dev)->kobj.name, GFP_KERNEL); if (!entry->device_id) { hid_err(hid, "Cannot set device_id, insufficient memory.\n"); kfree(entry); return 
-ENOMEM; } entry->min_range = lg4ff_devices[i].min_range; entry->max_range = lg4ff_devices[i].max_range; entry->set_range = lg4ff_devices[i].set_range; list_add(&entry->list, &device_list.list); /* Create sysfs interface */ error = device_create_file(&hid->dev, &dev_attr_range); if (error) return error; dbg_hid("sysfs interface created\n"); /* Set the maximum range to start with */ entry->range = entry->max_range; if (entry->set_range != NULL) entry->set_range(hid, entry->range); hid_info(hid, "Force feedback for Logitech Speed Force Wireless by Simon Wood <simon@mungewell.org>\n"); return 0; } int lg4ff_deinit(struct hid_device *hid) { bool found = 0; struct lg4ff_device_entry *entry; struct list_head *h, *g; list_for_each_safe(h, g, &device_list.list) { entry = list_entry(h, struct lg4ff_device_entry, list); if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) { list_del(h); kfree(entry->device_id); kfree(entry); found = 1; break; } } if (!found) { dbg_hid("Device entry not found!\n"); return -1; } device_remove_file(&hid->dev, &dev_attr_range); dbg_hid("Device successfully unregistered\n"); return 0; }
gpl-2.0
GameTheory-/android_kernel_g4stylusn2
drivers/scsi/aacraid/commctrl.c
1572
24178
/* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Module Name: * commctrl.c * * Abstract: Contains all routines for control of the AFA comm layer * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/blkdev.h> #include <linux/delay.h> /* ssleep prototype */ #include <linux/kthread.h> #include <linux/semaphore.h> #include <asm/uaccess.h> #include <scsi/scsi_host.h> #include "aacraid.h" /** * ioctl_send_fib - send a FIB from userspace * @dev: adapter is being processed * @arg: arguments to the ioctl call * * This routine sends a fib to the adapter on behalf of a user level * program. 
*/ # define AAC_DEBUG_PREAMBLE KERN_INFO # define AAC_DEBUG_POSTAMBLE static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) { struct hw_fib * kfib; struct fib *fibptr; struct hw_fib * hw_fib = (struct hw_fib *)0; dma_addr_t hw_fib_pa = (dma_addr_t)0LL; unsigned size; int retval; if (dev->in_reset) { return -EBUSY; } fibptr = aac_fib_alloc(dev); if(fibptr == NULL) { return -ENOMEM; } kfib = fibptr->hw_fib_va; /* * First copy in the header so that we can check the size field. */ if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { aac_fib_free(fibptr); return -EFAULT; } /* * Since we copy based on the fib header size, make sure that we * will not overrun the buffer when we copy the memory. Return * an error if we would. */ size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); if (size < le16_to_cpu(kfib->header.SenderSize)) size = le16_to_cpu(kfib->header.SenderSize); if (size > dev->max_fib_size) { dma_addr_t daddr; if (size > 2048) { retval = -EINVAL; goto cleanup; } kfib = pci_alloc_consistent(dev->pdev, size, &daddr); if (!kfib) { retval = -ENOMEM; goto cleanup; } /* Highjack the hw_fib */ hw_fib = fibptr->hw_fib_va; hw_fib_pa = fibptr->hw_fib_pa; fibptr->hw_fib_va = kfib; fibptr->hw_fib_pa = daddr; memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size); memcpy(kfib, hw_fib, dev->max_fib_size); } if (copy_from_user(kfib, arg, size)) { retval = -EFAULT; goto cleanup; } if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { aac_adapter_interrupt(dev); /* * Since we didn't really send a fib, zero out the state to allow * cleanup code not to assert. 
*/ kfib->header.XferState = 0; } else { retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr, le16_to_cpu(kfib->header.Size) , FsaNormal, 1, 1, NULL, NULL); if (retval) { goto cleanup; } if (aac_fib_complete(fibptr) != 0) { retval = -EINVAL; goto cleanup; } } /* * Make sure that the size returned by the adapter (which includes * the header) is less than or equal to the size of a fib, so we * don't corrupt application data. Then copy that size to the user * buffer. (Don't try to add the header information again, since it * was already included by the adapter.) */ retval = 0; if (copy_to_user(arg, (void *)kfib, size)) retval = -EFAULT; cleanup: if (hw_fib) { pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa); fibptr->hw_fib_pa = hw_fib_pa; fibptr->hw_fib_va = hw_fib; } if (retval != -ERESTARTSYS) aac_fib_free(fibptr); return retval; } /** * open_getadapter_fib - Get the next fib * * This routine will get the next Fib, if available, from the AdapterFibContext * passed in from the user. */ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct aac_fib_context * fibctx; int status; fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL); if (fibctx == NULL) { status = -ENOMEM; } else { unsigned long flags; struct list_head * entry; struct aac_fib_context * context; fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT; fibctx->size = sizeof(struct aac_fib_context); /* * Yes yes, I know this could be an index, but we have a * better guarantee of uniqueness for the locked loop below. * Without the aid of a persistent history, this also helps * reduce the chance that the opaque context would be reused. */ fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF); /* * Initialize the mutex used to wait for the next AIF. */ sema_init(&fibctx->wait_sem, 0); fibctx->wait = 0; /* * Initialize the fibs and set the count of fibs on * the list to 0. 
*/ fibctx->count = 0; INIT_LIST_HEAD(&fibctx->fib_list); fibctx->jiffies = jiffies/HZ; /* * Now add this context onto the adapter's * AdapterFibContext list. */ spin_lock_irqsave(&dev->fib_lock, flags); /* Ensure that we have a unique identifier */ entry = dev->fib_list.next; while (entry != &dev->fib_list) { context = list_entry(entry, struct aac_fib_context, next); if (context->unique == fibctx->unique) { /* Not unique (32 bits) */ fibctx->unique++; entry = dev->fib_list.next; } else { entry = entry->next; } } list_add_tail(&fibctx->next, &dev->fib_list); spin_unlock_irqrestore(&dev->fib_lock, flags); if (copy_to_user(arg, &fibctx->unique, sizeof(fibctx->unique))) { status = -EFAULT; } else { status = 0; } } return status; } /** * next_getadapter_fib - get the next fib * @dev: adapter to use * @arg: ioctl argument * * This routine will get the next Fib, if available, from the AdapterFibContext * passed in from the user. */ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct fib_ioctl f; struct fib *fib; struct aac_fib_context *fibctx; int status; struct list_head * entry; unsigned long flags; if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl))) return -EFAULT; /* * Verify that the HANDLE passed in was a valid AdapterFibContext * * Search the list of AdapterFibContext addresses on the adapter * to be sure this is a valid address */ spin_lock_irqsave(&dev->fib_lock, flags); entry = dev->fib_list.next; fibctx = NULL; while (entry != &dev->fib_list) { fibctx = list_entry(entry, struct aac_fib_context, next); /* * Extract the AdapterFibContext from the Input parameters. 
*/ if (fibctx->unique == f.fibctx) { /* We found a winner */ break; } entry = entry->next; fibctx = NULL; } if (!fibctx) { spin_unlock_irqrestore(&dev->fib_lock, flags); dprintk ((KERN_INFO "Fib Context not found\n")); return -EINVAL; } if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || (fibctx->size != sizeof(struct aac_fib_context))) { spin_unlock_irqrestore(&dev->fib_lock, flags); dprintk ((KERN_INFO "Fib Context corrupt?\n")); return -EINVAL; } status = 0; /* * If there are no fibs to send back, then either wait or return * -EAGAIN */ return_fib: if (!list_empty(&fibctx->fib_list)) { /* * Pull the next fib from the fibs */ entry = fibctx->fib_list.next; list_del(entry); fib = list_entry(entry, struct fib, fiblink); fibctx->count--; spin_unlock_irqrestore(&dev->fib_lock, flags); if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) { kfree(fib->hw_fib_va); kfree(fib); return -EFAULT; } /* * Free the space occupied by this copy of the fib. */ kfree(fib->hw_fib_va); kfree(fib); status = 0; } else { spin_unlock_irqrestore(&dev->fib_lock, flags); /* If someone killed the AIF aacraid thread, restart it */ status = !dev->aif_thread; if (status && !dev->in_reset && dev->queues && dev->fsa_dev) { /* Be paranoid, be very paranoid! */ kthread_stop(dev->thread); ssleep(1); dev->aif_thread = 0; dev->thread = kthread_run(aac_command_thread, dev, dev->name); ssleep(1); } if (f.wait) { if(down_interruptible(&fibctx->wait_sem) < 0) { status = -ERESTARTSYS; } else { /* Lock again and retry */ spin_lock_irqsave(&dev->fib_lock, flags); goto return_fib; } } else { status = -EAGAIN; } } fibctx->jiffies = jiffies/HZ; return status; } int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx) { struct fib *fib; /* * First free any FIBs that have not been consumed. 
*/ while (!list_empty(&fibctx->fib_list)) { struct list_head * entry; /* * Pull the next fib from the fibs */ entry = fibctx->fib_list.next; list_del(entry); fib = list_entry(entry, struct fib, fiblink); fibctx->count--; /* * Free the space occupied by this copy of the fib. */ kfree(fib->hw_fib_va); kfree(fib); } /* * Remove the Context from the AdapterFibContext List */ list_del(&fibctx->next); /* * Invalidate context */ fibctx->type = 0; /* * Free the space occupied by the Context */ kfree(fibctx); return 0; } /** * close_getadapter_fib - close down user fib context * @dev: adapter * @arg: ioctl arguments * * This routine will close down the fibctx passed in from the user. */ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct aac_fib_context *fibctx; int status; unsigned long flags; struct list_head * entry; /* * Verify that the HANDLE passed in was a valid AdapterFibContext * * Search the list of AdapterFibContext addresses on the adapter * to be sure this is a valid address */ entry = dev->fib_list.next; fibctx = NULL; while(entry != &dev->fib_list) { fibctx = list_entry(entry, struct aac_fib_context, next); /* * Extract the fibctx from the input parameters */ if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */ break; entry = entry->next; fibctx = NULL; } if (!fibctx) return 0; /* Already gone */ if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || (fibctx->size != sizeof(struct aac_fib_context))) return -EINVAL; spin_lock_irqsave(&dev->fib_lock, flags); status = aac_close_fib_context(dev, fibctx); spin_unlock_irqrestore(&dev->fib_lock, flags); return status; } /** * check_revision - close down user fib context * @dev: adapter * @arg: ioctl arguments * * This routine returns the driver version. * Under Linux, there have been no version incompatibilities, so this is * simple! 
*/ static int check_revision(struct aac_dev *dev, void __user *arg) { struct revision response; char *driver_version = aac_driver_version; u32 version; response.compat = 1; version = (simple_strtol(driver_version, &driver_version, 10) << 24) | 0x00000400; version += simple_strtol(driver_version + 1, &driver_version, 10) << 16; version += simple_strtol(driver_version + 1, NULL, 10); response.version = cpu_to_le32(version); # ifdef AAC_DRIVER_BUILD response.build = cpu_to_le32(AAC_DRIVER_BUILD); # else response.build = cpu_to_le32(9999); # endif if (copy_to_user(arg, &response, sizeof(response))) return -EFAULT; return 0; } /** * * aac_send_raw_scb * */ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) { struct fib* srbfib; int status; struct aac_srb *srbcmd = NULL; struct user_aac_srb *user_srbcmd = NULL; struct user_aac_srb __user *user_srb = arg; struct aac_srb_reply __user *user_reply; struct aac_srb_reply* reply; u32 fibsize = 0; u32 flags = 0; s32 rcode = 0; u32 data_dir; void __user *sg_user[32]; void *sg_list[32]; u32 sg_indx = 0; u32 byte_count = 0; u32 actual_fibsize64, actual_fibsize = 0; int i; if (dev->in_reset) { dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n")); return -EBUSY; } if (!capable(CAP_SYS_ADMIN)){ dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n")); return -EPERM; } /* * Allocate and initialize a Fib then setup a SRB command */ if (!(srbfib = aac_fib_alloc(dev))) { return -ENOMEM; } aac_fib_init(srbfib); /* raw_srb FIB is not FastResponseCapable */ srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable); srbcmd = (struct aac_srb*) fib_data(srbfib); memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){ dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n")); rcode = -EFAULT; goto cleanup; } if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || (fibsize > (dev->max_fib_size - 
sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; } user_srbcmd = kmalloc(fibsize, GFP_KERNEL); if (!user_srbcmd) { dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n")); rcode = -ENOMEM; goto cleanup; } if(copy_from_user(user_srbcmd, user_srb,fibsize)){ dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n")); rcode = -EFAULT; goto cleanup; } user_reply = arg+fibsize; flags = user_srbcmd->flags; /* from user in cpu order */ // Fix up srb for endian and force some values srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this srbcmd->channel = cpu_to_le32(user_srbcmd->channel); srbcmd->id = cpu_to_le32(user_srbcmd->id); srbcmd->lun = cpu_to_le32(user_srbcmd->lun); srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); srbcmd->flags = cpu_to_le32(flags); srbcmd->retry_limit = 0; // Obsolete parameter srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); switch (flags & (SRB_DataIn | SRB_DataOut)) { case SRB_DataOut: data_dir = DMA_TO_DEVICE; break; case (SRB_DataIn | SRB_DataOut): data_dir = DMA_BIDIRECTIONAL; break; case SRB_DataIn: data_dir = DMA_FROM_DEVICE; break; default: data_dir = DMA_NONE; } if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n", le32_to_cpu(srbcmd->sg.count))); rcode = -EINVAL; goto cleanup; } actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry)); actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) * (sizeof(struct sgentry64) - sizeof(struct sgentry)); /* User made a mistake - should not continue */ if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) { dprintk((KERN_DEBUG"aacraid: Bad Size specified in " "Raw SRB command calculated fibsize=%lu;%lu " "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu " "issued fibsize=%d\n", actual_fibsize, actual_fibsize64, user_srbcmd->sg.count, sizeof(struct 
aac_srb), sizeof(struct sgentry), sizeof(struct sgentry64), fibsize)); rcode = -EINVAL; goto cleanup; } if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n")); rcode = -EINVAL; goto cleanup; } byte_count = 0; if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) { struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; /* * This should also catch if user used the 32 bit sgmap */ if (actual_fibsize64 == fibsize) { actual_fibsize = actual_fibsize64; for (i = 0; i < upsg->count; i++) { u64 addr; void* p; if (upsg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? */ p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", upsg->sg[i].count,i,upsg->count)); rcode = -ENOMEM; goto cleanup; } addr = (u64)upsg->sg[i].addr[0]; addr += ((u64)upsg->sg[i].addr[1]) << 32; sg_user[i] = (void __user *)(uintptr_t)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); byte_count += upsg->sg[i].count; psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); } } else { struct user_sgmap* usg; usg = kmalloc(actual_fibsize - sizeof(struct aac_srb) + sizeof(struct sgmap), GFP_KERNEL); if (!usg) { dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); rcode = -ENOMEM; goto cleanup; } memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb) + 
sizeof(struct sgmap)); actual_fibsize = actual_fibsize64; for (i = 0; i < usg->count; i++) { u64 addr; void* p; if (usg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { kfree(usg); rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? */ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", usg->sg[i].count,i,usg->count)); kfree(usg); rcode = -ENOMEM; goto cleanup; } sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ kfree (usg); dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); byte_count += usg->sg[i].count; psg->sg[i].count = cpu_to_le32(usg->sg[i].count); } kfree (usg); } srbcmd->count = cpu_to_le32(byte_count); psg->count = cpu_to_le32(sg_indx+1); status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); } else { struct user_sgmap* upsg = &user_srbcmd->sg; struct sgmap* psg = &srbcmd->sg; if (actual_fibsize64 == fibsize) { struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; for (i = 0; i < upsg->count; i++) { uintptr_t addr; void* p; if (usg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? 
*/ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", usg->sg[i].count,i,usg->count)); rcode = -ENOMEM; goto cleanup; } addr = (u64)usg->sg[i].addr[0]; addr += ((u64)usg->sg[i].addr[1]) << 32; sg_user[i] = (void __user *)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],usg->sg[i].count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff); byte_count += usg->sg[i].count; psg->sg[i].count = cpu_to_le32(usg->sg[i].count); } } else { for (i = 0; i < upsg->count; i++) { dma_addr_t addr; void* p; if (upsg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } p = kmalloc(upsg->sg[i].count, GFP_KERNEL); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", upsg->sg[i].count, i, upsg->count)); rcode = -ENOMEM; goto cleanup; } sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p, sg_user[i], upsg->sg[i].count)) { dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); psg->sg[i].addr = cpu_to_le32(addr); byte_count += upsg->sg[i].count; psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); } } srbcmd->count = cpu_to_le32(byte_count); psg->count = cpu_to_le32(sg_indx+1); status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } if (status == -ERESTARTSYS) { rcode = -ERESTARTSYS; goto cleanup; } if (status != 0){ 
dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); rcode = -ENXIO; goto cleanup; } if (flags & SRB_DataIn) { for(i = 0 ; i <= sg_indx; i++){ byte_count = le32_to_cpu( (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count : srbcmd->sg.sg[i].count); if(copy_to_user(sg_user[i], sg_list[i], byte_count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n")); rcode = -EFAULT; goto cleanup; } } } reply = (struct aac_srb_reply *) fib_data(srbfib); if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){ dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n")); rcode = -EFAULT; goto cleanup; } cleanup: kfree(user_srbcmd); for(i=0; i <= sg_indx; i++){ kfree(sg_list[i]); } if (rcode != -ERESTARTSYS) { aac_fib_complete(srbfib); aac_fib_free(srbfib); } return rcode; } struct aac_pci_info { u32 bus; u32 slot; }; static int aac_get_pci_info(struct aac_dev* dev, void __user *arg) { struct aac_pci_info pci_info; pci_info.bus = dev->pdev->bus->number; pci_info.slot = PCI_SLOT(dev->pdev->devfn); if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) { dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n")); return -EFAULT; } return 0; } int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) { int status; /* * HBA gets first crack */ status = aac_dev_ioctl(dev, cmd, arg); if (status != -ENOTTY) return status; switch (cmd) { case FSACTL_MINIPORT_REV_CHECK: status = check_revision(dev, arg); break; case FSACTL_SEND_LARGE_FIB: case FSACTL_SENDFIB: status = ioctl_send_fib(dev, arg); break; case FSACTL_OPEN_GET_ADAPTER_FIB: status = open_getadapter_fib(dev, arg); break; case FSACTL_GET_NEXT_ADAPTER_FIB: status = next_getadapter_fib(dev, arg); break; case FSACTL_CLOSE_GET_ADAPTER_FIB: status = close_getadapter_fib(dev, arg); break; case FSACTL_SEND_RAW_SRB: status = aac_send_raw_srb(dev,arg); break; case FSACTL_GET_PCI_INFO: status = aac_get_pci_info(dev,arg); break; default: status 
= -ENOTTY; break; } return status; }
gpl-2.0
BlackBox-Kernel/blackbox_sprout_lp
drivers/media/platform/davinci/vpif_capture.c
2084
63350
/* * Copyright (C) 2009 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * TODO : add support for VBI & HBI data service * add static buffer allocation */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/string.h> #include <linux/videodev2.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-chip-ident.h> #include "vpif_capture.h" #include "vpif.h" MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(VPIF_CAPTURE_VERSION); #define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) #define vpif_dbg(level, debug, fmt, arg...) 
\ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg) static int debug = 1; static u32 ch0_numbuffers = 3; static u32 ch1_numbuffers = 3; static u32 ch0_bufsize = 1920 * 1080 * 2; static u32 ch1_bufsize = 720 * 576 * 2; module_param(debug, int, 0644); module_param(ch0_numbuffers, uint, S_IRUGO); module_param(ch1_numbuffers, uint, S_IRUGO); module_param(ch0_bufsize, uint, S_IRUGO); module_param(ch1_bufsize, uint, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level 0-1"); MODULE_PARM_DESC(ch2_numbuffers, "Channel0 buffer count (default:3)"); MODULE_PARM_DESC(ch3_numbuffers, "Channel1 buffer count (default:3)"); MODULE_PARM_DESC(ch2_bufsize, "Channel0 buffer size (default:1920 x 1080 x 2)"); MODULE_PARM_DESC(ch3_bufsize, "Channel1 buffer size (default:720 x 576 x 2)"); static struct vpif_config_params config_params = { .min_numbuffers = 3, .numbuffers[0] = 3, .numbuffers[1] = 3, .min_bufsize[0] = 720 * 480 * 2, .min_bufsize[1] = 720 * 480 * 2, .channel_bufsize[0] = 1920 * 1080 * 2, .channel_bufsize[1] = 720 * 576 * 2, }; /* global variables */ static struct vpif_device vpif_obj = { {NULL} }; static struct device *vpif_dev; static void vpif_calculate_offsets(struct channel_obj *ch); static void vpif_config_addr(struct channel_obj *ch, int muxmode); /** * buffer_prepare : callback function for buffer prepare * @vb: ptr to vb2_buffer * * This is the callback function for buffer prepare when vb2_qbuf() * function is called. 
The buffer is prepared and user space virtual address * or user address is converted into physical address */ static int vpif_buffer_prepare(struct vb2_buffer *vb) { /* Get the file handle object and channel object */ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue); struct vb2_queue *q = vb->vb2_queue; struct channel_obj *ch = fh->channel; struct common_obj *common; unsigned long addr; vpif_dbg(2, debug, "vpif_buffer_prepare\n"); common = &ch->common[VPIF_VIDEO_INDEX]; if (vb->state != VB2_BUF_STATE_ACTIVE && vb->state != VB2_BUF_STATE_PREPARED) { vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage); if (vb2_plane_vaddr(vb, 0) && vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) goto exit; addr = vb2_dma_contig_plane_dma_addr(vb, 0); if (q->streaming) { if (!IS_ALIGNED((addr + common->ytop_off), 8) || !IS_ALIGNED((addr + common->ybtm_off), 8) || !IS_ALIGNED((addr + common->ctop_off), 8) || !IS_ALIGNED((addr + common->cbtm_off), 8)) goto exit; } } return 0; exit: vpif_dbg(1, debug, "buffer_prepare:offset is not aligned to 8 bytes\n"); return -EINVAL; } /** * vpif_buffer_queue_setup : Callback function for buffer setup. * @vq: vb2_queue ptr * @fmt: v4l2 format * @nbuffers: ptr to number of buffers requested by application * @nplanes:: contains number of distinct video planes needed to hold a frame * @sizes[]: contains the size (in bytes) of each plane. 
* @alloc_ctxs: ptr to allocation context * * This callback function is called when reqbuf() is called to adjust * the buffer count and buffer size */ static int vpif_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { /* Get the file handle object and channel object */ struct vpif_fh *fh = vb2_get_drv_priv(vq); struct channel_obj *ch = fh->channel; struct common_obj *common; unsigned long size; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_setup\n"); /* If memory type is not mmap, return */ if (V4L2_MEMORY_MMAP == common->memory) { /* Calculate the size of the buffer */ size = config_params.channel_bufsize[ch->channel_id]; /* * Checking if the buffer size exceeds the available buffer * ycmux_mode = 0 means 1 channel mode HD and * ycmux_mode = 1 means 2 channels mode SD */ if (ch->vpifparams.std_info.ycmux_mode == 0) { if (config_params.video_limit[ch->channel_id]) while (size * *nbuffers > (config_params.video_limit[0] + config_params.video_limit[1])) (*nbuffers)--; } else { if (config_params.video_limit[ch->channel_id]) while (size * *nbuffers > config_params.video_limit[ch->channel_id]) (*nbuffers)--; } } else { size = common->fmt.fmt.pix.sizeimage; } if (*nbuffers < config_params.min_numbuffers) *nbuffers = config_params.min_numbuffers; *nplanes = 1; sizes[0] = size; alloc_ctxs[0] = common->alloc_ctx; return 0; } /** * vpif_buffer_queue : Callback function to add buffer to DMA queue * @vb: ptr to vb2_buffer */ static void vpif_buffer_queue(struct vb2_buffer *vb) { /* Get the file handle object and channel object */ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue); struct channel_obj *ch = fh->channel; struct vpif_cap_buffer *buf = container_of(vb, struct vpif_cap_buffer, vb); struct common_obj *common; unsigned long flags; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_queue\n"); 
spin_lock_irqsave(&common->irqlock, flags); /* add the buffer to the DMA queue */ list_add_tail(&buf->list, &common->dma_queue); spin_unlock_irqrestore(&common->irqlock, flags); } /** * vpif_buf_cleanup : Callback function to free buffer * @vb: ptr to vb2_buffer * * This function is called from the videobuf2 layer to free memory * allocated to the buffers */ static void vpif_buf_cleanup(struct vb2_buffer *vb) { /* Get the file handle object and channel object */ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue); struct vpif_cap_buffer *buf = container_of(vb, struct vpif_cap_buffer, vb); struct channel_obj *ch = fh->channel; struct common_obj *common; unsigned long flags; common = &ch->common[VPIF_VIDEO_INDEX]; spin_lock_irqsave(&common->irqlock, flags); if (vb->state == VB2_BUF_STATE_ACTIVE) list_del_init(&buf->list); spin_unlock_irqrestore(&common->irqlock, flags); } static void vpif_wait_prepare(struct vb2_queue *vq) { struct vpif_fh *fh = vb2_get_drv_priv(vq); struct channel_obj *ch = fh->channel; struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; mutex_unlock(&common->lock); } static void vpif_wait_finish(struct vb2_queue *vq) { struct vpif_fh *fh = vb2_get_drv_priv(vq); struct channel_obj *ch = fh->channel; struct common_obj *common; common = &ch->common[VPIF_VIDEO_INDEX]; mutex_lock(&common->lock); } static int vpif_buffer_init(struct vb2_buffer *vb) { struct vpif_cap_buffer *buf = container_of(vb, struct vpif_cap_buffer, vb); INIT_LIST_HEAD(&buf->list); return 0; } static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} }; static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vpif_capture_config *vpif_config_data = vpif_dev->platform_data; struct vpif_fh *fh = vb2_get_drv_priv(vq); struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpif = &ch->vpifparams; unsigned long addr = 0; unsigned long flags; int ret; /* If buffer queue is empty, 
return error */ spin_lock_irqsave(&common->irqlock, flags); if (list_empty(&common->dma_queue)) { spin_unlock_irqrestore(&common->irqlock, flags); vpif_dbg(1, debug, "buffer queue is empty\n"); return -EIO; } /* Get the next frame from the buffer queue */ common->cur_frm = common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); /* Remove buffer from the buffer queue */ list_del(&common->cur_frm->list); spin_unlock_irqrestore(&common->irqlock, flags); /* Mark state of the current frame to active */ common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE; /* Initialize field_id and started member */ ch->field_id = 0; common->started = 1; addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0); /* Calculate the offset for Y and C data in the buffer */ vpif_calculate_offsets(ch); if ((vpif->std_info.frm_fmt && ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE) && (common->fmt.fmt.pix.field != V4L2_FIELD_ANY))) || (!vpif->std_info.frm_fmt && (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) { vpif_dbg(1, debug, "conflict in field format and std format\n"); return -EINVAL; } /* configure 1 or 2 channel mode */ if (vpif_config_data->setup_input_channel_mode) { ret = vpif_config_data-> setup_input_channel_mode(vpif->std_info.ycmux_mode); if (ret < 0) { vpif_dbg(1, debug, "can't set vpif channel mode\n"); return ret; } } /* Call vpif_set_params function to set the parameters and addresses */ ret = vpif_set_video_params(vpif, ch->channel_id); if (ret < 0) { vpif_dbg(1, debug, "can't set video params\n"); return ret; } common->started = ret; vpif_config_addr(ch, ret); common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); /** * Set interrupt for both the fields in VPIF Register enable channel in * VPIF register */ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)) { channel0_intr_assert(); channel0_intr_enable(1); enable_channel0(1); } if 
((VPIF_CHANNEL1_VIDEO == ch->channel_id) || (common->started == 2)) { channel1_intr_assert(); channel1_intr_enable(1); enable_channel1(1); } return 0; } /* abort streaming and wait for last buffer */ static int vpif_stop_streaming(struct vb2_queue *vq) { struct vpif_fh *fh = vb2_get_drv_priv(vq); struct channel_obj *ch = fh->channel; struct common_obj *common; unsigned long flags; if (!vb2_is_streaming(vq)) return 0; common = &ch->common[VPIF_VIDEO_INDEX]; /* release all active buffers */ spin_lock_irqsave(&common->irqlock, flags); while (!list_empty(&common->dma_queue)) { common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); list_del(&common->next_frm->list); vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR); } spin_unlock_irqrestore(&common->irqlock, flags); return 0; } static struct vb2_ops video_qops = { .queue_setup = vpif_buffer_queue_setup, .wait_prepare = vpif_wait_prepare, .wait_finish = vpif_wait_finish, .buf_init = vpif_buffer_init, .buf_prepare = vpif_buffer_prepare, .start_streaming = vpif_start_streaming, .stop_streaming = vpif_stop_streaming, .buf_cleanup = vpif_buf_cleanup, .buf_queue = vpif_buffer_queue, }; /** * vpif_process_buffer_complete: process a completed buffer * @common: ptr to common channel object * * This function time stamp the buffer and mark it as DONE. It also * wake up any process waiting on the QUEUE and set the next buffer * as current */ static void vpif_process_buffer_complete(struct common_obj *common) { v4l2_get_timestamp(&common->cur_frm->vb.v4l2_buf.timestamp); vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_DONE); /* Make curFrm pointing to nextFrm */ common->cur_frm = common->next_frm; } /** * vpif_schedule_next_buffer: set next buffer address for capture * @common : ptr to common channel object * * This function will get next buffer from the dma queue and * set the buffer address in the vpif register for capture. 
* the buffer is marked active */ static void vpif_schedule_next_buffer(struct common_obj *common) { unsigned long addr = 0; spin_lock(&common->irqlock); common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); /* Remove that buffer from the buffer queue */ list_del(&common->next_frm->list); spin_unlock(&common->irqlock); common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE; addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0); /* Set top and bottom field addresses in VPIF registers */ common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); } /** * vpif_channel_isr : ISR handler for vpif capture * @irq: irq number * @dev_id: dev_id ptr * * It changes status of the captured buffer, takes next buffer from the queue * and sets its address in VPIF registers */ static irqreturn_t vpif_channel_isr(int irq, void *dev_id) { struct vpif_device *dev = &vpif_obj; struct common_obj *common; struct channel_obj *ch; enum v4l2_field field; int channel_id = 0; int fid = -1, i; channel_id = *(int *)(dev_id); if (!vpif_intr_status(channel_id)) return IRQ_NONE; ch = dev->dev[channel_id]; field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field; for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) { common = &ch->common[i]; /* skip If streaming is not started in this channel */ if (0 == common->started) continue; /* Check the field format */ if (1 == ch->vpifparams.std_info.frm_fmt) { /* Progressive mode */ spin_lock(&common->irqlock); if (list_empty(&common->dma_queue)) { spin_unlock(&common->irqlock); continue; } spin_unlock(&common->irqlock); if (!channel_first_int[i][channel_id]) vpif_process_buffer_complete(common); channel_first_int[i][channel_id] = 0; vpif_schedule_next_buffer(common); channel_first_int[i][channel_id] = 0; } else { /** * Interlaced mode. 
If it is first interrupt, ignore * it */ if (channel_first_int[i][channel_id]) { channel_first_int[i][channel_id] = 0; continue; } if (0 == i) { ch->field_id ^= 1; /* Get field id from VPIF registers */ fid = vpif_channel_getfid(ch->channel_id); if (fid != ch->field_id) { /** * If field id does not match stored * field id, make them in sync */ if (0 == fid) ch->field_id = fid; return IRQ_HANDLED; } } /* device field id and local field id are in sync */ if (0 == fid) { /* this is even field */ if (common->cur_frm == common->next_frm) continue; /* mark the current buffer as done */ vpif_process_buffer_complete(common); } else if (1 == fid) { /* odd field */ spin_lock(&common->irqlock); if (list_empty(&common->dma_queue) || (common->cur_frm != common->next_frm)) { spin_unlock(&common->irqlock); continue; } spin_unlock(&common->irqlock); vpif_schedule_next_buffer(common); } } } return IRQ_HANDLED; } /** * vpif_update_std_info() - update standard related info * @ch: ptr to channel object * * For a given standard selected by application, update values * in the device data structures */ static int vpif_update_std_info(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpifparams = &ch->vpifparams; const struct vpif_channel_config_params *config; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct video_obj *vid_ch = &ch->video; int index; vpif_dbg(2, debug, "vpif_update_std_info\n"); for (index = 0; index < vpif_ch_params_count; index++) { config = &vpif_ch_params[index]; if (config->hd_sd == 0) { vpif_dbg(2, debug, "SD format\n"); if (config->stdid & vid_ch->stdid) { memcpy(std_info, config, sizeof(*config)); break; } } else { vpif_dbg(2, debug, "HD format\n"); if (!memcmp(&config->dv_timings, &vid_ch->dv_timings, sizeof(vid_ch->dv_timings))) { memcpy(std_info, config, sizeof(*config)); break; } } } /* standard not found */ if (index == vpif_ch_params_count) return -EINVAL; 
common->fmt.fmt.pix.width = std_info->width;
	common->width = std_info->width;
	common->fmt.fmt.pix.height = std_info->height;
	common->height = std_info->height;
	/* pitch follows the active line width of the detected standard */
	common->fmt.fmt.pix.bytesperline = std_info->width;
	vpifparams->video_params.hpitch = std_info->width;
	vpifparams->video_params.storage_mode = std_info->frm_fmt;
	return 0;
}

/**
 * vpif_calculate_offsets : This function calculates buffers offsets
 * @ch : ptr to channel object
 *
 * This function calculates buffer offsets for Y and C in the top and
 * bottom field
 */
static void vpif_calculate_offsets(struct channel_obj *ch)
{
	unsigned int hpitch, vpitch, sizeimage;
	struct video_obj *vid_ch = &(ch->video);
	struct vpif_params *vpifparams = &ch->vpifparams;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	enum v4l2_field field = common->fmt.fmt.pix.field;

	vpif_dbg(2, debug, "vpif_calculate_offsets\n");

	/*
	 * Resolve V4L2_FIELD_ANY to a concrete layout: a progressive
	 * standard (frm_fmt set) captures as FIELD_NONE, otherwise
	 * FIELD_INTERLACED.
	 */
	if (V4L2_FIELD_ANY == field) {
		if (vpifparams->std_info.frm_fmt)
			vid_ch->buf_field = V4L2_FIELD_NONE;
		else
			vid_ch->buf_field = V4L2_FIELD_INTERLACED;
	} else
		vid_ch->buf_field = common->fmt.fmt.pix.field;

	sizeimage = common->fmt.fmt.pix.sizeimage;

	hpitch = common->fmt.fmt.pix.bytesperline;
	/* NOTE(review): assumes bytesperline is non-zero here (it is set
	 * from the standard width) — a zero pitch would divide by zero */
	vpitch = sizeimage / (hpitch * 2);

	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ytop_off = 0;
		common->ybtm_off = hpitch;
		common->ctop_off = sizeimage / 2;
		common->cbtm_off = sizeimage / 2 + hpitch;
	} else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ytop_off = 0;
		common->ybtm_off = sizeimage / 4;
		common->ctop_off = sizeimage / 2;
		common->cbtm_off = common->ctop_off + sizeimage / 4;
	} else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ybtm_off = 0;
		common->ytop_off = sizeimage / 4;
		common->cbtm_off = sizeimage / 2;
		common->ctop_off = common->cbtm_off + sizeimage / 4;
	}

	/* storage mode 1 only for merged-field (NONE/INTERLACED) buffers */
	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field))
		vpifparams->video_params.storage_mode = 1;
	else
		vpifparams->video_params.storage_mode = 0;

	/* progressive keeps the pitch; interlaced (ANY/INTERLACED) skips
	 * every other line, so the hardware pitch is doubled */
	if (1 == vpifparams->std_info.frm_fmt)
		vpifparams->video_params.hpitch =
		    common->fmt.fmt.pix.bytesperline;
	else {
		if ((field == V4L2_FIELD_ANY) ||
		    (field == V4L2_FIELD_INTERLACED))
			vpifparams->video_params.hpitch =
			    common->fmt.fmt.pix.bytesperline * 2;
		else
			vpifparams->video_params.hpitch =
			    common->fmt.fmt.pix.bytesperline;
	}

	ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid;
}

/**
 * vpif_config_format: configure default frame format in the device
 * ch : ptr to channel object
 */
static void vpif_config_format(struct channel_obj *ch)
{
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];

	vpif_dbg(2, debug, "vpif_config_format\n");

	common->fmt.fmt.pix.field = V4L2_FIELD_ANY;
	/* with no driver-allocated buffers the user must supply memory */
	if (config_params.numbuffers[ch->channel_id] == 0)
		common->memory = V4L2_MEMORY_USERPTR;
	else
		common->memory = V4L2_MEMORY_MMAP;

	common->fmt.fmt.pix.sizeimage =
	    config_params.channel_bufsize[ch->channel_id];

	/* pixel format is fixed by the board interface type */
	if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
	else
		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;

	common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
}

/**
 * vpif_get_default_field() - Get default field type based on interface
 * @vpif_params - ptr to vpif params
 */
static inline enum v4l2_field vpif_get_default_field(
				struct vpif_interface *iface)
{
	return (iface->if_type == VPIF_IF_RAW_BAYER) ? V4L2_FIELD_NONE :
						V4L2_FIELD_INTERLACED;
}

/**
 * vpif_check_format() - check given pixel format for compatibility
 * @ch - channel ptr
 * @pixfmt - Given pixel format
 * @update - update the values as per hardware requirement
 *
 * Check the application pixel format for S_FMT and update the input
 * values as per hardware limits for TRY_FMT. The default pixel and
 * field format is selected based on interface type.
*/ static int vpif_check_format(struct channel_obj *ch, struct v4l2_pix_format *pixfmt, int update) { struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]); struct vpif_params *vpif_params = &ch->vpifparams; enum v4l2_field field = pixfmt->field; u32 sizeimage, hpitch, vpitch; int ret = -EINVAL; vpif_dbg(2, debug, "vpif_check_format\n"); /** * first check for the pixel format. If if_type is Raw bayer, * only V4L2_PIX_FMT_SBGGR8 format is supported. Otherwise only * V4L2_PIX_FMT_YUV422P is supported */ if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) { if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8) { if (!update) { vpif_dbg(2, debug, "invalid pix format\n"); goto exit; } pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } } else { if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) { if (!update) { vpif_dbg(2, debug, "invalid pixel format\n"); goto exit; } pixfmt->pixelformat = V4L2_PIX_FMT_YUV422P; } } if (!(VPIF_VALID_FIELD(field))) { if (!update) { vpif_dbg(2, debug, "invalid field format\n"); goto exit; } /** * By default use FIELD_NONE for RAW Bayer capture * and FIELD_INTERLACED for other interfaces */ field = vpif_get_default_field(&vpif_params->iface); } else if (field == V4L2_FIELD_ANY) /* unsupported field. 
Use default */ field = vpif_get_default_field(&vpif_params->iface); /* validate the hpitch */ hpitch = pixfmt->bytesperline; if (hpitch < vpif_params->std_info.width) { if (!update) { vpif_dbg(2, debug, "invalid hpitch\n"); goto exit; } hpitch = vpif_params->std_info.width; } sizeimage = pixfmt->sizeimage; vpitch = sizeimage / (hpitch * 2); /* validate the vpitch */ if (vpitch < vpif_params->std_info.height) { if (!update) { vpif_dbg(2, debug, "Invalid vpitch\n"); goto exit; } vpitch = vpif_params->std_info.height; } /* Check for 8 byte alignment */ if (!ALIGN(hpitch, 8)) { if (!update) { vpif_dbg(2, debug, "invalid pitch alignment\n"); goto exit; } /* adjust to next 8 byte boundary */ hpitch = (((hpitch + 7) / 8) * 8); } /* if update is set, modify the bytesperline and sizeimage */ if (update) { pixfmt->bytesperline = hpitch; pixfmt->sizeimage = hpitch * vpitch * 2; } /** * Image width and height is always based on current standard width and * height */ pixfmt->width = common->fmt.fmt.pix.width; pixfmt->height = common->fmt.fmt.pix.height; return 0; exit: return ret; } /** * vpif_config_addr() - function to configure buffer address in vpif * @ch - channel ptr * @muxmode - channel mux mode */ static void vpif_config_addr(struct channel_obj *ch, int muxmode) { struct common_obj *common; vpif_dbg(2, debug, "vpif_config_addr\n"); common = &(ch->common[VPIF_VIDEO_INDEX]); if (VPIF_CHANNEL1_VIDEO == ch->channel_id) common->set_addr = ch1_set_videobuf_addr; else if (2 == muxmode) common->set_addr = ch0_set_videobuf_addr_yc_nmux; else common->set_addr = ch0_set_videobuf_addr; } /** * vpif_mmap : It is used to map kernel space buffers into user spaces * @filep: file pointer * @vma: ptr to vm_area_struct */ static int vpif_mmap(struct file *filep, struct vm_area_struct *vma) { /* Get the channel object and file handle object */ struct vpif_fh *fh = filep->private_data; struct channel_obj *ch = fh->channel; struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]); int 
ret;

	vpif_dbg(2, debug, "vpif_mmap\n");

	if (mutex_lock_interruptible(&common->lock))
		return -ERESTARTSYS;
	ret = vb2_mmap(&common->buffer_queue, vma);
	mutex_unlock(&common->lock);
	return ret;
}

/**
 * vpif_poll: It is used for select/poll system call
 * @filep: file pointer
 * @wait: poll table to wait
 */
static unsigned int vpif_poll(struct file *filep, poll_table * wait)
{
	struct vpif_fh *fh = filep->private_data;
	struct channel_obj *channel = fh->channel;
	struct common_obj *common = &(channel->common[VPIF_VIDEO_INDEX]);
	unsigned int res = 0;

	vpif_dbg(2, debug, "vpif_poll\n");

	/* only poll the queue once streaming has been started */
	if (common->started) {
		mutex_lock(&common->lock);
		res = vb2_poll(&common->buffer_queue, filep, wait);
		mutex_unlock(&common->lock);
	}
	return res;
}

/**
 * vpif_open : vpif open handler
 * @filep: file ptr
 *
 * It creates object of file handle structure and stores it in private_data
 * member of filepointer
 */
static int vpif_open(struct file *filep)
{
	struct video_device *vdev = video_devdata(filep);
	struct common_obj *common;
	struct video_obj *vid_ch;
	struct channel_obj *ch;
	struct vpif_fh *fh;

	vpif_dbg(2, debug, "vpif_open\n");

	ch = video_get_drvdata(vdev);

	vid_ch = &ch->video;
	common = &ch->common[VPIF_VIDEO_INDEX];

	/* Allocate memory for the file handle object */
	fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
	if (NULL == fh) {
		vpif_err("unable to allocate memory for file handle object\n");
		return -ENOMEM;
	}

	/* on lock failure the handle must be freed before bailing out */
	if (mutex_lock_interruptible(&common->lock)) {
		kfree(fh);
		return -ERESTARTSYS;
	}
	/* store pointer to fh in private_data member of filep */
	filep->private_data = fh;
	fh->channel = ch;
	fh->initialized = 0;
	/* The first opener initializes the channel and is marked as the
	 * handle that owns that initialization (fh->initialized). */
	/* If decoder is not initialized. initialize it */
	if (!ch->initialized) {
		fh->initialized = 1;
		ch->initialized = 1;
		memset(&(ch->vpifparams), 0, sizeof(struct vpif_params));
	}
	/* Increment channel usrs counter */
	ch->usrs++;
	/* Set io_allowed member to false */
	fh->io_allowed[VPIF_VIDEO_INDEX] = 0;
	/* Initialize priority of this instance to default priority */
	fh->prio = V4L2_PRIORITY_UNSET;
	v4l2_prio_open(&ch->prio, &fh->prio);
	mutex_unlock(&common->lock);
	return 0;
}

/**
 * vpif_release : function to clean up file close
 * @filep: file pointer
 *
 * This function deletes buffer queue, frees the buffers and the vpif file
 * handle
 */
static int vpif_release(struct file *filep)
{
	struct vpif_fh *fh = filep->private_data;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common;

	vpif_dbg(2, debug, "vpif_release\n");

	common = &ch->common[VPIF_VIDEO_INDEX];

	mutex_lock(&common->lock);
	/* if this instance is doing IO, stop the hardware and tear down
	 * the buffer queue on its behalf */
	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
		/* Reset io_usrs member of channel object */
		common->io_usrs = 0;
		/* Disable channel as per its device type and channel id */
		if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
			enable_channel0(0);
			channel0_intr_enable(0);
		}
		/* channel 1 is also disabled when channel 0 runs in
		 * non-multiplexed mode (started == 2) */
		if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
		    (2 == common->started)) {
			enable_channel1(0);
			channel1_intr_enable(0);
		}
		common->started = 0;
		/* Free buffers allocated */
		vb2_queue_release(&common->buffer_queue);
		vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
	}

	/* Decrement channel usrs counter */
	ch->usrs--;

	/* Close the priority */
	v4l2_prio_close(&ch->prio, fh->prio);

	/* the initializing handle releases the channel initialization */
	if (fh->initialized)
		ch->initialized = 0;

	mutex_unlock(&common->lock);
	filep->private_data = NULL;
	kfree(fh);
	return 0;
}

/**
 * vpif_reqbufs() - request buffer handler
 * @file: file ptr
 * @priv: file handle
 * @reqbuf: request buffer structure ptr
 */
static int vpif_reqbufs(struct file *file, void *priv,
			struct v4l2_requestbuffers *reqbuf)
{
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common;
	u8 index = 0;
	struct vb2_queue *q;
int ret; vpif_dbg(2, debug, "vpif_reqbufs\n"); /** * This file handle has not initialized the channel, * It is not allowed to do settings */ if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } if (V4L2_BUF_TYPE_VIDEO_CAPTURE != reqbuf->type || !vpif_dev) return -EINVAL; index = VPIF_VIDEO_INDEX; common = &ch->common[index]; if (0 != common->io_usrs) return -EBUSY; /* Initialize videobuf2 queue as per the buffer type */ common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev); if (IS_ERR(common->alloc_ctx)) { vpif_err("Failed to get the context\n"); return PTR_ERR(common->alloc_ctx); } q = &common->buffer_queue; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR; q->drv_priv = fh; q->ops = &video_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct vpif_cap_buffer); q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; ret = vb2_queue_init(q); if (ret) { vpif_err("vpif_capture: vb2_queue_init() failed\n"); vb2_dma_contig_cleanup_ctx(common->alloc_ctx); return ret; } /* Set io allowed member of file handle to TRUE */ fh->io_allowed[index] = 1; /* Increment io usrs member of channel object to 1 */ common->io_usrs = 1; /* Store type of memory requested in channel object */ common->memory = reqbuf->memory; INIT_LIST_HEAD(&common->dma_queue); /* Allocate buffers */ return vb2_reqbufs(&common->buffer_queue, reqbuf); } /** * vpif_querybuf() - query buffer handler * @file: file ptr * @priv: file handle * @buf: v4l2 buffer structure ptr */ static int vpif_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_querybuf\n"); if (common->fmt.type != buf->type) return -EINVAL; if (common->memory != V4L2_MEMORY_MMAP) { vpif_dbg(1, debug, "Invalid memory\n"); 
return -EINVAL; } return vb2_querybuf(&common->buffer_queue, buf); } /** * vpif_qbuf() - query buffer handler * @file: file ptr * @priv: file handle * @buf: v4l2 buffer structure ptr */ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct v4l2_buffer tbuf = *buf; vpif_dbg(2, debug, "vpif_qbuf\n"); if (common->fmt.type != tbuf.type) { vpif_err("invalid buffer type\n"); return -EINVAL; } if (!fh->io_allowed[VPIF_VIDEO_INDEX]) { vpif_err("fh io not allowed\n"); return -EACCES; } return vb2_qbuf(&common->buffer_queue, buf); } /** * vpif_dqbuf() - query buffer handler * @file: file ptr * @priv: file handle * @buf: v4l2 buffer structure ptr */ static int vpif_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_dqbuf\n"); return vb2_dqbuf(&common->buffer_queue, buf, (file->f_flags & O_NONBLOCK)); } /** * vpif_streamon() - streamon handler * @file: file ptr * @priv: file handle * @buftype: v4l2 buffer type */ static int vpif_streamon(struct file *file, void *priv, enum v4l2_buf_type buftype) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id]; struct vpif_params *vpif; int ret = 0; vpif_dbg(2, debug, "vpif_streamon\n"); vpif = &ch->vpifparams; if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) { vpif_dbg(1, debug, "buffer type not supported\n"); return -EINVAL; } /* If file handle is not allowed IO, return error */ if (!fh->io_allowed[VPIF_VIDEO_INDEX]) { vpif_dbg(1, debug, "io not allowed\n"); return -EACCES; } /* If Streaming is already started, return error */ if (common->started) { vpif_dbg(1, debug, "channel->started\n"); return 
-EBUSY; } if ((ch->channel_id == VPIF_CHANNEL0_VIDEO && oth_ch->common[VPIF_VIDEO_INDEX].started && vpif->std_info.ycmux_mode == 0) || ((ch->channel_id == VPIF_CHANNEL1_VIDEO) && (2 == oth_ch->common[VPIF_VIDEO_INDEX].started))) { vpif_dbg(1, debug, "other channel is being used\n"); return -EBUSY; } ret = vpif_check_format(ch, &common->fmt.fmt.pix, 0); if (ret) return ret; /* Enable streamon on the sub device */ ret = v4l2_subdev_call(ch->sd, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) { vpif_dbg(1, debug, "stream on failed in subdev\n"); return ret; } /* Call vb2_streamon to start streaming in videobuf2 */ ret = vb2_streamon(&common->buffer_queue, buftype); if (ret) { vpif_dbg(1, debug, "vb2_streamon\n"); return ret; } return ret; } /** * vpif_streamoff() - streamoff handler * @file: file ptr * @priv: file handle * @buftype: v4l2 buffer type */ static int vpif_streamoff(struct file *file, void *priv, enum v4l2_buf_type buftype) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret; vpif_dbg(2, debug, "vpif_streamoff\n"); if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) { vpif_dbg(1, debug, "buffer type not supported\n"); return -EINVAL; } /* If io is allowed for this file handle, return error */ if (!fh->io_allowed[VPIF_VIDEO_INDEX]) { vpif_dbg(1, debug, "io not allowed\n"); return -EACCES; } /* If streaming is not started, return error */ if (!common->started) { vpif_dbg(1, debug, "channel->started\n"); return -EINVAL; } /* disable channel */ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { enable_channel0(0); channel0_intr_enable(0); } else { enable_channel1(0); channel1_intr_enable(0); } common->started = 0; ret = v4l2_subdev_call(ch->sd, video, s_stream, 0); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) vpif_dbg(1, debug, "stream off failed in subdev\n"); return vb2_streamoff(&common->buffer_queue, buftype); } /** * vpif_input_to_subdev() - Maps input to sub 
device * @vpif_cfg - global config ptr * @chan_cfg - channel config ptr * @input_index - Given input index from application * * lookup the sub device information for a given input index. * we report all the inputs to application. inputs table also * has sub device name for the each input */ static int vpif_input_to_subdev( struct vpif_capture_config *vpif_cfg, struct vpif_capture_chan_config *chan_cfg, int input_index) { struct vpif_subdev_info *subdev_info; const char *subdev_name; int i; vpif_dbg(2, debug, "vpif_input_to_subdev\n"); subdev_name = chan_cfg->inputs[input_index].subdev_name; if (subdev_name == NULL) return -1; /* loop through the sub device list to get the sub device info */ for (i = 0; i < vpif_cfg->subdev_count; i++) { subdev_info = &vpif_cfg->subdev_info[i]; if (!strcmp(subdev_info->name, subdev_name)) return i; } return -1; } /** * vpif_set_input() - Select an input * @vpif_cfg - global config ptr * @ch - channel * @_index - Given input index from application * * Select the given input. 
*/ static int vpif_set_input( struct vpif_capture_config *vpif_cfg, struct channel_obj *ch, int index) { struct vpif_capture_chan_config *chan_cfg = &vpif_cfg->chan_config[ch->channel_id]; struct vpif_subdev_info *subdev_info = NULL; struct v4l2_subdev *sd = NULL; u32 input = 0, output = 0; int sd_index; int ret; sd_index = vpif_input_to_subdev(vpif_cfg, chan_cfg, index); if (sd_index >= 0) { sd = vpif_obj.sd[sd_index]; subdev_info = &vpif_cfg->subdev_info[sd_index]; } /* first setup input path from sub device to vpif */ if (sd && vpif_cfg->setup_input_path) { ret = vpif_cfg->setup_input_path(ch->channel_id, subdev_info->name); if (ret < 0) { vpif_dbg(1, debug, "couldn't setup input path for the" \ " sub device %s, for input index %d\n", subdev_info->name, index); return ret; } } if (sd) { input = chan_cfg->inputs[index].input_route; output = chan_cfg->inputs[index].output_route; ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0); if (ret < 0 && ret != -ENOIOCTLCMD) { vpif_dbg(1, debug, "Failed to set input\n"); return ret; } } ch->input_idx = index; ch->sd = sd; /* copy interface parameters to vpif */ ch->vpifparams.iface = chan_cfg->vpif_if; /* update tvnorms from the sub device input info */ ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std; return 0; } /** * vpif_querystd() - querystd handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id * * This function is called to detect standard at the selected input */ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; int ret = 0; vpif_dbg(2, debug, "vpif_querystd\n"); /* Call querystd function of decoder device */ ret = v4l2_subdev_call(ch->sd, video, querystd, std_id); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -ENODATA; if (ret) { vpif_dbg(1, debug, "Failed to query standard for sub devices\n"); return ret; } return 0; } /** * vpif_g_std() - get STD handler * @file: file ptr * 
@priv: file handle * @std_id: ptr to std id */ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; vpif_dbg(2, debug, "vpif_g_std\n"); *std = ch->video.stdid; return 0; } /** * vpif_s_std() - set STD handler * @file: file ptr * @priv: file handle * @std_id: ptr to std id */ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret = 0; vpif_dbg(2, debug, "vpif_s_std\n"); if (common->started) { vpif_err("streaming in progress\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (0 != ret) return ret; fh->initialized = 1; /* Call encoder subdevice function to set the standard */ ch->video.stdid = std_id; memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); /* Get the information about the standard */ if (vpif_update_std_info(ch)) { vpif_err("Error getting the standard info\n"); return -EINVAL; } /* Configure the default format information */ vpif_config_format(ch); /* set standard in the sub device */ ret = v4l2_subdev_call(ch->sd, core, s_std, std_id); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) { vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); return ret; } return 0; } /** * vpif_enum_input() - ENUMINPUT handler * @file: file ptr * @priv: file handle * @input: ptr to input structure */ static int vpif_enum_input(struct file *file, void *priv, struct v4l2_input *input) { struct vpif_capture_config *config = vpif_dev->platform_data; struct vpif_capture_chan_config *chan_cfg; struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; chan_cfg = &config->chan_config[ch->channel_id]; if (input->index >= 
chan_cfg->input_count) { vpif_dbg(1, debug, "Invalid input index\n"); return -EINVAL; } memcpy(input, &chan_cfg->inputs[input->index].input, sizeof(*input)); return 0; } /** * vpif_g_input() - Get INPUT handler * @file: file ptr * @priv: file handle * @index: ptr to input index */ static int vpif_g_input(struct file *file, void *priv, unsigned int *index) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; *index = ch->input_idx; return 0; } /** * vpif_s_input() - Set INPUT handler * @file: file ptr * @priv: file handle * @index: input index */ static int vpif_s_input(struct file *file, void *priv, unsigned int index) { struct vpif_capture_config *config = vpif_dev->platform_data; struct vpif_capture_chan_config *chan_cfg; struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; int ret; chan_cfg = &config->chan_config[ch->channel_id]; if (index >= chan_cfg->input_count) return -EINVAL; if (common->started) { vpif_err("Streaming in progress\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (0 != ret) return ret; fh->initialized = 1; return vpif_set_input(config, ch, index); } /** * vpif_enum_fmt_vid_cap() - ENUM_FMT handler * @file: file ptr * @priv: file handle * @index: input index */ static int vpif_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; if (fmt->index != 0) { vpif_dbg(1, debug, "Invalid format index\n"); return -EINVAL; } /* Fill in the information about format */ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) { fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; strcpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb"); fmt->pixelformat = V4L2_PIX_FMT_SBGGR8; } else { fmt->type = 
V4L2_BUF_TYPE_VIDEO_CAPTURE; strcpy(fmt->description, "YCbCr4:2:2 YC Planar"); fmt->pixelformat = V4L2_PIX_FMT_YUV422P; } return 0; } /** * vpif_try_fmt_vid_cap() - TRY_FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; return vpif_check_format(ch, pixfmt, 1); } /** * vpif_g_fmt_vid_cap() - Set INPUT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; /* Check the validity of the buffer type */ if (common->fmt.type != fmt->type) return -EINVAL; /* Fill in the information about format */ *fmt = common->fmt; return 0; } /** * vpif_s_fmt_vid_cap() - Set FMT handler * @file: file ptr * @priv: file handle * @fmt: ptr to v4l2 format structure */ static int vpif_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct v4l2_pix_format *pixfmt; int ret = 0; vpif_dbg(2, debug, "%s\n", __func__); /* If streaming is started, return error */ if (common->started) { vpif_dbg(1, debug, "Streaming is started\n"); return -EBUSY; } if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) || (VPIF_CHANNEL1_VIDEO == ch->channel_id)) { if (!fh->initialized) { vpif_dbg(1, debug, "Channel Busy\n"); return -EBUSY; } } ret = v4l2_prio_check(&ch->prio, fh->prio); if (0 != ret) return ret; fh->initialized = 1; pixfmt = &fmt->fmt.pix; /* Check for valid field format */ ret = vpif_check_format(ch, pixfmt, 0); if (ret) return ret; /* store the format in the channel object */ 
common->fmt = *fmt; return 0; } /** * vpif_querycap() - QUERYCAP handler * @file: file ptr * @priv: file handle * @cap: ptr to v4l2_capability structure */ static int vpif_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vpif_capture_config *config = vpif_dev->platform_data; cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; snprintf(cap->driver, sizeof(cap->driver), "%s", dev_name(vpif_dev)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev_name(vpif_dev)); strlcpy(cap->card, config->card_name, sizeof(cap->card)); return 0; } /** * vpif_g_priority() - get priority handler * @file: file ptr * @priv: file handle * @prio: ptr to v4l2_priority structure */ static int vpif_g_priority(struct file *file, void *priv, enum v4l2_priority *prio) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; *prio = v4l2_prio_max(&ch->prio); return 0; } /** * vpif_s_priority() - set priority handler * @file: file ptr * @priv: file handle * @prio: ptr to v4l2_priority structure */ static int vpif_s_priority(struct file *file, void *priv, enum v4l2_priority p) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; return v4l2_prio_change(&ch->prio, &fh->prio, p); } /** * vpif_cropcap() - cropcap handler * @file: file ptr * @priv: file handle * @crop: ptr to v4l2_cropcap structure */ static int vpif_cropcap(struct file *file, void *priv, struct v4l2_cropcap *crop) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; if (V4L2_BUF_TYPE_VIDEO_CAPTURE != crop->type) return -EINVAL; crop->bounds.left = 0; crop->bounds.top = 0; crop->bounds.height = common->height; crop->bounds.width = common->width; crop->defrect = crop->bounds; return 0; } /** * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: input timings */ static int 
vpif_enum_dv_timings(struct file *file, void *priv, struct v4l2_enum_dv_timings *timings) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; int ret; ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -EINVAL; return ret; } /** * vpif_query_dv_timings() - QUERY_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: input timings */ static int vpif_query_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; int ret; ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -ENODATA; return ret; } /** * vpif_s_dv_timings() - S_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct vpif_params *vpifparams = &ch->vpifparams; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct video_obj *vid_ch = &ch->video; struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt; int ret; if (timings->type != V4L2_DV_BT_656_1120) { vpif_dbg(2, debug, "Timing type not defined\n"); return -EINVAL; } /* Configure subdevice timings, if any */ ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) ret = 0; if (ret < 0) { vpif_dbg(2, debug, "Error setting custom DV timings\n"); return ret; } if (!(timings->bt.width && timings->bt.height && (timings->bt.hbackporch || timings->bt.hfrontporch || timings->bt.hsync) && timings->bt.vfrontporch && (timings->bt.vbackporch || timings->bt.vsync))) { vpif_dbg(2, debug, "Timings for width, height, " "horizontal back porch, horizontal sync, " "horizontal front porch, vertical back porch, " "vertical sync and vertical back porch " "must be 
defined\n"); return -EINVAL; } vid_ch->dv_timings = *timings; /* Configure video port timings */ std_info->eav2sav = bt->hbackporch + bt->hfrontporch + bt->hsync - 8; std_info->sav2eav = bt->width; std_info->l1 = 1; std_info->l3 = bt->vsync + bt->vbackporch + 1; if (bt->interlaced) { if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) { std_info->vsize = bt->height * 2 + bt->vfrontporch + bt->vsync + bt->vbackporch + bt->il_vfrontporch + bt->il_vsync + bt->il_vbackporch; std_info->l5 = std_info->vsize/2 - (bt->vfrontporch - 1); std_info->l7 = std_info->vsize/2 + 1; std_info->l9 = std_info->l7 + bt->il_vsync + bt->il_vbackporch + 1; std_info->l11 = std_info->vsize - (bt->il_vfrontporch - 1); } else { vpif_dbg(2, debug, "Required timing values for " "interlaced BT format missing\n"); return -EINVAL; } } else { std_info->vsize = bt->height + bt->vfrontporch + bt->vsync + bt->vbackporch; std_info->l5 = std_info->vsize - (bt->vfrontporch - 1); } strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME); std_info->width = bt->width; std_info->height = bt->height; std_info->frm_fmt = bt->interlaced ? 0 : 1; std_info->ycmux_mode = 0; std_info->capture_format = 0; std_info->vbi_supported = 0; std_info->hd_sd = 1; std_info->stdid = 0; vid_ch->stdid = 0; return 0; } /** * vpif_g_dv_timings() - G_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_g_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; struct video_obj *vid_ch = &ch->video; *timings = vid_ch->dv_timings; return 0; } /* * vpif_g_chip_ident() - Identify the chip * @file: file ptr * @priv: file handle * @chip: chip identity * * Returns zero or -EINVAL if read operations fails. 
*/ static int vpif_g_chip_ident(struct file *file, void *priv, struct v4l2_dbg_chip_ident *chip) { chip->ident = V4L2_IDENT_NONE; chip->revision = 0; if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER && chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) { vpif_dbg(2, debug, "match_type is invalid.\n"); return -EINVAL; } return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core, g_chip_ident, chip); } #ifdef CONFIG_VIDEO_ADV_DEBUG /* * vpif_dbg_g_register() - Read register * @file: file ptr * @priv: file handle * @reg: register to be read * * Debugging only * Returns zero or -EINVAL if read operations fails. */ static int vpif_dbg_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg){ struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; return v4l2_subdev_call(ch->sd, core, g_register, reg); } /* * vpif_dbg_s_register() - Write to register * @file: file ptr * @priv: file handle * @reg: register to be modified * * Debugging only * Returns zero or -EINVAL if write operations fails. */ static int vpif_dbg_s_register(struct file *file, void *priv, const struct v4l2_dbg_register *reg) { struct vpif_fh *fh = priv; struct channel_obj *ch = fh->channel; return v4l2_subdev_call(ch->sd, core, s_register, reg); } #endif /* * vpif_log_status() - Status information * @file: file ptr * @priv: file handle * * Returns zero. 
*/ static int vpif_log_status(struct file *filep, void *priv) { /* status for sub devices */ v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status); return 0; } /* vpif capture ioctl operations */ static const struct v4l2_ioctl_ops vpif_ioctl_ops = { .vidioc_querycap = vpif_querycap, .vidioc_g_priority = vpif_g_priority, .vidioc_s_priority = vpif_s_priority, .vidioc_enum_fmt_vid_cap = vpif_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vpif_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vpif_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vpif_try_fmt_vid_cap, .vidioc_enum_input = vpif_enum_input, .vidioc_s_input = vpif_s_input, .vidioc_g_input = vpif_g_input, .vidioc_reqbufs = vpif_reqbufs, .vidioc_querybuf = vpif_querybuf, .vidioc_querystd = vpif_querystd, .vidioc_s_std = vpif_s_std, .vidioc_g_std = vpif_g_std, .vidioc_qbuf = vpif_qbuf, .vidioc_dqbuf = vpif_dqbuf, .vidioc_streamon = vpif_streamon, .vidioc_streamoff = vpif_streamoff, .vidioc_cropcap = vpif_cropcap, .vidioc_enum_dv_timings = vpif_enum_dv_timings, .vidioc_query_dv_timings = vpif_query_dv_timings, .vidioc_s_dv_timings = vpif_s_dv_timings, .vidioc_g_dv_timings = vpif_g_dv_timings, .vidioc_g_chip_ident = vpif_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vpif_dbg_g_register, .vidioc_s_register = vpif_dbg_s_register, #endif .vidioc_log_status = vpif_log_status, }; /* vpif file operations */ static struct v4l2_file_operations vpif_fops = { .owner = THIS_MODULE, .open = vpif_open, .release = vpif_release, .unlocked_ioctl = video_ioctl2, .mmap = vpif_mmap, .poll = vpif_poll }; /* vpif video template */ static struct video_device vpif_video_template = { .name = "vpif", .fops = &vpif_fops, .minor = -1, .ioctl_ops = &vpif_ioctl_ops, }; /** * initialize_vpif() - Initialize vpif data structures * * Allocate memory for data structures and initialize them */ static int initialize_vpif(void) { int err = 0, i, j; int free_channel_objects_index; /* Default number of buffers should be 3 */ if ((ch0_numbuffers > 
0) && (ch0_numbuffers < config_params.min_numbuffers)) ch0_numbuffers = config_params.min_numbuffers; if ((ch1_numbuffers > 0) && (ch1_numbuffers < config_params.min_numbuffers)) ch1_numbuffers = config_params.min_numbuffers; /* Set buffer size to min buffers size if it is invalid */ if (ch0_bufsize < config_params.min_bufsize[VPIF_CHANNEL0_VIDEO]) ch0_bufsize = config_params.min_bufsize[VPIF_CHANNEL0_VIDEO]; if (ch1_bufsize < config_params.min_bufsize[VPIF_CHANNEL1_VIDEO]) ch1_bufsize = config_params.min_bufsize[VPIF_CHANNEL1_VIDEO]; config_params.numbuffers[VPIF_CHANNEL0_VIDEO] = ch0_numbuffers; config_params.numbuffers[VPIF_CHANNEL1_VIDEO] = ch1_numbuffers; if (ch0_numbuffers) { config_params.channel_bufsize[VPIF_CHANNEL0_VIDEO] = ch0_bufsize; } if (ch1_numbuffers) { config_params.channel_bufsize[VPIF_CHANNEL1_VIDEO] = ch1_bufsize; } /* Allocate memory for six channel objects */ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { vpif_obj.dev[i] = kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL); /* If memory allocation fails, return error */ if (!vpif_obj.dev[i]) { free_channel_objects_index = i; err = -ENOMEM; goto vpif_init_free_channel_objects; } } return 0; vpif_init_free_channel_objects: for (j = 0; j < free_channel_objects_index; j++) kfree(vpif_obj.dev[j]); return err; } /** * vpif_probe : This function probes the vpif capture driver * @pdev: platform device pointer * * This creates device entries by register itself to the V4L2 driver and * initializes fields of each channel objects */ static __init int vpif_probe(struct platform_device *pdev) { struct vpif_subdev_info *subdevdata; struct vpif_capture_config *config; int i, j, k, err; int res_idx = 0; struct i2c_adapter *i2c_adap; struct channel_obj *ch; struct common_obj *common; struct video_device *vfd; struct resource *res; int subdev_count; size_t size; vpif_dev = &pdev->dev; err = initialize_vpif(); if (err) { v4l2_err(vpif_dev->driver, "Error initializing vpif\n"); return err; } err = 
v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev); if (err) { v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n"); return err; } while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) { for (i = res->start; i <= res->end; i++) { if (request_irq(i, vpif_channel_isr, IRQF_SHARED, "VPIF_Capture", (void *) (&vpif_obj.dev[res_idx]->channel_id))) { err = -EBUSY; for (j = 0; j < i; j++) free_irq(j, (void *) (&vpif_obj.dev[res_idx]->channel_id)); goto vpif_int_err; } } res_idx++; } for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; /* Allocate memory for video device */ vfd = video_device_alloc(); if (NULL == vfd) { for (j = 0; j < i; j++) { ch = vpif_obj.dev[j]; video_device_release(ch->video_dev); } err = -ENOMEM; goto vpif_int_err; } /* Initialize field of video device */ *vfd = vpif_video_template; vfd->v4l2_dev = &vpif_obj.v4l2_dev; vfd->release = video_device_release; snprintf(vfd->name, sizeof(vfd->name), "VPIF_Capture_DRIVER_V%s", VPIF_CAPTURE_VERSION); /* Set video_dev to the video device */ ch->video_dev = vfd; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) { size = resource_size(res); /* The resources are divided into two equal memory and when we * have HD output we can add them together */ for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) { ch = vpif_obj.dev[j]; ch->channel_id = j; /* only enabled if second resource exists */ config_params.video_limit[ch->channel_id] = 0; if (size) config_params.video_limit[ch->channel_id] = size/2; } } i2c_adap = i2c_get_adapter(1); config = pdev->dev.platform_data; subdev_count = config->subdev_count; vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count, GFP_KERNEL); if (vpif_obj.sd == NULL) { vpif_err("unable to allocate memory for subdevice pointers\n"); err = -ENOMEM; goto vpif_sd_error; } for (i = 0; i < subdev_count; i++) { subdevdata = &config->subdev_info[i]; vpif_obj.sd[i] = 
v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, i2c_adap, &subdevdata->board_info, NULL); if (!vpif_obj.sd[i]) { vpif_err("Error registering v4l2 subdevice\n"); goto probe_subdev_out; } v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n", subdevdata->name); } for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) { ch = vpif_obj.dev[j]; ch->channel_id = j; common = &(ch->common[VPIF_VIDEO_INDEX]); spin_lock_init(&common->irqlock); mutex_init(&common->lock); ch->video_dev->lock = &common->lock; /* Initialize prio member of channel object */ v4l2_prio_init(&ch->prio); video_set_drvdata(ch->video_dev, ch); /* select input 0 */ err = vpif_set_input(config, ch, 0); if (err) goto probe_out; err = video_register_device(ch->video_dev, VFL_TYPE_GRABBER, (j ? 1 : 0)); if (err) goto probe_out; } v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n"); return 0; probe_out: for (k = 0; k < j; k++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[k]; /* Unregister video device */ video_unregister_device(ch->video_dev); } probe_subdev_out: /* free sub devices memory */ kfree(vpif_obj.sd); vpif_sd_error: for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { ch = vpif_obj.dev[i]; /* Note: does nothing if ch->video_dev == NULL */ video_device_release(ch->video_dev); } vpif_int_err: v4l2_device_unregister(&vpif_obj.v4l2_dev); for (i = 0; i < res_idx; i++) { res = platform_get_resource(pdev, IORESOURCE_IRQ, i); for (j = res->start; j <= res->end; j++) free_irq(j, (void *)(&vpif_obj.dev[i]->channel_id)); } return err; } /** * vpif_remove() - driver remove handler * @device: ptr to platform device structure * * The vidoe device is unregistered */ static int vpif_remove(struct platform_device *device) { int i; struct channel_obj *ch; v4l2_device_unregister(&vpif_obj.v4l2_dev); /* un-register device */ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; /* Unregister video device */ 
video_unregister_device(ch->video_dev); } return 0; } #ifdef CONFIG_PM /** * vpif_suspend: vpif device suspend */ static int vpif_suspend(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; mutex_lock(&common->lock); if (ch->usrs && common->io_usrs) { /* Disable channel */ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { enable_channel0(0); channel0_intr_enable(0); } if (ch->channel_id == VPIF_CHANNEL1_VIDEO || common->started == 2) { enable_channel1(0); channel1_intr_enable(0); } } mutex_unlock(&common->lock); } return 0; } /* * vpif_resume: vpif device suspend */ static int vpif_resume(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; mutex_lock(&common->lock); if (ch->usrs && common->io_usrs) { /* Disable channel */ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { enable_channel0(1); channel0_intr_enable(1); } if (ch->channel_id == VPIF_CHANNEL1_VIDEO || common->started == 2) { enable_channel1(1); channel1_intr_enable(1); } } mutex_unlock(&common->lock); } return 0; } static const struct dev_pm_ops vpif_dev_pm_ops = { .suspend = vpif_suspend, .resume = vpif_resume, }; #define vpif_pm_ops (&vpif_dev_pm_ops) #else #define vpif_pm_ops NULL #endif static __refdata struct platform_driver vpif_driver = { .driver = { .name = "vpif_capture", .owner = THIS_MODULE, .pm = vpif_pm_ops, }, .probe = vpif_probe, .remove = vpif_remove, }; /** * vpif_init: initialize the vpif driver * * This function registers device and driver to the kernel, requests irq * handler and allocates memory * for channel objects */ static __init int vpif_init(void) { return platform_driver_register(&vpif_driver); } /** * vpif_cleanup : This 
function clean up the vpif capture resources * * This will un-registers device and driver to the kernel, frees * requested irq handler and de-allocates memory allocated for channel * objects. */ static void vpif_cleanup(void) { struct platform_device *pdev; struct resource *res; int irq_num; int i = 0; pdev = container_of(vpif_dev, struct platform_device, dev); while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, i))) { for (irq_num = res->start; irq_num <= res->end; irq_num++) free_irq(irq_num, (void *)(&vpif_obj.dev[i]->channel_id)); i++; } platform_driver_unregister(&vpif_driver); kfree(vpif_obj.sd); for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) kfree(vpif_obj.dev[i]); } /* Function for module initialization and cleanup */ module_init(vpif_init); module_exit(vpif_cleanup);
gpl-2.0
imoseyon/leanKernel-shamu
drivers/media/pci/ivtv/ivtv-driver.c
2084
47760
/* ivtv driver initialization and card probing Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2004 Chris Kennedy <c@groovy.org> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Main Driver file for the ivtv project: * Driver for the Conexant CX23415/CX23416 chip. * Author: Kevin Thayer (nufan_wfk at yahoo.com) * License: GPL * http://www.ivtvdriver.org * * ----- * MPG600/MPG160 support by T.Adachi <tadachi@tadachi-net.com> * and Takeru KOMORIYA<komoriya@paken.org> * * AVerMedia M179 GPIO info by Chris Pinkham <cpinkham@bc2va.org> * using information provided by Jiun-Kuei Jung @ AVerMedia. * * Kurouto Sikou CX23416GYC-STVLP tested by K.Ohta <alpha292@bremen.or.jp> * using information from T.Adachi,Takeru KOMORIYA and others :-) * * Nagase TRANSGEAR 5000TV, Aopen VA2000MAX-STN6 and I/O data GV-MVP/RX * version by T.Adachi. 
Special thanks Mr.Suzuki */ #include "ivtv-driver.h" #include "ivtv-version.h" #include "ivtv-fileops.h" #include "ivtv-i2c.h" #include "ivtv-firmware.h" #include "ivtv-queue.h" #include "ivtv-udma.h" #include "ivtv-irq.h" #include "ivtv-mailbox.h" #include "ivtv-streams.h" #include "ivtv-ioctl.h" #include "ivtv-cards.h" #include "ivtv-vbi.h" #include "ivtv-routing.h" #include "ivtv-controls.h" #include "ivtv-gpio.h" #include <linux/dma-mapping.h> #include <media/tveeprom.h> #include <media/saa7115.h> #include <media/v4l2-chip-ident.h> #include "tuner-xc2028.h" /* If you have already X v4l cards, then set this to X. This way the device numbers stay matched. Example: you have a WinTV card without radio and a PVR-350 with. Normally this would give a video1 device together with a radio0 device for the PVR. By setting this to 1 you ensure that radio0 is now also radio1. */ int ivtv_first_minor; /* Callback for registering extensions */ int (*ivtv_ext_init)(struct ivtv *); EXPORT_SYMBOL(ivtv_ext_init); /* add your revision and whatnot here */ static struct pci_device_id ivtv_pci_tbl[] = { {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl); /* ivtv instance counter */ static atomic_t ivtv_instance = ATOMIC_INIT(0); /* Parameter declarations */ static int cardtype[IVTV_MAX_CARDS]; static int tuner[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int radio[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int i2c_clock_period[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static unsigned int cardtype_c = 1; 
static unsigned int tuner_c = 1; static int radio_c = 1; static unsigned int i2c_clock_period_c = 1; static char pal[] = "---"; static char secam[] = "--"; static char ntsc[] = "-"; /* Buffers */ /* DMA Buffers, Default size in MB allocated */ #define IVTV_DEFAULT_ENC_MPG_BUFFERS 4 #define IVTV_DEFAULT_ENC_YUV_BUFFERS 2 #define IVTV_DEFAULT_ENC_VBI_BUFFERS 1 /* Exception: size in kB for this stream (MB is overkill) */ #define IVTV_DEFAULT_ENC_PCM_BUFFERS 320 #define IVTV_DEFAULT_DEC_MPG_BUFFERS 1 #define IVTV_DEFAULT_DEC_YUV_BUFFERS 1 /* Exception: size in kB for this stream (MB is way overkill) */ #define IVTV_DEFAULT_DEC_VBI_BUFFERS 64 static int enc_mpg_buffers = IVTV_DEFAULT_ENC_MPG_BUFFERS; static int enc_yuv_buffers = IVTV_DEFAULT_ENC_YUV_BUFFERS; static int enc_vbi_buffers = IVTV_DEFAULT_ENC_VBI_BUFFERS; static int enc_pcm_buffers = IVTV_DEFAULT_ENC_PCM_BUFFERS; static int dec_mpg_buffers = IVTV_DEFAULT_DEC_MPG_BUFFERS; static int dec_yuv_buffers = IVTV_DEFAULT_DEC_YUV_BUFFERS; static int dec_vbi_buffers = IVTV_DEFAULT_DEC_VBI_BUFFERS; static int ivtv_yuv_mode; static int ivtv_yuv_threshold = -1; static int ivtv_pci_latency = 1; int ivtv_debug; #ifdef CONFIG_VIDEO_ADV_DEBUG int ivtv_fw_debug; #endif static int tunertype = -1; static int newi2c = -1; module_param_array(tuner, int, &tuner_c, 0644); module_param_array(radio, int, &radio_c, 0644); module_param_array(cardtype, int, &cardtype_c, 0644); module_param_string(pal, pal, sizeof(pal), 0644); module_param_string(secam, secam, sizeof(secam), 0644); module_param_string(ntsc, ntsc, sizeof(ntsc), 0644); module_param_named(debug,ivtv_debug, int, 0644); #ifdef CONFIG_VIDEO_ADV_DEBUG module_param_named(fw_debug, ivtv_fw_debug, int, 0644); #endif module_param(ivtv_pci_latency, int, 0644); module_param(ivtv_yuv_mode, int, 0644); module_param(ivtv_yuv_threshold, int, 0644); module_param(ivtv_first_minor, int, 0644); module_param(enc_mpg_buffers, int, 0644); module_param(enc_yuv_buffers, int, 0644); 
module_param(enc_vbi_buffers, int, 0644); module_param(enc_pcm_buffers, int, 0644); module_param(dec_mpg_buffers, int, 0644); module_param(dec_yuv_buffers, int, 0644); module_param(dec_vbi_buffers, int, 0644); module_param(tunertype, int, 0644); module_param(newi2c, int, 0644); module_param_array(i2c_clock_period, int, &i2c_clock_period_c, 0644); MODULE_PARM_DESC(tuner, "Tuner type selection,\n" "\t\t\tsee tuner.h for values"); MODULE_PARM_DESC(radio, "Enable or disable the radio. Use only if autodetection\n" "\t\t\tfails. 0 = disable, 1 = enable"); MODULE_PARM_DESC(cardtype, "Only use this option if your card is not detected properly.\n" "\t\tSpecify card type:\n" "\t\t\t 1 = WinTV PVR 250\n" "\t\t\t 2 = WinTV PVR 350\n" "\t\t\t 3 = WinTV PVR-150 or PVR-500\n" "\t\t\t 4 = AVerMedia M179\n" "\t\t\t 5 = YUAN MPG600/Kuroutoshikou iTVC16-STVLP\n" "\t\t\t 6 = YUAN MPG160/Kuroutoshikou iTVC15-STVLP\n" "\t\t\t 7 = YUAN PG600/DIAMONDMM PVR-550 (CX Falcon 2)\n" "\t\t\t 8 = Adaptec AVC-2410\n" "\t\t\t 9 = Adaptec AVC-2010\n" "\t\t\t10 = NAGASE TRANSGEAR 5000TV\n" "\t\t\t11 = AOpen VA2000MAX-STN6\n" "\t\t\t12 = YUAN MPG600GR/Kuroutoshikou CX23416GYC-STVLP\n" "\t\t\t13 = I/O Data GV-MVP/RX\n" "\t\t\t14 = I/O Data GV-MVP/RX2E\n" "\t\t\t15 = GOTVIEW PCI DVD\n" "\t\t\t16 = GOTVIEW PCI DVD2 Deluxe\n" "\t\t\t17 = Yuan MPC622\n" "\t\t\t18 = Digital Cowboy DCT-MTVP1\n" "\t\t\t19 = Yuan PG600V2/GotView PCI DVD Lite\n" "\t\t\t20 = Club3D ZAP-TV1x01\n" "\t\t\t21 = AverTV MCE 116 Plus\n" "\t\t\t22 = ASUS Falcon2\n" "\t\t\t23 = AverMedia PVR-150 Plus\n" "\t\t\t24 = AverMedia EZMaker PCI Deluxe\n" "\t\t\t25 = AverMedia M104 (not yet working)\n" "\t\t\t26 = Buffalo PC-MV5L/PCI\n" "\t\t\t27 = AVerMedia UltraTV 1500 MCE\n" "\t\t\t28 = Sony VAIO Giga Pocket (ENX Kikyou)\n" "\t\t\t 0 = Autodetect (default)\n" "\t\t\t-1 = Ignore this card\n\t\t"); MODULE_PARM_DESC(pal, "Set PAL standard: BGH, DK, I, M, N, Nc, 60"); MODULE_PARM_DESC(secam, "Set SECAM standard: BGH, DK, L, LC"); 
MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J (Japan), K (South Korea)"); MODULE_PARM_DESC(tunertype, "Specify tuner type:\n" "\t\t\t 0 = tuner for PAL-B/G/H/D/K/I, SECAM-B/G/H/D/K/L/Lc\n" "\t\t\t 1 = tuner for NTSC-M/J/K, PAL-M/N/Nc\n" "\t\t\t-1 = Autodetect (default)\n"); MODULE_PARM_DESC(debug, "Debug level (bitmask). Default: 0\n" "\t\t\t 1/0x0001: warning\n" "\t\t\t 2/0x0002: info\n" "\t\t\t 4/0x0004: mailbox\n" "\t\t\t 8/0x0008: ioctl\n" "\t\t\t 16/0x0010: file\n" "\t\t\t 32/0x0020: dma\n" "\t\t\t 64/0x0040: irq\n" "\t\t\t 128/0x0080: decoder\n" "\t\t\t 256/0x0100: yuv\n" "\t\t\t 512/0x0200: i2c\n" "\t\t\t1024/0x0400: high volume\n"); #ifdef CONFIG_VIDEO_ADV_DEBUG MODULE_PARM_DESC(fw_debug, "Enable code for debugging firmware problems. Default: 0\n"); #endif MODULE_PARM_DESC(ivtv_pci_latency, "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n" "\t\t\tDefault: Yes"); MODULE_PARM_DESC(ivtv_yuv_mode, "Specify the yuv playback mode:\n" "\t\t\t0 = interlaced\n\t\t\t1 = progressive\n\t\t\t2 = auto\n" "\t\t\tDefault: 0 (interlaced)"); MODULE_PARM_DESC(ivtv_yuv_threshold, "If ivtv_yuv_mode is 2 (auto) then playback content as\n\t\tprogressive if src height <= ivtv_yuvthreshold\n" "\t\t\tDefault: 480"); MODULE_PARM_DESC(enc_mpg_buffers, "Encoder MPG Buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_MPG_BUFFERS)); MODULE_PARM_DESC(enc_yuv_buffers, "Encoder YUV Buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_YUV_BUFFERS)); MODULE_PARM_DESC(enc_vbi_buffers, "Encoder VBI Buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_VBI_BUFFERS)); MODULE_PARM_DESC(enc_pcm_buffers, "Encoder PCM buffers (in kB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_ENC_PCM_BUFFERS)); MODULE_PARM_DESC(dec_mpg_buffers, "Decoder MPG buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_MPG_BUFFERS)); MODULE_PARM_DESC(dec_yuv_buffers, "Decoder YUV buffers (in MB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_YUV_BUFFERS)); 
MODULE_PARM_DESC(dec_vbi_buffers, "Decoder VBI buffers (in kB)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_DEC_VBI_BUFFERS)); MODULE_PARM_DESC(newi2c, "Use new I2C implementation\n" "\t\t\t-1 is autodetect, 0 is off, 1 is on\n" "\t\t\tDefault is autodetect"); MODULE_PARM_DESC(i2c_clock_period, "Period of SCL for the I2C bus controlled by the CX23415/6\n" "\t\t\tMin: 10 usec (100 kHz), Max: 4500 usec (222 Hz)\n" "\t\t\tDefault: " __stringify(IVTV_DEFAULT_I2C_CLOCK_PERIOD)); MODULE_PARM_DESC(ivtv_first_minor, "Set device node number assigned to first card"); MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil"); MODULE_DESCRIPTION("CX23415/CX23416 driver"); MODULE_SUPPORTED_DEVICE ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n" "\t\t\tYuan MPG series and similar)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IVTV_VERSION); #if defined(CONFIG_MODULES) && defined(MODULE) static void request_module_async(struct work_struct *work) { struct ivtv *dev = container_of(work, struct ivtv, request_module_wk); /* Make sure ivtv-alsa module is loaded */ request_module("ivtv-alsa"); /* Initialize ivtv-alsa for this instance of the cx18 device */ if (ivtv_ext_init != NULL) ivtv_ext_init(dev); } static void request_modules(struct ivtv *dev) { INIT_WORK(&dev->request_module_wk, request_module_async); schedule_work(&dev->request_module_wk); } static void flush_request_modules(struct ivtv *dev) { flush_work(&dev->request_module_wk); } #else #define request_modules(dev) #define flush_request_modules(dev) #endif /* CONFIG_MODULES */ void ivtv_clear_irq_mask(struct ivtv *itv, u32 mask) { itv->irqmask &= ~mask; write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK); } void ivtv_set_irq_mask(struct ivtv *itv, u32 mask) { itv->irqmask |= mask; write_reg_sync(itv->irqmask, IVTV_REG_IRQMASK); } int ivtv_set_output_mode(struct ivtv *itv, int mode) { int old_mode; spin_lock(&itv->lock); old_mode = itv->output_mode; if (old_mode == 0) itv->output_mode = old_mode = mode; 
spin_unlock(&itv->lock); return old_mode; } struct ivtv_stream *ivtv_get_output_stream(struct ivtv *itv) { switch (itv->output_mode) { case OUT_MPG: return &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; case OUT_YUV: return &itv->streams[IVTV_DEC_STREAM_TYPE_YUV]; default: return NULL; } } int ivtv_waitq(wait_queue_head_t *waitq) { DEFINE_WAIT(wait); prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE); schedule(); finish_wait(waitq, &wait); return signal_pending(current) ? -EINTR : 0; } /* Generic utility functions */ int ivtv_msleep_timeout(unsigned int msecs, int intr) { int timeout = msecs_to_jiffies(msecs); do { set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); timeout = schedule_timeout(timeout); if (intr) { int ret = signal_pending(current); if (ret) return ret; } } while (timeout); return 0; } /* Release ioremapped memory */ static void ivtv_iounmap(struct ivtv *itv) { if (itv == NULL) return; /* Release registers memory */ if (itv->reg_mem != NULL) { IVTV_DEBUG_INFO("releasing reg_mem\n"); iounmap(itv->reg_mem); itv->reg_mem = NULL; } /* Release io memory */ if (itv->has_cx23415 && itv->dec_mem != NULL) { IVTV_DEBUG_INFO("releasing dec_mem\n"); iounmap(itv->dec_mem); } itv->dec_mem = NULL; /* Release io memory */ if (itv->enc_mem != NULL) { IVTV_DEBUG_INFO("releasing enc_mem\n"); iounmap(itv->enc_mem); itv->enc_mem = NULL; } } /* Hauppauge card? get values from tveeprom */ void ivtv_read_eeprom(struct ivtv *itv, struct tveeprom *tv) { u8 eedata[256]; itv->i2c_client.addr = 0xA0 >> 1; tveeprom_read(&itv->i2c_client, eedata, sizeof(eedata)); tveeprom_hauppauge_analog(&itv->i2c_client, tv, eedata); } static void ivtv_process_eeprom(struct ivtv *itv) { struct tveeprom tv; int pci_slot = PCI_SLOT(itv->pdev->devfn); ivtv_read_eeprom(itv, &tv); /* Many thanks to Steven Toth from Hauppauge for providing the model numbers */ switch (tv.model) { /* In a few cases the PCI subsystem IDs do not correctly identify the card. 
A better method is to check the model number from the eeprom instead. */ case 30012 ... 30039: /* Low profile PVR250 */ case 32000 ... 32999: case 48000 ... 48099: /* 48??? range are PVR250s with a cx23415 */ case 48400 ... 48599: itv->card = ivtv_get_card(IVTV_CARD_PVR_250); break; case 48100 ... 48399: case 48600 ... 48999: itv->card = ivtv_get_card(IVTV_CARD_PVR_350); break; case 23000 ... 23999: /* PVR500 */ case 25000 ... 25999: /* Low profile PVR150 */ case 26000 ... 26999: /* Regular PVR150 */ itv->card = ivtv_get_card(IVTV_CARD_PVR_150); break; case 0: IVTV_ERR("Invalid EEPROM\n"); return; default: IVTV_ERR("Unknown model %d, defaulting to PVR-150\n", tv.model); itv->card = ivtv_get_card(IVTV_CARD_PVR_150); break; } switch (tv.model) { /* Old style PVR350 (with an saa7114) uses this input for the tuner. */ case 48254: itv->card = ivtv_get_card(IVTV_CARD_PVR_350_V1); break; default: break; } itv->v4l2_cap = itv->card->v4l2_capabilities; itv->card_name = itv->card->name; itv->card_i2c = itv->card->i2c; /* If this is a PVR500 then it should be possible to detect whether it is the first or second unit by looking at the subsystem device ID: is bit 4 is set, then it is the second unit (according to info from Hauppauge). However, while this works for most cards, I have seen a few PVR500 cards where both units have the same subsystem ID. So instead I look at the reported 'PCI slot' (which is the slot on the PVR500 PCI bridge) and if it is 8, then it is assumed to be the first unit, otherwise it is the second unit. It is possible that it is a different slot when ivtv is used in Xen, in that case I ignore this card here. The worst that can happen is that the card presents itself with a non-working radio device. This detection is needed since the eeprom reports incorrectly that a radio is present on the second unit. 
*/ if (tv.model / 1000 == 23) { static const struct ivtv_card_tuner_i2c ivtv_i2c_radio = { .radio = { 0x60, I2C_CLIENT_END }, .demod = { 0x43, I2C_CLIENT_END }, .tv = { 0x61, I2C_CLIENT_END }, }; itv->card_name = "WinTV PVR 500"; itv->card_i2c = &ivtv_i2c_radio; if (pci_slot == 8 || pci_slot == 9) { int is_first = (pci_slot & 1) == 0; itv->card_name = is_first ? "WinTV PVR 500 (unit #1)" : "WinTV PVR 500 (unit #2)"; if (!is_first) { IVTV_INFO("Correcting tveeprom data: no radio present on second unit\n"); tv.has_radio = 0; } } } IVTV_INFO("Autodetected %s\n", itv->card_name); switch (tv.tuner_hauppauge_model) { case 85: case 99: case 112: itv->pvr150_workaround = 1; break; default: break; } if (tv.tuner_type == TUNER_ABSENT) IVTV_ERR("tveeprom cannot autodetect tuner!\n"); if (itv->options.tuner == -1) itv->options.tuner = tv.tuner_type; if (itv->options.radio == -1) itv->options.radio = (tv.has_radio != 0); /* only enable newi2c if an IR blaster is present */ if (itv->options.newi2c == -1 && tv.has_ir) { itv->options.newi2c = (tv.has_ir & 4) ? 
1 : 0; if (itv->options.newi2c) { IVTV_INFO("Reopen i2c bus for IR-blaster support\n"); exit_ivtv_i2c(itv); init_ivtv_i2c(itv); } } if (itv->std != 0) /* user specified tuner standard */ return; /* autodetect tuner standard */ if (tv.tuner_formats & V4L2_STD_PAL) { IVTV_DEBUG_INFO("PAL tuner detected\n"); itv->std |= V4L2_STD_PAL_BG | V4L2_STD_PAL_H; } else if (tv.tuner_formats & V4L2_STD_NTSC) { IVTV_DEBUG_INFO("NTSC tuner detected\n"); itv->std |= V4L2_STD_NTSC_M; } else if (tv.tuner_formats & V4L2_STD_SECAM) { IVTV_DEBUG_INFO("SECAM tuner detected\n"); itv->std |= V4L2_STD_SECAM_L; } else { IVTV_INFO("No tuner detected, default to NTSC-M\n"); itv->std |= V4L2_STD_NTSC_M; } } static v4l2_std_id ivtv_parse_std(struct ivtv *itv) { switch (pal[0]) { case '6': tunertype = 0; return V4L2_STD_PAL_60; case 'b': case 'B': case 'g': case 'G': case 'h': case 'H': tunertype = 0; return V4L2_STD_PAL_BG | V4L2_STD_PAL_H; case 'n': case 'N': tunertype = 1; if (pal[1] == 'c' || pal[1] == 'C') return V4L2_STD_PAL_Nc; return V4L2_STD_PAL_N; case 'i': case 'I': tunertype = 0; return V4L2_STD_PAL_I; case 'd': case 'D': case 'k': case 'K': tunertype = 0; return V4L2_STD_PAL_DK; case 'M': case 'm': tunertype = 1; return V4L2_STD_PAL_M; case '-': break; default: IVTV_WARN("pal= argument not recognised\n"); return 0; } switch (secam[0]) { case 'b': case 'B': case 'g': case 'G': case 'h': case 'H': tunertype = 0; return V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H; case 'd': case 'D': case 'k': case 'K': tunertype = 0; return V4L2_STD_SECAM_DK; case 'l': case 'L': tunertype = 0; if (secam[1] == 'C' || secam[1] == 'c') return V4L2_STD_SECAM_LC; return V4L2_STD_SECAM_L; case '-': break; default: IVTV_WARN("secam= argument not recognised\n"); return 0; } switch (ntsc[0]) { case 'm': case 'M': tunertype = 1; return V4L2_STD_NTSC_M; case 'j': case 'J': tunertype = 1; return V4L2_STD_NTSC_M_JP; case 'k': case 'K': tunertype = 1; return V4L2_STD_NTSC_M_KR; case '-': break; default: 
IVTV_WARN("ntsc= argument not recognised\n"); return 0; } /* no match found */ return 0; } static void ivtv_process_options(struct ivtv *itv) { const char *chipname; int i, j; itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers * 1024; itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers * 1024; itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers * 1024; itv->options.kilobytes[IVTV_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers; itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_MPG] = dec_mpg_buffers * 1024; itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_YUV] = dec_yuv_buffers * 1024; itv->options.kilobytes[IVTV_DEC_STREAM_TYPE_VBI] = dec_vbi_buffers; itv->options.cardtype = cardtype[itv->instance]; itv->options.tuner = tuner[itv->instance]; itv->options.radio = radio[itv->instance]; itv->options.i2c_clock_period = i2c_clock_period[itv->instance]; if (itv->options.i2c_clock_period == -1) itv->options.i2c_clock_period = IVTV_DEFAULT_I2C_CLOCK_PERIOD; else if (itv->options.i2c_clock_period < 10) itv->options.i2c_clock_period = 10; else if (itv->options.i2c_clock_period > 4500) itv->options.i2c_clock_period = 4500; itv->options.newi2c = newi2c; if (tunertype < -1 || tunertype > 1) { IVTV_WARN("Invalid tunertype argument, will autodetect instead\n"); tunertype = -1; } itv->std = ivtv_parse_std(itv); if (itv->std == 0 && tunertype >= 0) itv->std = tunertype ? V4L2_STD_MN : (V4L2_STD_ALL & ~V4L2_STD_MN); itv->has_cx23415 = (itv->pdev->device == PCI_DEVICE_ID_IVTV15); chipname = itv->has_cx23415 ? 
"cx23415" : "cx23416"; if (itv->options.cardtype == -1) { IVTV_INFO("Ignore card (detected %s based chip)\n", chipname); return; } if ((itv->card = ivtv_get_card(itv->options.cardtype - 1))) { IVTV_INFO("User specified %s card (detected %s based chip)\n", itv->card->name, chipname); } else if (itv->options.cardtype != 0) { IVTV_ERR("Unknown user specified type, trying to autodetect card\n"); } if (itv->card == NULL) { if (itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE || itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT1 || itv->pdev->subsystem_vendor == IVTV_PCI_ID_HAUPPAUGE_ALT2) { itv->card = ivtv_get_card(itv->has_cx23415 ? IVTV_CARD_PVR_350 : IVTV_CARD_PVR_150); IVTV_INFO("Autodetected Hauppauge card (%s based)\n", chipname); } } if (itv->card == NULL) { for (i = 0; (itv->card = ivtv_get_card(i)); i++) { if (itv->card->pci_list == NULL) continue; for (j = 0; itv->card->pci_list[j].device; j++) { if (itv->pdev->device != itv->card->pci_list[j].device) continue; if (itv->pdev->subsystem_vendor != itv->card->pci_list[j].subsystem_vendor) continue; if (itv->pdev->subsystem_device != itv->card->pci_list[j].subsystem_device) continue; IVTV_INFO("Autodetected %s card (%s based)\n", itv->card->name, chipname); goto done; } } } done: if (itv->card == NULL) { itv->card = ivtv_get_card(IVTV_CARD_PVR_150); IVTV_ERR("Unknown card: vendor/device: [%04x:%04x]\n", itv->pdev->vendor, itv->pdev->device); IVTV_ERR(" subsystem vendor/device: [%04x:%04x]\n", itv->pdev->subsystem_vendor, itv->pdev->subsystem_device); IVTV_ERR(" %s based\n", chipname); IVTV_ERR("Defaulting to %s card\n", itv->card->name); IVTV_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n"); IVTV_ERR("card you have to the ivtv-devel mailinglist (www.ivtvdriver.org)\n"); IVTV_ERR("Prefix your subject line with [UNKNOWN IVTV CARD].\n"); } itv->v4l2_cap = itv->card->v4l2_capabilities; itv->card_name = itv->card->name; itv->card_i2c = itv->card->i2c; } /* 
Precondition: the ivtv structure has been memset to 0. Only the dev and num fields have been filled in. No assumptions on the card type may be made here (see ivtv_init_struct2 for that). */ static int ivtv_init_struct1(struct ivtv *itv) { struct sched_param param = { .sched_priority = 99 }; itv->base_addr = pci_resource_start(itv->pdev, 0); itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */ itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */ mutex_init(&itv->serialize_lock); mutex_init(&itv->i2c_bus_lock); mutex_init(&itv->udma.lock); spin_lock_init(&itv->lock); spin_lock_init(&itv->dma_reg_lock); init_kthread_worker(&itv->irq_worker); itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker, itv->v4l2_dev.name); if (IS_ERR(itv->irq_worker_task)) { IVTV_ERR("Could not create ivtv task\n"); return -1; } /* must use the FIFO scheduler as it is realtime sensitive */ sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param); init_kthread_work(&itv->irq_work, ivtv_irq_work_handler); /* Initial settings */ itv->cxhdl.port = CX2341X_PORT_MEMORY; itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI; init_waitqueue_head(&itv->eos_waitq); init_waitqueue_head(&itv->event_waitq); init_waitqueue_head(&itv->vsync_waitq); init_waitqueue_head(&itv->dma_waitq); init_timer(&itv->dma_timer); itv->dma_timer.function = ivtv_unfinished_dma; itv->dma_timer.data = (unsigned long)itv; itv->cur_dma_stream = -1; itv->cur_pio_stream = -1; /* Ctrls */ itv->speed = 1000; /* VBI */ itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE; itv->vbi.sliced_in = &itv->vbi.in.fmt.sliced; /* Init the sg table for osd/yuv output */ sg_init_table(itv->udma.SGlist, IVTV_DMA_SG_OSD_ENT); /* OSD */ itv->osd_global_alpha_state = 1; itv->osd_global_alpha = 255; /* YUV */ atomic_set(&itv->yuv_info.next_dma_frame, -1); itv->yuv_info.lace_mode = ivtv_yuv_mode; itv->yuv_info.lace_threshold = ivtv_yuv_threshold; itv->yuv_info.max_frames_buffered = 3; itv->yuv_info.track_osd 
= 1; return 0; } /* Second initialization part. Here the card type has been autodetected. */ static void ivtv_init_struct2(struct ivtv *itv) { int i; for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++) if (itv->card->video_inputs[i].video_type == 0) break; itv->nof_inputs = i; for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++) if (itv->card->audio_inputs[i].audio_type == 0) break; itv->nof_audio_inputs = i; if (itv->card->hw_all & IVTV_HW_CX25840) { itv->vbi.sliced_size = 288; /* multiple of 16, real size = 284 */ } else { itv->vbi.sliced_size = 64; /* multiple of 16, real size = 52 */ } /* Find tuner input */ for (i = 0; i < itv->nof_inputs; i++) { if (itv->card->video_inputs[i].video_type == IVTV_CARD_INPUT_VID_TUNER) break; } if (i == itv->nof_inputs) i = 0; itv->active_input = i; itv->audio_input = itv->card->video_inputs[i].audio_index; } static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev, const struct pci_device_id *pci_id) { u16 cmd; unsigned char pci_latency; IVTV_DEBUG_INFO("Enabling pci device\n"); if (pci_enable_device(pdev)) { IVTV_ERR("Can't enable device!\n"); return -EIO; } if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { IVTV_ERR("No suitable DMA available.\n"); return -EIO; } if (!request_mem_region(itv->base_addr, IVTV_ENCODER_SIZE, "ivtv encoder")) { IVTV_ERR("Cannot request encoder memory region.\n"); return -EIO; } if (!request_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE, "ivtv registers")) { IVTV_ERR("Cannot request register memory region.\n"); release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); return -EIO; } if (itv->has_cx23415 && !request_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE, "ivtv decoder")) { IVTV_ERR("Cannot request decoder memory region.\n"); release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); return -EIO; } /* Check for bus mastering */ pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (!(cmd & 
PCI_COMMAND_MASTER)) { IVTV_DEBUG_INFO("Attempting to enable Bus Mastering\n"); pci_set_master(pdev); pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (!(cmd & PCI_COMMAND_MASTER)) { IVTV_ERR("Bus Mastering is not enabled\n"); return -ENXIO; } } IVTV_DEBUG_INFO("Bus Mastering Enabled.\n"); pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); if (pci_latency < 64 && ivtv_pci_latency) { IVTV_INFO("Unreasonably low latency timer, " "setting to 64 (was %d)\n", pci_latency); pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); } /* This config space value relates to DMA latencies. The default value 0x8080 is too low however and will lead to DMA errors. 0xffff is the max value which solves these problems. */ pci_write_config_dword(pdev, 0x40, 0xffff); IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, " "irq: %d, latency: %d, memory: 0x%llx\n", pdev->device, pdev->revision, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev->irq, pci_latency, (u64)itv->base_addr); return 0; } static void ivtv_load_and_init_modules(struct ivtv *itv) { u32 hw = itv->card->hw_all; unsigned i; /* check which i2c devices are actually found */ for (i = 0; i < 32; i++) { u32 device = 1 << i; if (!(device & hw)) continue; if (device == IVTV_HW_GPIO || device == IVTV_HW_TVEEPROM) { /* GPIO and TVEEPROM do not use i2c probing */ itv->hw_flags |= device; continue; } if (ivtv_i2c_register(itv, i) == 0) itv->hw_flags |= device; } /* probe for legacy IR controllers that aren't in card definitions */ if ((itv->hw_flags & IVTV_HW_IR_ANY) == 0) ivtv_i2c_new_ir_legacy(itv); if (itv->card->hw_all & IVTV_HW_CX25840) itv->sd_video = ivtv_find_hw(itv, IVTV_HW_CX25840); else if (itv->card->hw_all & IVTV_HW_SAA717X) itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA717X); else if (itv->card->hw_all & IVTV_HW_SAA7114) itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA7114); else itv->sd_video = ivtv_find_hw(itv, IVTV_HW_SAA7115); 
itv->sd_audio = ivtv_find_hw(itv, itv->card->hw_audio_ctrl); itv->sd_muxer = ivtv_find_hw(itv, itv->card->hw_muxer); hw = itv->hw_flags; if (itv->card->type == IVTV_CARD_CX23416GYC) { /* Several variations of this card exist, detect which card type should be used. */ if ((hw & (IVTV_HW_UPD64031A | IVTV_HW_UPD6408X)) == 0) itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGRYCS); else if ((hw & IVTV_HW_UPD64031A) == 0) itv->card = ivtv_get_card(IVTV_CARD_CX23416GYC_NOGR); } else if (itv->card->type == IVTV_CARD_GV_MVPRX || itv->card->type == IVTV_CARD_GV_MVPRX2E) { /* The crystal frequency of GVMVPRX is 24.576MHz */ v4l2_subdev_call(itv->sd_video, video, s_crystal_freq, SAA7115_FREQ_24_576_MHZ, SAA7115_FREQ_FL_UCGC); } if (hw & IVTV_HW_CX25840) { itv->vbi.raw_decoder_line_size = 1444; itv->vbi.raw_decoder_sav_odd_field = 0x20; itv->vbi.raw_decoder_sav_even_field = 0x60; itv->vbi.sliced_decoder_line_size = 272; itv->vbi.sliced_decoder_sav_odd_field = 0xB0; itv->vbi.sliced_decoder_sav_even_field = 0xF0; } if (hw & IVTV_HW_SAA711X) { struct v4l2_dbg_chip_ident v; /* determine the exact saa711x model */ itv->hw_flags &= ~IVTV_HW_SAA711X; v.match.type = V4L2_CHIP_MATCH_I2C_DRIVER; strlcpy(v.match.name, "saa7115", sizeof(v.match.name)); ivtv_call_hw(itv, IVTV_HW_SAA711X, core, g_chip_ident, &v); if (v.ident == V4L2_IDENT_SAA7114) { itv->hw_flags |= IVTV_HW_SAA7114; /* VBI is not yet supported by the saa7114 driver. 
*/ itv->v4l2_cap &= ~(V4L2_CAP_SLICED_VBI_CAPTURE|V4L2_CAP_VBI_CAPTURE); } else { itv->hw_flags |= IVTV_HW_SAA7115; } itv->vbi.raw_decoder_line_size = 1443; itv->vbi.raw_decoder_sav_odd_field = 0x25; itv->vbi.raw_decoder_sav_even_field = 0x62; itv->vbi.sliced_decoder_line_size = 51; itv->vbi.sliced_decoder_sav_odd_field = 0xAB; itv->vbi.sliced_decoder_sav_even_field = 0xEC; } if (hw & IVTV_HW_SAA717X) { itv->vbi.raw_decoder_line_size = 1443; itv->vbi.raw_decoder_sav_odd_field = 0x25; itv->vbi.raw_decoder_sav_even_field = 0x62; itv->vbi.sliced_decoder_line_size = 51; itv->vbi.sliced_decoder_sav_odd_field = 0xAB; itv->vbi.sliced_decoder_sav_even_field = 0xEC; } } static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { int retval = 0; int vbi_buf_size; struct ivtv *itv; itv = kzalloc(sizeof(struct ivtv), GFP_ATOMIC); if (itv == NULL) return -ENOMEM; itv->pdev = pdev; itv->instance = v4l2_device_set_name(&itv->v4l2_dev, "ivtv", &ivtv_instance); retval = v4l2_device_register(&pdev->dev, &itv->v4l2_dev); if (retval) { kfree(itv); return retval; } IVTV_INFO("Initializing card %d\n", itv->instance); ivtv_process_options(itv); if (itv->options.cardtype == -1) { retval = -ENODEV; goto err; } if (ivtv_init_struct1(itv)) { retval = -ENOMEM; goto err; } retval = cx2341x_handler_init(&itv->cxhdl, 50); if (retval) goto err; itv->v4l2_dev.ctrl_handler = &itv->cxhdl.hdl; itv->cxhdl.ops = &ivtv_cxhdl_ops; itv->cxhdl.priv = itv; itv->cxhdl.func = ivtv_api_func; IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr); /* PCI Device Setup */ retval = ivtv_setup_pci(itv, pdev, pci_id); if (retval == -EIO) goto free_worker; if (retval == -ENXIO) goto free_mem; /* map io memory */ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); if (!itv->enc_mem) { IVTV_ERR("ioremap failed. 
Can't get a window into CX23415/6 " "encoder memory\n"); IVTV_ERR("Each capture card with a CX23415/6 needs 8 MB of " "vmalloc address space for this window\n"); IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n"); IVTV_ERR("Use the vmalloc= kernel command line option to set " "VmallocTotal to a larger value\n"); retval = -ENOMEM; goto free_mem; } if (itv->has_cx23415) { IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); if (!itv->dec_mem) { IVTV_ERR("ioremap failed. Can't get a window into " "CX23415 decoder memory\n"); IVTV_ERR("Each capture card with a CX23415 needs 8 MB " "of vmalloc address space for this window\n"); IVTV_ERR("Check the output of 'grep Vmalloc " "/proc/meminfo'\n"); IVTV_ERR("Use the vmalloc= kernel command line option " "to set VmallocTotal to a larger value\n"); retval = -ENOMEM; goto free_mem; } } else { itv->dec_mem = itv->enc_mem; } /* map registers memory */ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); itv->reg_mem = ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (!itv->reg_mem) { IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 " "register space\n"); IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of " "vmalloc address space for this window\n"); IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n"); IVTV_ERR("Use the vmalloc= kernel command line option to set " "VmallocTotal to a larger value\n"); retval = -ENOMEM; goto free_io; } retval = ivtv_gpio_init(itv); if (retval) goto free_io; /* active i2c */ IVTV_DEBUG_INFO("activating i2c...\n"); if (init_ivtv_i2c(itv)) { IVTV_ERR("Could not initialize i2c\n"); goto free_io; } if (itv->card->hw_all & IVTV_HW_TVEEPROM) { /* Based on the model number the cardtype may be changed. 
The PCI IDs are not always reliable. */ ivtv_process_eeprom(itv); } if (itv->card->comment) IVTV_INFO("%s", itv->card->comment); if (itv->card->v4l2_capabilities == 0) { /* card was detected but is not supported */ retval = -ENODEV; goto free_i2c; } if (itv->std == 0) { itv->std = V4L2_STD_NTSC_M; } if (itv->options.tuner == -1) { int i; for (i = 0; i < IVTV_CARD_MAX_TUNERS; i++) { if ((itv->std & itv->card->tuners[i].std) == 0) continue; itv->options.tuner = itv->card->tuners[i].tuner; break; } } /* if no tuner was found, then pick the first tuner in the card list */ if (itv->options.tuner == -1 && itv->card->tuners[0].std) { itv->std = itv->card->tuners[0].std; if (itv->std & V4L2_STD_PAL) itv->std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H; else if (itv->std & V4L2_STD_NTSC) itv->std = V4L2_STD_NTSC_M; else if (itv->std & V4L2_STD_SECAM) itv->std = V4L2_STD_SECAM_L; itv->options.tuner = itv->card->tuners[0].tuner; } if (itv->options.radio == -1) itv->options.radio = (itv->card->radio_input.audio_type != 0); /* The card is now fully identified, continue with card-specific initialization. */ ivtv_init_struct2(itv); ivtv_load_and_init_modules(itv); if (itv->std & V4L2_STD_525_60) { itv->is_60hz = 1; itv->is_out_60hz = 1; } else { itv->is_50hz = 1; itv->is_out_50hz = 1; } itv->yuv_info.osd_full_w = 720; itv->yuv_info.osd_full_h = itv->is_out_50hz ? 576 : 480; itv->yuv_info.v4l2_src_w = itv->yuv_info.osd_full_w; itv->yuv_info.v4l2_src_h = itv->yuv_info.osd_full_h; cx2341x_handler_set_50hz(&itv->cxhdl, itv->is_50hz); itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_MPG] = 0x08000; itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_PCM] = 0x01200; itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_MPG] = 0x10000; itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_YUV] = 0x10000; itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_YUV] = 0x08000; /* Setup VBI Raw Size. Should be big enough to hold PAL. It is possible to switch between PAL and NTSC, so we need to take the largest size here. 
*/ /* 1456 is multiple of 16, real size = 1444 */ itv->vbi.raw_size = 1456; /* We use a buffer size of 1/2 of the total size needed for a frame. This is actually very useful, since we now receive a field at a time and that makes 'compressing' the raw data down to size by stripping off the SAV codes a lot easier. Note: having two different buffer sizes prevents standard switching on the fly. We need to find a better solution... */ vbi_buf_size = itv->vbi.raw_size * (itv->is_60hz ? 24 : 36) / 2; itv->stream_buf_size[IVTV_ENC_STREAM_TYPE_VBI] = vbi_buf_size; itv->stream_buf_size[IVTV_DEC_STREAM_TYPE_VBI] = sizeof(struct v4l2_sliced_vbi_data) * 36; if (itv->options.radio > 0) itv->v4l2_cap |= V4L2_CAP_RADIO; if (itv->options.tuner > -1) { struct tuner_setup setup; setup.addr = ADDR_UNSET; setup.type = itv->options.tuner; setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */ if (itv->options.radio > 0) setup.mode_mask |= T_RADIO; setup.tuner_callback = (setup.type == TUNER_XC2028) ? ivtv_reset_tuner_gpio : NULL; ivtv_call_all(itv, tuner, s_type_addr, &setup); if (setup.type == TUNER_XC2028) { static struct xc2028_ctrl ctrl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64, }; struct v4l2_priv_tun_config cfg = { .tuner = itv->options.tuner, .priv = &ctrl, }; ivtv_call_all(itv, tuner, s_config, &cfg); } } /* The tuner is fixed to the standard. The other inputs (e.g. S-Video) are not. */ itv->tuner_std = itv->std; if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { struct v4l2_ctrl_handler *hdl = itv->v4l2_dev.ctrl_handler; itv->ctrl_pts = v4l2_ctrl_new_std(hdl, &ivtv_hdl_out_ops, V4L2_CID_MPEG_VIDEO_DEC_PTS, 0, 0, 0, 0); itv->ctrl_frame = v4l2_ctrl_new_std(hdl, &ivtv_hdl_out_ops, V4L2_CID_MPEG_VIDEO_DEC_FRAME, 0, 0, 0, 0); /* Note: V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO is not supported, mask that menu item. 
*/ itv->ctrl_audio_playback = v4l2_ctrl_new_std_menu(hdl, &ivtv_hdl_out_ops, V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK, V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO, 1 << V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO, V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO); itv->ctrl_audio_multilingual_playback = v4l2_ctrl_new_std_menu(hdl, &ivtv_hdl_out_ops, V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK, V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO, 1 << V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO, V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT); if (hdl->error) { retval = hdl->error; goto free_i2c; } v4l2_ctrl_cluster(2, &itv->ctrl_pts); v4l2_ctrl_cluster(2, &itv->ctrl_audio_playback); ivtv_call_all(itv, video, s_std_output, itv->std); /* Turn off the output signal. The mpeg decoder is not yet active so without this you would get a green image until the mpeg decoder becomes active. */ ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0); } /* clear interrupt mask, effectively disabling interrupts */ ivtv_set_irq_mask(itv, 0xffffffff); /* Register IRQ */ retval = request_irq(itv->pdev->irq, ivtv_irq_handler, IRQF_SHARED | IRQF_DISABLED, itv->v4l2_dev.name, (void *)itv); if (retval) { IVTV_ERR("Failed to register irq %d\n", retval); goto free_i2c; } retval = ivtv_streams_setup(itv); if (retval) { IVTV_ERR("Error %d setting up streams\n", retval); goto free_irq; } retval = ivtv_streams_register(itv); if (retval) { IVTV_ERR("Error %d registering devices\n", retval); goto free_streams; } IVTV_INFO("Initialized card: %s\n", itv->card_name); /* Load ivtv submodules (ivtv-alsa) */ request_modules(itv); return 0; free_streams: ivtv_streams_cleanup(itv, 1); free_irq: free_irq(itv->pdev->irq, (void *)itv); free_i2c: v4l2_ctrl_handler_free(&itv->cxhdl.hdl); exit_ivtv_i2c(itv); free_io: ivtv_iounmap(itv); free_mem: release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (itv->has_cx23415) release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); 
free_worker: kthread_stop(itv->irq_worker_task); err: if (retval == 0) retval = -ENODEV; IVTV_ERR("Error %d on initialization\n", retval); v4l2_device_unregister(&itv->v4l2_dev); kfree(itv); return retval; } int ivtv_init_on_first_open(struct ivtv *itv) { struct v4l2_frequency vf; /* Needed to call ioctls later */ struct ivtv_open_id fh; int fw_retry_count = 3; int video_input; fh.itv = itv; fh.type = IVTV_ENC_STREAM_TYPE_MPG; if (test_bit(IVTV_F_I_FAILED, &itv->i_flags)) return -ENXIO; if (test_and_set_bit(IVTV_F_I_INITED, &itv->i_flags)) return 0; while (--fw_retry_count > 0) { /* load firmware */ if (ivtv_firmware_init(itv) == 0) break; if (fw_retry_count > 1) IVTV_WARN("Retry loading firmware\n"); } if (fw_retry_count == 0) { set_bit(IVTV_F_I_FAILED, &itv->i_flags); return -ENXIO; } /* Try and get firmware versions */ IVTV_DEBUG_INFO("Getting firmware version..\n"); ivtv_firmware_versions(itv); if (itv->card->hw_all & IVTV_HW_CX25840) v4l2_subdev_call(itv->sd_video, core, load_fw); vf.tuner = 0; vf.type = V4L2_TUNER_ANALOG_TV; vf.frequency = 6400; /* the tuner 'baseline' frequency */ /* Set initial frequency. For PAL/SECAM broadcasts no 'default' channel exists AFAIK. */ if (itv->std == V4L2_STD_NTSC_M_JP) { vf.frequency = 1460; /* ch. 1 91250*16/1000 */ } else if (itv->std & V4L2_STD_NTSC_M) { vf.frequency = 1076; /* ch. 4 67250*16/1000 */ } video_input = itv->active_input; itv->active_input++; /* Force update of input */ ivtv_s_input(NULL, &fh, video_input); /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code in one place. */ itv->std++; /* Force full standard initialization */ itv->std_out = itv->std; ivtv_s_frequency(NULL, &fh, &vf); if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) { /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes the mpeg decoder so now the saa7127 receives a proper signal. 
*/ ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1); ivtv_init_mpeg_decoder(itv); } /* On a cx23416 this seems to be able to enable DMA to the chip? */ if (!itv->has_cx23415) write_reg_sync(0x03, IVTV_REG_DMACONTROL); ivtv_s_std_enc(itv, itv->tuner_std); /* Default interrupts enabled. For the PVR350 this includes the decoder VSYNC interrupt, which is always on. It is not only used during decoding but also by the OSD. Some old PVR250 cards had a cx23415, so testing for that is too general. Instead test if the card has video output capability. */ if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC); ivtv_set_osd_alpha(itv); ivtv_s_std_dec(itv, itv->tuner_std); } else { ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT); } /* Setup initial controls */ cx2341x_handler_setup(&itv->cxhdl); return 0; } static void ivtv_remove(struct pci_dev *pdev) { struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev); struct ivtv *itv = to_ivtv(v4l2_dev); int i; IVTV_DEBUG_INFO("Removing card\n"); flush_request_modules(itv); if (test_bit(IVTV_F_I_INITED, &itv->i_flags)) { /* Stop all captures */ IVTV_DEBUG_INFO("Stopping all streams\n"); if (atomic_read(&itv->capturing) > 0) ivtv_stop_all_captures(itv); /* Stop all decoding */ IVTV_DEBUG_INFO("Stopping decoding\n"); /* Turn off the TV-out */ if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 0); if (atomic_read(&itv->decoding) > 0) { int type; if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) type = IVTV_DEC_STREAM_TYPE_YUV; else type = IVTV_DEC_STREAM_TYPE_MPG; ivtv_stop_v4l2_decode_stream(&itv->streams[type], V4L2_DEC_CMD_STOP_TO_BLACK | V4L2_DEC_CMD_STOP_IMMEDIATELY, 0); } ivtv_halt_firmware(itv); } /* Interrupts */ ivtv_set_irq_mask(itv, 0xffffffff); del_timer_sync(&itv->dma_timer); /* Kill irq worker */ flush_kthread_worker(&itv->irq_worker); kthread_stop(itv->irq_worker_task); ivtv_streams_cleanup(itv, 1); ivtv_udma_free(itv); 
v4l2_ctrl_handler_free(&itv->cxhdl.hdl); exit_ivtv_i2c(itv); free_irq(itv->pdev->irq, (void *)itv); ivtv_iounmap(itv); release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE); release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (itv->has_cx23415) release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); pci_disable_device(itv->pdev); for (i = 0; i < IVTV_VBI_FRAMES; i++) kfree(itv->vbi.sliced_mpeg_data[i]); printk(KERN_INFO "ivtv: Removed %s\n", itv->card_name); v4l2_device_unregister(&itv->v4l2_dev); kfree(itv); } /* define a pci_driver for card detection */ static struct pci_driver ivtv_pci_driver = { .name = "ivtv", .id_table = ivtv_pci_tbl, .probe = ivtv_probe, .remove = ivtv_remove, }; static int __init module_start(void) { printk(KERN_INFO "ivtv: Start initialization, version %s\n", IVTV_VERSION); /* Validate parameters */ if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) { printk(KERN_ERR "ivtv: Exiting, ivtv_first_minor must be between 0 and %d\n", IVTV_MAX_CARDS - 1); return -1; } if (ivtv_debug < 0 || ivtv_debug > 2047) { ivtv_debug = 0; printk(KERN_INFO "ivtv: Debug value must be >= 0 and <= 2047\n"); } if (pci_register_driver(&ivtv_pci_driver)) { printk(KERN_ERR "ivtv: Error detecting PCI card\n"); return -ENODEV; } printk(KERN_INFO "ivtv: End initialization\n"); return 0; } static void __exit module_cleanup(void) { pci_unregister_driver(&ivtv_pci_driver); } /* Note: These symbols are exported because they are used by the ivtvfb framebuffer module and an infrared module for the IR-blaster. 
*/ EXPORT_SYMBOL(ivtv_set_irq_mask); EXPORT_SYMBOL(ivtv_api); EXPORT_SYMBOL(ivtv_vapi); EXPORT_SYMBOL(ivtv_vapi_result); EXPORT_SYMBOL(ivtv_clear_irq_mask); EXPORT_SYMBOL(ivtv_debug); #ifdef CONFIG_VIDEO_ADV_DEBUG EXPORT_SYMBOL(ivtv_fw_debug); #endif EXPORT_SYMBOL(ivtv_reset_ir_gpio); EXPORT_SYMBOL(ivtv_udma_setup); EXPORT_SYMBOL(ivtv_udma_unmap); EXPORT_SYMBOL(ivtv_udma_alloc); EXPORT_SYMBOL(ivtv_udma_prepare); EXPORT_SYMBOL(ivtv_init_on_first_open); EXPORT_SYMBOL(ivtv_firmware_check); module_init(module_start); module_exit(module_cleanup);
gpl-2.0
keks2293/kernel_zte
drivers/crypto/picoxcell_crypto.c
2084
54299
/* * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <crypto/aead.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/authenc.h> #include <crypto/des.h> #include <crypto/md5.h> #include <crypto/sha.h> #include <crypto/internal/skcipher.h> #include <linux/clk.h> #include <linux/crypto.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/rtnetlink.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/timer.h> #include "picoxcell_crypto_regs.h" /* * The threshold for the number of entries in the CMD FIFO available before * the CMD0_CNT interrupt is raised. Increasing this value will reduce the * number of interrupts raised to the CPU. */ #define CMD0_IRQ_THRESHOLD 1 /* * The timeout period (in jiffies) for a PDU. When the the number of PDUs in * flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled. 
* When there are packets in flight but lower than the threshold, we enable * the timer and at expiry, attempt to remove any processed packets from the * queue and if there are still packets left, schedule the timer again. */ #define PACKET_TIMEOUT 1 /* The priority to register each algorithm with. */ #define SPACC_CRYPTO_ALG_PRIORITY 10000 #define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16 #define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64 #define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64 #define SPACC_CRYPTO_IPSEC_MAX_CTXS 32 #define SPACC_CRYPTO_IPSEC_FIFO_SZ 32 #define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64 #define SPACC_CRYPTO_L2_HASH_PG_SZ 64 #define SPACC_CRYPTO_L2_MAX_CTXS 128 #define SPACC_CRYPTO_L2_FIFO_SZ 128 #define MAX_DDT_LEN 16 /* DDT format. This must match the hardware DDT format exactly. */ struct spacc_ddt { dma_addr_t p; u32 len; }; /* * Asynchronous crypto request structure. * * This structure defines a request that is either queued for processing or * being processed. */ struct spacc_req { struct list_head list; struct spacc_engine *engine; struct crypto_async_request *req; int result; bool is_encrypt; unsigned ctx_id; dma_addr_t src_addr, dst_addr; struct spacc_ddt *src_ddt, *dst_ddt; void (*complete)(struct spacc_req *req); /* AEAD specific bits. */ u8 *giv; size_t giv_len; dma_addr_t giv_pa; }; struct spacc_engine { void __iomem *regs; struct list_head pending; int next_ctx; spinlock_t hw_lock; int in_flight; struct list_head completed; struct list_head in_progress; struct tasklet_struct complete; unsigned long fifo_sz; void __iomem *cipher_ctx_base; void __iomem *hash_key_base; struct spacc_alg *algs; unsigned num_algs; struct list_head registered_algs; size_t cipher_pg_sz; size_t hash_pg_sz; const char *name; struct clk *clk; struct device *dev; unsigned max_ctxs; struct timer_list packet_timeout; unsigned stat_irq_thresh; struct dma_pool *req_pool; }; /* Algorithm type mask. */ #define SPACC_CRYPTO_ALG_MASK 0x7 /* SPACC definition of a crypto algorithm. 
*/ struct spacc_alg { unsigned long ctrl_default; unsigned long type; struct crypto_alg alg; struct spacc_engine *engine; struct list_head entry; int key_offs; int iv_offs; }; /* Generic context structure for any algorithm type. */ struct spacc_generic_ctx { struct spacc_engine *engine; int flags; int key_offs; int iv_offs; }; /* Block cipher context. */ struct spacc_ablk_ctx { struct spacc_generic_ctx generic; u8 key[AES_MAX_KEY_SIZE]; u8 key_len; /* * The fallback cipher. If the operation can't be done in hardware, * fallback to a software version. */ struct crypto_ablkcipher *sw_cipher; }; /* AEAD cipher context. */ struct spacc_aead_ctx { struct spacc_generic_ctx generic; u8 cipher_key[AES_MAX_KEY_SIZE]; u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ]; u8 cipher_key_len; u8 hash_key_len; struct crypto_aead *sw_cipher; size_t auth_size; u8 salt[AES_BLOCK_SIZE]; }; static int spacc_ablk_submit(struct spacc_req *req); static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg) { return alg ? container_of(alg, struct spacc_alg, alg) : NULL; } static inline int spacc_fifo_cmd_full(struct spacc_engine *engine) { u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET); return fifo_stat & SPA_FIFO_CMD_FULL; } /* * Given a cipher context, and a context number, get the base address of the * context page. * * Returns the address of the context page where the key/context may * be written. */ static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx, unsigned indx, bool is_cipher_ctx) { return is_cipher_ctx ? ctx->engine->cipher_ctx_base + (indx * ctx->engine->cipher_pg_sz) : ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz); } /* The context pages can only be written with 32-bit accesses. 
*/ static inline void memcpy_toio32(u32 __iomem *dst, const void *src, unsigned count) { const u32 *src32 = (const u32 *) src; while (count--) writel(*src32++, dst++); } static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx, void __iomem *page_addr, const u8 *key, size_t key_len, const u8 *iv, size_t iv_len) { void __iomem *key_ptr = page_addr + ctx->key_offs; void __iomem *iv_ptr = page_addr + ctx->iv_offs; memcpy_toio32(key_ptr, key, key_len / 4); memcpy_toio32(iv_ptr, iv, iv_len / 4); } /* * Load a context into the engines context memory. * * Returns the index of the context page where the context was loaded. */ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, const u8 *ciph_key, size_t ciph_len, const u8 *iv, size_t ivlen, const u8 *hash_key, size_t hash_len) { unsigned indx = ctx->engine->next_ctx++; void __iomem *ciph_page_addr, *hash_page_addr; ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1); hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0); ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1; spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv, ivlen); writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) | (1 << SPA_KEY_SZ_CIPHER_OFFSET), ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET); if (hash_key) { memcpy_toio32(hash_page_addr, hash_key, hash_len / 4); writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET), ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET); } return indx; } /* Count the number of scatterlist entries in a scatterlist. */ static int sg_count(struct scatterlist *sg_list, int nbytes) { struct scatterlist *sg = sg_list; int sg_nents = 0; while (nbytes > 0) { ++sg_nents; nbytes -= sg->length; sg = sg_next(sg); } return sg_nents; } static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) { ddt->p = phys; ddt->len = len; } /* * Take a crypto request and scatterlists for the data and turn them into DDTs * for passing to the crypto engines. 
This also DMA maps the data so that the * crypto engines can DMA to/from them. */ static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine, struct scatterlist *payload, unsigned nbytes, enum dma_data_direction dir, dma_addr_t *ddt_phys) { unsigned nents, mapped_ents; struct scatterlist *cur; struct spacc_ddt *ddt; int i; nents = sg_count(payload, nbytes); mapped_ents = dma_map_sg(engine->dev, payload, nents, dir); if (mapped_ents + 1 > MAX_DDT_LEN) goto out; ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys); if (!ddt) goto out; for_each_sg(payload, cur, mapped_ents, i) ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur)); ddt_set(&ddt[mapped_ents], 0, 0); return ddt; out: dma_unmap_sg(engine->dev, payload, nents, dir); return NULL; } static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) { struct aead_request *areq = container_of(req->req, struct aead_request, base); struct spacc_engine *engine = req->engine; struct spacc_ddt *src_ddt, *dst_ddt; unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); unsigned nents = sg_count(areq->src, areq->cryptlen); dma_addr_t iv_addr; struct scatterlist *cur; int i, dst_ents, src_ents, assoc_ents; u8 *iv = giv ? giv : areq->iv; src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); if (!src_ddt) return -ENOMEM; dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); if (!dst_ddt) { dma_pool_free(engine->req_pool, src_ddt, req->src_addr); return -ENOMEM; } req->src_ddt = src_ddt; req->dst_ddt = dst_ddt; assoc_ents = dma_map_sg(engine->dev, areq->assoc, sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE); if (areq->src != areq->dst) { src_ents = dma_map_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); dst_ents = dma_map_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE); } else { src_ents = dma_map_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); dst_ents = 0; } /* * Map the IV/GIV. 
 * For the GIV it needs to be bidirectional as it is
 * formed by the crypto block and sent as the ESP IV for IPSEC.
 */
iv_addr = dma_map_single(engine->dev, iv, ivsize, giv ? DMA_BIDIRECTIONAL :
			 DMA_TO_DEVICE);
req->giv_pa = iv_addr;

/*
 * Map the associated data. For decryption we don't copy the
 * associated data.
 */
for_each_sg(areq->assoc, cur, assoc_ents, i) {
	ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
	if (req->is_encrypt)
		ddt_set(dst_ddt++, sg_dma_address(cur), sg_dma_len(cur));
}

/* The IV follows the associated data in the DDT chains. */
ddt_set(src_ddt++, iv_addr, ivsize);
if (giv || req->is_encrypt)
	ddt_set(dst_ddt++, iv_addr, ivsize);

/*
 * Now map in the payload for the source and destination and terminate
 * with the NULL pointers.
 */
for_each_sg(areq->src, cur, src_ents, i) {
	ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
	/* In-place operation: mirror the entries into the dst DDT too. */
	if (areq->src == areq->dst)
		ddt_set(dst_ddt++, sg_dma_address(cur), sg_dma_len(cur));
}
for_each_sg(areq->dst, cur, dst_ents, i)
	ddt_set(dst_ddt++, sg_dma_address(cur), sg_dma_len(cur));

/* Zero address/length entries terminate each DDT list. */
ddt_set(src_ddt, 0, 0);
ddt_set(dst_ddt, 0, 0);

return 0;
}

/*
 * Undo spacc_aead_make_ddts(): unmap the src/dst/assoc scatterlists and the
 * IV single mapping, then return the DDTs to the DMA pool.
 */
static void spacc_aead_free_ddts(struct spacc_req *req)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
	/*
	 * NOTE(review): this uses struct spacc_ablk_ctx for an AEAD tfm and
	 * only touches the leading "generic" member; presumably both ctx
	 * structs start with the same generic header — confirm against the
	 * struct definitions.
	 */
	struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
	struct spacc_engine *engine = aead_ctx->generic.engine;
	unsigned ivsize = alg->alg.cra_aead.ivsize;
	unsigned nents = sg_count(areq->src, areq->cryptlen);

	if (areq->src != areq->dst) {
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
		dma_unmap_sg(engine->dev, areq->dst,
			     sg_count(areq->dst, areq->cryptlen),
			     DMA_FROM_DEVICE);
	} else
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

	dma_unmap_sg(engine->dev, areq->assoc,
		     sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);

	dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);

	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}

/* Unmap a payload scatterlist and free the DDT that described it. */
static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
			   dma_addr_t ddt_addr, struct scatterlist *payload,
			   unsigned nbytes, enum dma_data_direction dir)
{
	unsigned nents = sg_count(payload, nbytes);

	dma_unmap_sg(req->engine->dev, payload, nents, dir);
	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}

/*
 * Set key for a DES operation in an AEAD cipher. This also performs weak key
 * checking if required.
 */
static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	/* des_ekey() returns 0 for a weak key; reject only if asked to. */
	if (unlikely(!des_ekey(tmp, key)) &&
	    (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->cipher_key, key, len);
	ctx->cipher_key_len = len;

	return 0;
}

/* Set the key for the AES block cipher component of the AEAD transform. */
static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
*/ ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; ctx->sw_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; return crypto_aead_setkey(ctx->sw_cipher, key, len); } memcpy(ctx->cipher_key, key, len); ctx->cipher_key_len = len; return 0; } static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); struct rtattr *rta = (void *)key; struct crypto_authenc_key_param *param; unsigned int authkeylen, enckeylen; int err = -EINVAL; if (!RTA_OK(rta, keylen)) goto badkey; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) goto badkey; if (RTA_PAYLOAD(rta) < sizeof(*param)) goto badkey; param = RTA_DATA(rta); enckeylen = be32_to_cpu(param->enckeylen); key += RTA_ALIGN(rta->rta_len); keylen -= RTA_ALIGN(rta->rta_len); if (keylen < enckeylen) goto badkey; authkeylen = keylen - enckeylen; if (enckeylen > AES_MAX_KEY_SIZE) goto badkey; if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == SPA_CTRL_CIPH_ALG_AES) err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen); else err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen); if (err) goto badkey; memcpy(ctx->hash_ctx, key, authkeylen); ctx->hash_key_len = authkeylen; return 0; badkey: crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } static int spacc_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm)); ctx->auth_size = authsize; return 0; } /* * Check if an AEAD request requires a fallback operation. Some requests can't * be completed in hardware because the hardware may not support certain key * sizes. In these cases we need to complete the request in software. 
 */
static int spacc_aead_need_fallback(struct spacc_req *req)
{
	struct aead_request *aead_req;
	struct crypto_tfm *tfm = req->req->tfm;
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	aead_req = container_of(req->req, struct aead_request, base);

	/*
	 * If we have a non-supported key-length, then we need to do a
	 * software fallback.
	 */
	if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
	    SPA_CTRL_CIPH_ALG_AES &&
	    ctx->cipher_key_len != AES_KEYSIZE_128 &&
	    ctx->cipher_key_len != AES_KEYSIZE_256)
		return 1;

	return 0;
}

/* Run an AEAD request through the software fallback transform. */
static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
				  bool is_encrypt)
{
	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	if (ctx->sw_cipher) {
		/*
		 * Change the request to use the software fallback transform,
		 * and once the ciphering has completed, put the old transform
		 * back into the request.
		 */
		aead_request_set_tfm(req, ctx->sw_cipher);
		err = is_encrypt ? crypto_aead_encrypt(req) :
		    crypto_aead_decrypt(req);
		aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
	} else
		err = -EINVAL;

	return err;
}

/* Completion handler: release the DDT mappings and notify the caller. */
static void spacc_aead_complete(struct spacc_req *req)
{
	spacc_aead_free_ddts(req);
	req->req->complete(req->req, req->result);
}

/*
 * Load the cipher/hash context into the engine and program the registers for
 * one AEAD packet.  Caller must hold a FIFO slot (see spacc_push()).
 */
static int spacc_aead_submit(struct spacc_req *req)
{
	struct crypto_tfm *tfm = req->req->tfm;
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl, proc_len, assoc_len;
	struct aead_request *aead_req =
	    container_of(req->req, struct aead_request, base);

	req->result = -EINPROGRESS;
	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
		ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
		ctx->hash_ctx, ctx->hash_key_len);

	/* Set the source and destination DDT pointers. */
	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	assoc_len = aead_req->assoclen;
	proc_len = aead_req->cryptlen + assoc_len;

	/*
	 * If we aren't generating an IV, then we need to include the IV in the
	 * associated data so that it is included in the hash.
	 */
	if (!req->giv) {
		assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
		proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
	} else
		proc_len += req->giv_len;

	/*
	 * If we are decrypting, we need to take the length of the ICV out of
	 * the processing length.
	 */
	if (!req->is_encrypt)
		proc_len -= ctx->auth_size;

	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
	writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(1 << SPA_CTRL_ICV_APPEND);
	if (req->is_encrypt)
		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
	else
		ctrl |= (1 << SPA_CTRL_KEY_EXP);

	/* Arm the watchdog before starting the packet. */
	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	/* Writing the control register starts processing. */
	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_req_submit(struct spacc_req *req);

/*
 * Move requests from the pending list into the hardware while command FIFO
 * slots remain.  Caller must hold engine->hw_lock.
 */
static void spacc_push(struct spacc_engine *engine)
{
	struct spacc_req *req;

	while (!list_empty(&engine->pending) &&
	       engine->in_flight + 1 <= engine->fifo_sz) {
		++engine->in_flight;
		req = list_first_entry(&engine->pending, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->in_progress);
		req->result = spacc_req_submit(req);
	}
}

/*
 * Setup an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 *
 * @giv Pointer to destination address for a generated IV. If the
 *	request does not need to generate an IV then this should be set to NULL.
 */
static int spacc_aead_setup(struct aead_request *req, u8 *giv,
			    unsigned alg_type, bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = aead_request_ctx(req);
	int err = -EINPROGRESS;
	unsigned long flags;
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

	dev_req->giv = giv;
	dev_req->giv_len = ivsize;
	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->result = -EBUSY;
	dev_req->engine = engine;
	dev_req->complete = spacc_aead_complete;

	/* Unsupported key sizes are handed entirely to software. */
	if (unlikely(spacc_aead_need_fallback(dev_req)))
		return spacc_aead_do_fallback(req, alg_type, is_encrypt);

	spacc_aead_make_ddts(dev_req, dev_req->giv);

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	/* If the engine is busy, backlog the request or bail out. */
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_aead_free_ddts(dev_req);
out:
	return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 1);
}

/*
 * Encrypt and generate the IV: per-context random salt with the request
 * sequence number folded into the trailing 8 bytes (big endian).
 */
static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	size_t ivsize = crypto_aead_ivsize(tfm);
	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
	unsigned len;
	__be64 seq;

	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq =
	    cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
}

static int spacc_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 0);
}

/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	/* The fallback is optional: warn and carry on without one. */
	ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
					   CRYPTO_ALG_ASYNC |
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->sw_cipher)) {
		dev_warn(engine->dev, "failed to allocate fallback for %s\n",
			 alg->cra_name);
		ctx->sw_cipher = NULL;
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	/* Random salt for IV generation (see spacc_aead_givencrypt()). */
	get_random_bytes(ctx->salt, sizeof(ctx->salt));

	tfm->crt_aead.reqsize = sizeof(struct spacc_req);

	return 0;
}

/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_aead(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

/*
 * Set the DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	/* ctx->key is sized for 3DES; reject anything larger. */
	if (len > DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* des_ekey() returns 0 for a weak key; reject only if asked to. */
	if (unlikely(!des_ekey(tmp, key)) &&
	    (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

	return 0;
}

/*
 * Set the key for an AES block cipher. Some key lengths are not supported in
 * hardware so this must also check whether a fallback is needed.
 */
static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
	    ctx->sw_cipher) {
		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
		 */
		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->sw_cipher->base.crt_flags |=
			cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;

		err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
		if (err)
			goto sw_setkey_failed;
	} else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
		   !ctx->sw_cipher)
		/* Unsupported key size and no fallback available. */
		err = -EINVAL;

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

sw_setkey_failed:
	/* Propagate the fallback's result flags back to our transform. */
	if (err && ctx->sw_cipher) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |=
			ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
	}

	return err;
}

/* Store the KASUMI F8 key; length is only bounded by the key buffer size. */
static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
				  const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		err = -EINVAL;
		goto out;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

out:
	return err;
}

/* AES with a key length the engine can't handle must use the fallback. */
static int spacc_ablk_need_fallback(struct spacc_req *req)
{
	struct spacc_ablk_ctx *ctx;
	struct crypto_tfm *tfm = req->req->tfm;
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);

	ctx = crypto_tfm_ctx(tfm);

	return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
			SPA_CTRL_CIPH_ALG_AES &&
			ctx->key_len != AES_KEYSIZE_128 &&
			ctx->key_len != AES_KEYSIZE_256;
}

/* Completion handler: unmap/free the DDTs then notify the caller. */
static void spacc_ablk_complete(struct spacc_req *req)
{
	struct ablkcipher_request *ablk_req =
		container_of(req->req, struct ablkcipher_request, base);

	if (ablk_req->src != ablk_req->dst) {
		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
			       ablk_req->nbytes, DMA_TO_DEVICE);
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->nbytes, DMA_FROM_DEVICE);
	} else
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->nbytes, DMA_BIDIRECTIONAL);

	req->req->complete(req->req, req->result);
}

/*
 * Load the cipher context and program the registers for one block cipher
 * packet.  Caller must hold a FIFO slot (see spacc_push()).
 */
static int spacc_ablk_submit(struct spacc_req *req)
{
	struct crypto_tfm *tfm = req->req->tfm;
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl;

	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
		ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
		NULL, 0);

	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
		 (1 << SPA_CTRL_KEY_EXP));

	/* Arm the watchdog, then start the packet via the control register. */
	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

/* Run a block cipher request through the software fallback transform. */
static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
				  unsigned alg_type, bool is_encrypt)
{
	struct crypto_tfm *old_tfm =
	    crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	if (!ctx->sw_cipher)
		return -EINVAL;

	/*
	 * Change the request to use the software fallback transform, and once
	 * the ciphering has completed, put the old transform back into the
	 * request.
	 */
	ablkcipher_request_set_tfm(req, ctx->sw_cipher);
	err = is_encrypt ?
	    crypto_ablkcipher_encrypt(req) :
	    crypto_ablkcipher_decrypt(req);
	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));

	return err;
}

/* Build the DDTs for a block cipher request and queue it for the engine. */
static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
			    bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
	unsigned long flags;
	int err = -ENOMEM;

	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->engine = engine;
	dev_req->complete = spacc_ablk_complete;
	dev_req->result = -EINPROGRESS;

	if (unlikely(spacc_ablk_need_fallback(dev_req)))
		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

	/*
	 * Create the DDT's for the engine. If we share the same source and
	 * destination then we can optimize by reusing the DDT's.
	 */
	if (req->src != req->dst) {
		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
		if (!dev_req->src_ddt)
			goto out;

		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out_free_src;
	} else {
		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out;

		dev_req->src_ddt = NULL;
		dev_req->src_addr = dev_req->dst_addr;
	}

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	/*
	 * Check if the engine will accept the operation now. If it won't then
	 * we either stick it on the end of a pending list if we can backlog,
	 * or bailout with an error if not.
	 */
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
		       req->nbytes, req->src == req->dst ?
		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
	if (req->src != req->dst)
		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
			       req->src, req->nbytes, DMA_TO_DEVICE);
out:
	return err;
}

/* Initialise a block cipher context, allocating a fallback if requested. */
static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		/* The fallback is optional: warn and carry on without one. */
		ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher)) {
			dev_warn(engine->dev,
				 "failed to allocate fallback for %s\n",
				 alg->cra_name);
			ctx->sw_cipher = NULL;
		}
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);

	return 0;
}

static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_ablkcipher(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

static int spacc_ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 1);
}

static int
spacc_ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
		SPA_FIFO_STAT_EMPTY;
}

/*
 * Reap completed packets from the status FIFO, translating hardware status
 * codes into errnos.  Completion callbacks run later from the tasklet.
 */
static void spacc_process_done(struct spacc_engine *engine)
{
	struct spacc_req *req;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	while (!spacc_fifo_stat_empty(engine)) {
		req = list_first_entry(&engine->in_progress, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->completed);
		--engine->in_flight;

		/* POP the status register. */
		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
		     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

		/*
		 * Convert the SPAcc error status into the standard POSIX error
		 * codes.
		 */
		if (unlikely(req->result)) {
			switch (req->result) {
			case SPA_STATUS_ICV_FAIL:
				req->result = -EBADMSG;
				break;

			case SPA_STATUS_MEMORY_ERROR:
				dev_warn(engine->dev,
					 "memory error triggered\n");
				req->result = -EFAULT;
				break;

			case SPA_STATUS_BLOCK_ERROR:
				dev_warn(engine->dev,
					 "block error triggered\n");
				req->result = -EIO;
				break;
			}
		}
	}

	tasklet_schedule(&engine->complete);

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

/* IRQ handler: acknowledge the interrupt and reap completed packets. */
static irqreturn_t spacc_spacc_irq(int irq, void *dev)
{
	struct spacc_engine *engine = (struct spacc_engine *)dev;
	u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);

	writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
	spacc_process_done(engine);

	return IRQ_HANDLED;
}

/* Watchdog: poll for completions in case the expected IRQ never fired. */
static void spacc_packet_timeout(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;

	spacc_process_done(engine);
}

/* Dispatch a queued request to the AEAD or block cipher submit path. */
static int spacc_req_submit(struct spacc_req *req)
{
	struct crypto_alg *alg = req->req->tfm->__crt_alg;

	if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
		return spacc_aead_submit(req);
	else
		return spacc_ablk_submit(req);
}

/*
 * Tasklet: refill the hardware FIFO, then run completion callbacks outside
 * of hw_lock on a private list.
 */
static void spacc_spacc_complete(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;
	struct spacc_req *req, *tmp;
	unsigned long flags;
	LIST_HEAD(completed);

	spin_lock_irqsave(&engine->hw_lock, flags);

	list_splice_init(&engine->completed, &completed);
	spacc_push(engine);
	if (engine->in_flight)
		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	spin_unlock_irqrestore(&engine->hw_lock, flags);

	list_for_each_entry_safe(req, tmp, &completed, list) {
		list_del(&req->list);
		req->complete(req);
	}
}

#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	/*
	 * We only support standby mode. All we have to do is gate the clock to
	 * the spacc. The hardware will preserve state until we turn it back
	 * on again.
	 */
	clk_disable(engine->clk);

	return 0;
}

static int spacc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	return clk_enable(engine->clk);
}

static const struct dev_pm_ops spacc_pm_ops = {
	.suspend	= spacc_suspend,
	.resume		= spacc_resume,
};
#endif /* CONFIG_PM */

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
	return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
}

/* sysfs: show the current statistics IRQ threshold. */
static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
}

/* sysfs: set the statistics IRQ threshold, clamped to 1..fifo_sz-1. */
static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);
	unsigned long thresh;

	if (strict_strtoul(buf, 0, &thresh))
		return -EINVAL;

	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);

	engine->stat_irq_thresh = thresh;
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);

	return len;
}
static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
		   spacc_stat_irq_thresh_store);

/* Algorithms exposed by the IPSEC variant of the SPAcc engine. */
static struct spacc_alg ipsec_engine_algs[] = {
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_aes_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize =
				    AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_aes_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-des3-ede-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-des3-ede-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA |
				SPA_CTRL_HASH_MODE_HMAC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA256 |
				SPA_CTRL_HASH_MODE_HMAC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type =
&crypto_aead_type, .cra_module = THIS_MODULE, .cra_aead = { .setkey = spacc_aead_setkey, .setauthsize = spacc_aead_setauthsize, .encrypt = spacc_aead_encrypt, .decrypt = spacc_aead_decrypt, .givencrypt = spacc_aead_givencrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .cra_init = spacc_aead_cra_init, .cra_exit = spacc_aead_cra_exit, }, }, { .key_offs = DES_BLOCK_SIZE, .iv_offs = 0, .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, .alg = { .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct spacc_aead_ctx), .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_aead = { .setkey = spacc_aead_setkey, .setauthsize = spacc_aead_setauthsize, .encrypt = spacc_aead_encrypt, .decrypt = spacc_aead_decrypt, .givencrypt = spacc_aead_givencrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .cra_init = spacc_aead_cra_init, .cra_exit = spacc_aead_cra_exit, }, }, { .key_offs = DES_BLOCK_SIZE, .iv_offs = 0, .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | SPA_CTRL_HASH_ALG_SHA256 | SPA_CTRL_HASH_MODE_HMAC, .alg = { .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell", .cra_priority = SPACC_CRYPTO_ALG_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct spacc_aead_ctx), .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_aead = { .setkey = spacc_aead_setkey, .setauthsize = spacc_aead_setauthsize, .encrypt = spacc_aead_encrypt, .decrypt = spacc_aead_decrypt, .givencrypt = spacc_aead_givencrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize 
				    = SHA256_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
};

/* Algorithms exposed by the layer-2 (air interface) SPAcc variant. */
static struct spacc_alg l2_engine_algs[] = {
	{
		.key_offs = 0,
		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
				SPA_CTRL_CIPH_MODE_F8,
		.alg = {
			.cra_name = "f8(kasumi)",
			.cra_driver_name = "f8-kasumi-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 8,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_kasumi_f8_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = 16,
				.max_keysize = 16,
				.ivsize = 8,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
};

#ifdef CONFIG_OF
static const struct of_device_id spacc_of_id_table[] = {
	{ .compatible = "picochip,spacc-ipsec" },
	{ .compatible = "picochip,spacc-l2" },
	{}
};
#endif /* CONFIG_OF */

/* Match either the platform device id table or the DT compatible string. */
static bool spacc_is_compatible(struct platform_device *pdev,
				const char *spacc_type)
{
	const
struct platform_device_id *platid = platform_get_device_id(pdev); if (platid && !strcmp(platid->name, spacc_type)) return true; #ifdef CONFIG_OF if (of_device_is_compatible(pdev->dev.of_node, spacc_type)) return true; #endif /* CONFIG_OF */ return false; } static int spacc_probe(struct platform_device *pdev) { int i, err, ret = -EINVAL; struct resource *mem, *irq; struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), GFP_KERNEL); if (!engine) return -ENOMEM; if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) { engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS; engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ; engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ; engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ; engine->algs = ipsec_engine_algs; engine->num_algs = ARRAY_SIZE(ipsec_engine_algs); } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) { engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS; engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ; engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ; engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ; engine->algs = l2_engine_algs; engine->num_algs = ARRAY_SIZE(l2_engine_algs); } else { return -EINVAL; } engine->name = dev_name(&pdev->dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!mem || !irq) { dev_err(&pdev->dev, "no memory/irq resource for engine\n"); return -ENXIO; } if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), engine->name)) return -ENOMEM; engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!engine->regs) { dev_err(&pdev->dev, "memory map failed\n"); return -ENOMEM; } if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, engine->name, engine)) { dev_err(engine->dev, "failed to request IRQ\n"); return -EBUSY; } engine->dev = &pdev->dev; engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET; engine->hash_key_base = engine->regs + 
SPA_HASH_KEY_BASE_REG_OFFSET; engine->req_pool = dmam_pool_create(engine->name, engine->dev, MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K); if (!engine->req_pool) return -ENOMEM; spin_lock_init(&engine->hw_lock); engine->clk = clk_get(&pdev->dev, "ref"); if (IS_ERR(engine->clk)) { dev_info(&pdev->dev, "clk unavailable\n"); device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); return PTR_ERR(engine->clk); } if (clk_enable(engine->clk)) { dev_info(&pdev->dev, "unable to enable clk\n"); clk_put(engine->clk); return -EIO; } err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); if (err) { clk_disable(engine->clk); clk_put(engine->clk); return err; } /* * Use an IRQ threshold of 50% as a default. This seems to be a * reasonable trade off of latency against throughput but can be * changed at runtime. */ engine->stat_irq_thresh = (engine->fifo_sz / 2); /* * Configure the interrupts. We only use the STAT_CNT interrupt as we * only submit a new packet for processing when we complete another in * the queue. This minimizes time spent in the interrupt handler. 
*/ writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, engine->regs + SPA_IRQ_CTRL_REG_OFFSET); writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN, engine->regs + SPA_IRQ_EN_REG_OFFSET); setup_timer(&engine->packet_timeout, spacc_packet_timeout, (unsigned long)engine); INIT_LIST_HEAD(&engine->pending); INIT_LIST_HEAD(&engine->completed); INIT_LIST_HEAD(&engine->in_progress); engine->in_flight = 0; tasklet_init(&engine->complete, spacc_spacc_complete, (unsigned long)engine); platform_set_drvdata(pdev, engine); INIT_LIST_HEAD(&engine->registered_algs); for (i = 0; i < engine->num_algs; ++i) { engine->algs[i].engine = engine; err = crypto_register_alg(&engine->algs[i].alg); if (!err) { list_add_tail(&engine->algs[i].entry, &engine->registered_algs); ret = 0; } if (err) dev_err(engine->dev, "failed to register alg \"%s\"\n", engine->algs[i].alg.cra_name); else dev_dbg(engine->dev, "registered alg \"%s\"\n", engine->algs[i].alg.cra_name); } return ret; } static int spacc_remove(struct platform_device *pdev) { struct spacc_alg *alg, *next; struct spacc_engine *engine = platform_get_drvdata(pdev); del_timer_sync(&engine->packet_timeout); device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { list_del(&alg->entry); crypto_unregister_alg(&alg->alg); } clk_disable(engine->clk); clk_put(engine->clk); return 0; } static const struct platform_device_id spacc_id_table[] = { { "picochip,spacc-ipsec", }, { "picochip,spacc-l2", }, { } }; static struct platform_driver spacc_driver = { .probe = spacc_probe, .remove = spacc_remove, .driver = { .name = "picochip,spacc", #ifdef CONFIG_PM .pm = &spacc_pm_ops, #endif /* CONFIG_PM */ .of_match_table = of_match_ptr(spacc_of_id_table), }, .id_table = spacc_id_table, }; module_platform_driver(spacc_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamie Iles");
gpl-2.0
chaveiro/LG_P920_V30A_Kernel
fs/xfs/xfs_trans_ail.c
2340
23949
/* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. * Copyright (c) 2008 Dave Chinner * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_trans_priv.h" #include "xfs_error.h" #ifdef DEBUG /* * Check that the list is sorted as it should be. */ STATIC void xfs_ail_check( struct xfs_ail *ailp, xfs_log_item_t *lip) { xfs_log_item_t *prev_lip; if (list_empty(&ailp->xa_ail)) return; /* * Check the next and previous entries are valid. */ ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail); if (&prev_lip->li_ail != &ailp->xa_ail) ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail); if (&prev_lip->li_ail != &ailp->xa_ail) ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0); #ifdef XFS_TRANS_DEBUG /* * Walk the list checking lsn ordering, and that every entry has the * XFS_LI_IN_AIL flag set. This is really expensive, so only do it * when specifically debugging the transaction subsystem. 
*/ prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); list_for_each_entry(lip, &ailp->xa_ail, li_ail) { if (&prev_lip->li_ail != &ailp->xa_ail) ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); prev_lip = lip; } #endif /* XFS_TRANS_DEBUG */ } #else /* !DEBUG */ #define xfs_ail_check(a,l) #endif /* DEBUG */ /* * Return a pointer to the first item in the AIL. If the AIL is empty, then * return NULL. */ static xfs_log_item_t * xfs_ail_min( struct xfs_ail *ailp) { if (list_empty(&ailp->xa_ail)) return NULL; return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); } /* * Return a pointer to the last item in the AIL. If the AIL is empty, then * return NULL. */ static xfs_log_item_t * xfs_ail_max( struct xfs_ail *ailp) { if (list_empty(&ailp->xa_ail)) return NULL; return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail); } /* * Return a pointer to the item which follows the given item in the AIL. If * the given item is the last item in the list, then return NULL. */ static xfs_log_item_t * xfs_ail_next( struct xfs_ail *ailp, xfs_log_item_t *lip) { if (lip->li_ail.next == &ailp->xa_ail) return NULL; return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail); } /* * This is called by the log manager code to determine the LSN of the tail of * the log. This is exactly the LSN of the first item in the AIL. If the AIL * is empty, then this function returns 0. * * We need the AIL lock in order to get a coherent read of the lsn of the last * item in the AIL. */ xfs_lsn_t xfs_ail_min_lsn( struct xfs_ail *ailp) { xfs_lsn_t lsn = 0; xfs_log_item_t *lip; spin_lock(&ailp->xa_lock); lip = xfs_ail_min(ailp); if (lip) lsn = lip->li_lsn; spin_unlock(&ailp->xa_lock); return lsn; } /* * Return the maximum lsn held in the AIL, or zero if the AIL is empty. 
*/ static xfs_lsn_t xfs_ail_max_lsn( struct xfs_ail *ailp) { xfs_lsn_t lsn = 0; xfs_log_item_t *lip; spin_lock(&ailp->xa_lock); lip = xfs_ail_max(ailp); if (lip) lsn = lip->li_lsn; spin_unlock(&ailp->xa_lock); return lsn; } /* * AIL traversal cursor initialisation. * * The cursor keeps track of where our current traversal is up * to by tracking the next ƣtem in the list for us. However, for * this to be safe, removing an object from the AIL needs to invalidate * any cursor that points to it. hence the traversal cursor needs to * be linked to the struct xfs_ail so that deletion can search all the * active cursors for invalidation. * * We don't link the push cursor because it is embedded in the struct * xfs_ail and hence easily findable. */ STATIC void xfs_trans_ail_cursor_init( struct xfs_ail *ailp, struct xfs_ail_cursor *cur) { cur->item = NULL; if (cur == &ailp->xa_cursors) return; cur->next = ailp->xa_cursors.next; ailp->xa_cursors.next = cur; } /* * Set the cursor to the next item, because when we look * up the cursor the current item may have been freed. */ STATIC void xfs_trans_ail_cursor_set( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, struct xfs_log_item *lip) { if (lip) cur->item = xfs_ail_next(ailp, lip); } /* * Get the next item in the traversal and advance the cursor. * If the cursor was invalidated (inidicated by a lip of 1), * restart the traversal. */ struct xfs_log_item * xfs_trans_ail_cursor_next( struct xfs_ail *ailp, struct xfs_ail_cursor *cur) { struct xfs_log_item *lip = cur->item; if ((__psint_t)lip & 1) lip = xfs_ail_min(ailp); xfs_trans_ail_cursor_set(ailp, cur, lip); return lip; } /* * Now that the traversal is complete, we need to remove the cursor * from the list of traversing cursors. Avoid removing the embedded * push cursor, but use the fact it is always present to make the * list deletion simple. 
*/ void xfs_trans_ail_cursor_done( struct xfs_ail *ailp, struct xfs_ail_cursor *done) { struct xfs_ail_cursor *prev = NULL; struct xfs_ail_cursor *cur; done->item = NULL; if (done == &ailp->xa_cursors) return; prev = &ailp->xa_cursors; for (cur = prev->next; cur; prev = cur, cur = prev->next) { if (cur == done) { prev->next = cur->next; break; } } ASSERT(cur); } /* * Invalidate any cursor that is pointing to this item. This is * called when an item is removed from the AIL. Any cursor pointing * to this object is now invalid and the traversal needs to be * terminated so it doesn't reference a freed object. We set the * cursor item to a value of 1 so we can distinguish between an * invalidation and the end of the list when getting the next item * from the cursor. */ STATIC void xfs_trans_ail_cursor_clear( struct xfs_ail *ailp, struct xfs_log_item *lip) { struct xfs_ail_cursor *cur; /* need to search all cursors */ for (cur = &ailp->xa_cursors; cur; cur = cur->next) { if (cur->item == lip) cur->item = (struct xfs_log_item *) ((__psint_t)cur->item | 1); } } /* * Initialise the cursor to the first item in the AIL with the given @lsn. * This searches the list from lowest LSN to highest. Pass a @lsn of zero * to initialise the cursor to the first item in the AIL. */ xfs_log_item_t * xfs_trans_ail_cursor_first( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, xfs_lsn_t lsn) { xfs_log_item_t *lip; xfs_trans_ail_cursor_init(ailp, cur); lip = xfs_ail_min(ailp); if (lsn == 0) goto out; list_for_each_entry(lip, &ailp->xa_ail, li_ail) { if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) goto out; } lip = NULL; out: xfs_trans_ail_cursor_set(ailp, cur, lip); return lip; } /* * Initialise the cursor to the last item in the AIL with the given @lsn. * This searches the list from highest LSN to lowest. If there is no item with * the value of @lsn, then it sets the cursor to the last item with an LSN lower * than @lsn. 
*/ static struct xfs_log_item * __xfs_trans_ail_cursor_last( struct xfs_ail *ailp, xfs_lsn_t lsn) { xfs_log_item_t *lip; list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) { if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0) return lip; } return NULL; } /* * Initialise the cursor to the last item in the AIL with the given @lsn. * This searches the list from highest LSN to lowest. */ struct xfs_log_item * xfs_trans_ail_cursor_last( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, xfs_lsn_t lsn) { xfs_trans_ail_cursor_init(ailp, cur); cur->item = __xfs_trans_ail_cursor_last(ailp, lsn); return cur->item; } /* * splice the log item list into the AIL at the given LSN. We splice to the * tail of the given LSN to maintain insert order for push traversals. The * cursor is optional, allowing repeated updates to the same LSN to avoid * repeated traversals. */ static void xfs_ail_splice( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, struct list_head *list, xfs_lsn_t lsn) { struct xfs_log_item *lip = cur ? cur->item : NULL; struct xfs_log_item *next_lip; /* * Get a new cursor if we don't have a placeholder or the existing one * has been invalidated. */ if (!lip || (__psint_t)lip & 1) { lip = __xfs_trans_ail_cursor_last(ailp, lsn); if (!lip) { /* The list is empty, so just splice and return. */ if (cur) cur->item = NULL; list_splice(list, &ailp->xa_ail); return; } } /* * Our cursor points to the item we want to insert _after_, so we have * to update the cursor to point to the end of the list we are splicing * in so that it points to the correct location for the next splice. * i.e. before the splice * * lsn -> lsn -> lsn + x -> lsn + x ... * ^ * | cursor points here * * After the splice we have: * * lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ... 
* ^ ^ * | cursor points here | needs to move here * * So we set the cursor to the last item in the list to be spliced * before we execute the splice, resulting in the cursor pointing to * the correct item after the splice occurs. */ if (cur) { next_lip = list_entry(list->prev, struct xfs_log_item, li_ail); cur->item = next_lip; } list_splice(list, &lip->li_ail); } /* * Delete the given item from the AIL. Return a pointer to the item. */ static void xfs_ail_delete( struct xfs_ail *ailp, xfs_log_item_t *lip) { xfs_ail_check(ailp, lip); list_del(&lip->li_ail); xfs_trans_ail_cursor_clear(ailp, lip); } static long xfsaild_push( struct xfs_ail *ailp) { xfs_mount_t *mp = ailp->xa_mount; struct xfs_ail_cursor *cur = &ailp->xa_cursors; xfs_log_item_t *lip; xfs_lsn_t lsn; xfs_lsn_t target; long tout = 10; int flush_log = 0; int stuck = 0; int count = 0; int push_xfsbufd = 0; spin_lock(&ailp->xa_lock); target = ailp->xa_target; xfs_trans_ail_cursor_init(ailp, cur); lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); if (!lip || XFS_FORCED_SHUTDOWN(mp)) { /* * AIL is empty or our push has reached the end. */ xfs_trans_ail_cursor_done(ailp, cur); spin_unlock(&ailp->xa_lock); goto out_done; } XFS_STATS_INC(xs_push_ail); /* * While the item we are looking at is below the given threshold * try to flush it out. We'd like not to stop until we've at least * tried to push on everything in the AIL with an LSN less than * the given threshold. * * However, we will stop after a certain number of pushes and wait * for a reduced timeout to fire before pushing further. This * prevents use from spinning when we can't do anything or there is * lots of contention on the AIL lists. */ lsn = lip->li_lsn; while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { int lock_result; /* * If we can lock the item without sleeping, unlock the AIL * lock and flush the item. Then re-grab the AIL lock so we * can look for the next item on the AIL. 
List changes are * handled by the AIL lookup functions internally * * If we can't lock the item, either its holder will flush it * or it is already being flushed or it is being relogged. In * any of these case it is being taken care of and we can just * skip to the next item in the list. */ lock_result = IOP_TRYLOCK(lip); spin_unlock(&ailp->xa_lock); switch (lock_result) { case XFS_ITEM_SUCCESS: XFS_STATS_INC(xs_push_ail_success); IOP_PUSH(lip); ailp->xa_last_pushed_lsn = lsn; break; case XFS_ITEM_PUSHBUF: XFS_STATS_INC(xs_push_ail_pushbuf); if (!IOP_PUSHBUF(lip)) { stuck++; flush_log = 1; } else { ailp->xa_last_pushed_lsn = lsn; } push_xfsbufd = 1; break; case XFS_ITEM_PINNED: XFS_STATS_INC(xs_push_ail_pinned); stuck++; flush_log = 1; break; case XFS_ITEM_LOCKED: XFS_STATS_INC(xs_push_ail_locked); stuck++; break; default: ASSERT(0); break; } spin_lock(&ailp->xa_lock); /* should we bother continuing? */ if (XFS_FORCED_SHUTDOWN(mp)) break; ASSERT(mp->m_log); count++; /* * Are there too many items we can't do anything with? * If we we are skipping too many items because we can't flush * them or they are already being flushed, we back off and * given them time to complete whatever operation is being * done. i.e. remove pressure from the AIL while we can't make * progress so traversals don't slow down further inserts and * removals to/from the AIL. * * The value of 100 is an arbitrary magic number based on * observation. */ if (stuck > 100) break; lip = xfs_trans_ail_cursor_next(ailp, cur); if (lip == NULL) break; lsn = lip->li_lsn; } xfs_trans_ail_cursor_done(ailp, cur); spin_unlock(&ailp->xa_lock); if (flush_log) { /* * If something we need to push out was pinned, then * push out the log so it will become unpinned and * move forward in the AIL. 
*/ XFS_STATS_INC(xs_push_ail_flush); xfs_log_force(mp, 0); } if (push_xfsbufd) { /* we've got delayed write buffers to flush */ wake_up_process(mp->m_ddev_targp->bt_task); } /* assume we have more work to do in a short while */ out_done: if (!count) { /* We're past our target or empty, so idle */ ailp->xa_last_pushed_lsn = 0; tout = 50; } else if (XFS_LSN_CMP(lsn, target) >= 0) { /* * We reached the target so wait a bit longer for I/O to * complete and remove pushed items from the AIL before we * start the next scan from the start of the AIL. */ tout = 50; ailp->xa_last_pushed_lsn = 0; } else if ((stuck * 100) / count > 90) { /* * Either there is a lot of contention on the AIL or we * are stuck due to operations in progress. "Stuck" in this * case is defined as >90% of the items we tried to push * were stuck. * * Backoff a bit more to allow some I/O to complete before * continuing from where we were. */ tout = 20; } return tout; } static int xfsaild( void *data) { struct xfs_ail *ailp = data; long tout = 0; /* milliseconds */ while (!kthread_should_stop()) { if (tout && tout <= 20) __set_current_state(TASK_KILLABLE); else __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(tout ? msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); try_to_freeze(); tout = xfsaild_push(ailp); } return 0; } /* * This routine is called to move the tail of the AIL forward. It does this by * trying to flush items in the AIL whose lsns are below the given * threshold_lsn. * * The push is run asynchronously in a workqueue, which means the caller needs * to handle waiting on the async flush for space to become available. * We don't want to interrupt any push that is in progress, hence we only queue * work if we set the pushing bit approriately. * * We do this unlocked - we only need to know whether there is anything in the * AIL at the time we are called. We don't need to access the contents of * any of the objects, so the lock is not needed. 
*/ void xfs_ail_push( struct xfs_ail *ailp, xfs_lsn_t threshold_lsn) { xfs_log_item_t *lip; lip = xfs_ail_min(ailp); if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) || XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0) return; /* * Ensure that the new target is noticed in push code before it clears * the XFS_AIL_PUSHING_BIT. */ smp_wmb(); xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); smp_wmb(); wake_up_process(ailp->xa_task); } /* * Push out all items in the AIL immediately */ void xfs_ail_push_all( struct xfs_ail *ailp) { xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp); if (threshold_lsn) xfs_ail_push(ailp, threshold_lsn); } /* * This is to be called when an item is unlocked that may have * been in the AIL. It will wake up the first member of the AIL * wait list if this item's unlocking might allow it to progress. * If the item is in the AIL, then we need to get the AIL lock * while doing our checking so we don't race with someone going * to sleep waiting for this event in xfs_trans_push_ail(). */ void xfs_trans_unlocked_item( struct xfs_ail *ailp, xfs_log_item_t *lip) { xfs_log_item_t *min_lip; /* * If we're forcibly shutting down, we may have * unlocked log items arbitrarily. The last thing * we want to do is to move the tail of the log * over some potentially valid data. */ if (!(lip->li_flags & XFS_LI_IN_AIL) || XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { return; } /* * This is the one case where we can call into xfs_ail_min() * without holding the AIL lock because we only care about the * case where we are at the tail of the AIL. If the object isn't * at the tail, it doesn't matter what result we get back. This * is slightly racy because since we were just unlocked, we could * go to sleep between the call to xfs_ail_min and the call to * xfs_log_move_tail, have someone else lock us, commit to us disk, * move us out of the tail of the AIL, and then we wake up. 
However, * the call to xfs_log_move_tail() doesn't do anything if there's * not enough free space to wake people up so we're safe calling it. */ min_lip = xfs_ail_min(ailp); if (min_lip == lip) xfs_log_move_tail(ailp->xa_mount, 1); } /* xfs_trans_unlocked_item */ /* * xfs_trans_ail_update - bulk AIL insertion operation. * * @xfs_trans_ail_update takes an array of log items that all need to be * positioned at the same LSN in the AIL. If an item is not in the AIL, it will * be added. Otherwise, it will be repositioned by removing it and re-adding * it to the AIL. If we move the first item in the AIL, update the log tail to * match the new minimum LSN in the AIL. * * This function takes the AIL lock once to execute the update operations on * all the items in the array, and as such should not be called with the AIL * lock held. As a result, once we have the AIL lock, we need to check each log * item LSN to confirm it needs to be moved forward in the AIL. * * To optimise the insert operation, we delete all the items from the AIL in * the first pass, moving them into a temporary list, then splice the temporary * list into the correct position in the AIL. This avoids needing to do an * insert operation on every item. * * This function must be called with the AIL lock held. The lock is dropped * before returning. 
*/ void xfs_trans_ail_update_bulk( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, struct xfs_log_item **log_items, int nr_items, xfs_lsn_t lsn) __releases(ailp->xa_lock) { xfs_log_item_t *mlip; xfs_lsn_t tail_lsn; int mlip_changed = 0; int i; LIST_HEAD(tmp); mlip = xfs_ail_min(ailp); for (i = 0; i < nr_items; i++) { struct xfs_log_item *lip = log_items[i]; if (lip->li_flags & XFS_LI_IN_AIL) { /* check if we really need to move the item */ if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0) continue; xfs_ail_delete(ailp, lip); if (mlip == lip) mlip_changed = 1; } else { lip->li_flags |= XFS_LI_IN_AIL; } lip->li_lsn = lsn; list_add(&lip->li_ail, &tmp); } xfs_ail_splice(ailp, cur, &tmp, lsn); if (!mlip_changed) { spin_unlock(&ailp->xa_lock); return; } /* * It is not safe to access mlip after the AIL lock is dropped, so we * must get a copy of li_lsn before we do so. This is especially * important on 32-bit platforms where accessing and updating 64-bit * values like li_lsn is not atomic. */ mlip = xfs_ail_min(ailp); tail_lsn = mlip->li_lsn; spin_unlock(&ailp->xa_lock); xfs_log_move_tail(ailp->xa_mount, tail_lsn); } /* * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL * * @xfs_trans_ail_delete_bulk takes an array of log items that all need to * removed from the AIL. The caller is already holding the AIL lock, and done * all the checks necessary to ensure the items passed in via @log_items are * ready for deletion. This includes checking that the items are in the AIL. * * For each log item to be removed, unlink it from the AIL, clear the IN_AIL * flag from the item and reset the item's lsn to 0. If we remove the first * item in the AIL, update the log tail to match the new minimum LSN in the * AIL. * * This function will not drop the AIL lock until all items are removed from * the AIL to minimise the amount of lock traffic on the AIL. 
This does not * greatly increase the AIL hold time, but does significantly reduce the amount * of traffic on the lock, especially during IO completion. * * This function must be called with the AIL lock held. The lock is dropped * before returning. */ void xfs_trans_ail_delete_bulk( struct xfs_ail *ailp, struct xfs_log_item **log_items, int nr_items) __releases(ailp->xa_lock) { xfs_log_item_t *mlip; xfs_lsn_t tail_lsn; int mlip_changed = 0; int i; mlip = xfs_ail_min(ailp); for (i = 0; i < nr_items; i++) { struct xfs_log_item *lip = log_items[i]; if (!(lip->li_flags & XFS_LI_IN_AIL)) { struct xfs_mount *mp = ailp->xa_mount; spin_unlock(&ailp->xa_lock); if (!XFS_FORCED_SHUTDOWN(mp)) { xfs_alert_tag(mp, XFS_PTAG_AILDELETE, "%s: attempting to delete a log item that is not in the AIL", __func__); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); } return; } xfs_ail_delete(ailp, lip); lip->li_flags &= ~XFS_LI_IN_AIL; lip->li_lsn = 0; if (mlip == lip) mlip_changed = 1; } if (!mlip_changed) { spin_unlock(&ailp->xa_lock); return; } /* * It is not safe to access mlip after the AIL lock is dropped, so we * must get a copy of li_lsn before we do so. This is especially * important on 32-bit platforms where accessing and updating 64-bit * values like li_lsn is not atomic. It is possible we've emptied the * AIL here, so if that is the case, pass an LSN of 0 to the tail move. */ mlip = xfs_ail_min(ailp); tail_lsn = mlip ? mlip->li_lsn : 0; spin_unlock(&ailp->xa_lock); xfs_log_move_tail(ailp->xa_mount, tail_lsn); } /* * The active item list (AIL) is a doubly linked list of log * items sorted by ascending lsn. The base of the list is * a forw/back pointer pair embedded in the xfs mount structure. * The base is initialized with both pointers pointing to the * base. This case always needs to be distinguished, because * the base has no lsn to look at. We almost always insert * at the end of the list, so on inserts we search from the * end of the list to find where the new item belongs. 
*/ /* * Initialize the doubly linked list to point only to itself. */ int xfs_trans_ail_init( xfs_mount_t *mp) { struct xfs_ail *ailp; ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); if (!ailp) return ENOMEM; ailp->xa_mount = mp; INIT_LIST_HEAD(&ailp->xa_ail); spin_lock_init(&ailp->xa_lock); ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s", ailp->xa_mount->m_fsname); if (IS_ERR(ailp->xa_task)) goto out_free_ailp; mp->m_ail = ailp; return 0; out_free_ailp: kmem_free(ailp); return ENOMEM; } void xfs_trans_ail_destroy( xfs_mount_t *mp) { struct xfs_ail *ailp = mp->m_ail; kthread_stop(ailp->xa_task); kmem_free(ailp); }
gpl-2.0
TeamEOS/kernel_moto_shamu
drivers/media/platform/soc_camera/soc_mediabus.c
3108
13428
/* * soc-camera media bus helper routines * * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> #include <media/soc_mediabus.h> static const struct soc_mbus_lookup mbus_fmt[] = { { .code = V4L2_MBUS_FMT_YUYV8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YVYU8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_UYVY8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_VYUY8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB555, .name = "RGB555", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB555X, .name = "RGB555X", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB565, .name = "RGB565", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, 
.order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB565X, .name = "RGB565X", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB666_1X18, .fmt = { .fourcc = V4L2_PIX_FMT_RGB32, .name = "RGB666/32bpp", .bits_per_sample = 18, .packing = SOC_MBUS_PACKING_EXTEND32, .order = SOC_MBUS_ORDER_LE, }, }, { .code = V4L2_MBUS_FMT_RGB888_1X24, .fmt = { .fourcc = V4L2_PIX_FMT_RGB32, .name = "RGB888/32bpp", .bits_per_sample = 24, .packing = SOC_MBUS_PACKING_EXTEND32, .order = SOC_MBUS_ORDER_LE, }, }, { .code = V4L2_MBUS_FMT_RGB888_2X12_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB32, .name = "RGB888/32bpp", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND32, .order = SOC_MBUS_ORDER_BE, }, }, { .code = V4L2_MBUS_FMT_RGB888_2X12_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB32, .name = "RGB888/32bpp", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND32, .order = SOC_MBUS_ORDER_LE, }, }, { .code = V4L2_MBUS_FMT_SBGGR8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR8, .name = "Bayer 8 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_Y8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_GREY, .name = "Grey", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_Y10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_Y10, .name = "Grey 10bit", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = 
V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADLO, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADLO, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_JPEG_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_JPEG, .name = "JPEG", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_VARIABLE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB444, .name = "RGB444", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YUYV8_1_5X8, .fmt = { .fourcc = V4L2_PIX_FMT_YUV420, .name = "YUYV 4:2:0", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YVYU8_1_5X8, .fmt = { .fourcc = V4L2_PIX_FMT_YVU420, .name = "YVYU 4:2:0", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_UYVY8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY 16bit", .bits_per_sample = 16, .packing = 
SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_VYUY8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YUYV8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YVYU8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGRBG8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG8, .name = "Bayer 8 GRBG", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8, .name = "Bayer 10 BGGR DPCM 8", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGBRG10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SGBRG10, .name = "Bayer 10 GBRG", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGRBG10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG10, .name = "Bayer 10 GRBG", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SRGGB10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SRGGB10, .name = "Bayer 10 RGGB", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SBGGR12_1X12, .fmt = { .fourcc = 
V4L2_PIX_FMT_SBGGR12, .name = "Bayer 12 BGGR", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGBRG12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SGBRG12, .name = "Bayer 12 GBRG", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SGRBG12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG12, .name = "Bayer 12 GRBG", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_SRGGB12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SRGGB12, .name = "Bayer 12 RGGB", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, }; int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf, unsigned int *numerator, unsigned int *denominator) { switch (mf->packing) { case SOC_MBUS_PACKING_NONE: case SOC_MBUS_PACKING_EXTEND16: *numerator = 1; *denominator = 1; return 0; case SOC_MBUS_PACKING_EXTEND32: *numerator = 1; *denominator = 1; return 0; case SOC_MBUS_PACKING_2X8_PADHI: case SOC_MBUS_PACKING_2X8_PADLO: *numerator = 2; *denominator = 1; return 0; case SOC_MBUS_PACKING_1_5X8: *numerator = 3; *denominator = 2; return 0; case SOC_MBUS_PACKING_VARIABLE: *numerator = 0; *denominator = 1; return 0; } return -EINVAL; } EXPORT_SYMBOL(soc_mbus_samples_per_pixel); s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) { if (mf->layout != SOC_MBUS_LAYOUT_PACKED) return width * mf->bits_per_sample / 8; switch (mf->packing) { case SOC_MBUS_PACKING_NONE: return width * mf->bits_per_sample / 8; case SOC_MBUS_PACKING_2X8_PADHI: case SOC_MBUS_PACKING_2X8_PADLO: case SOC_MBUS_PACKING_EXTEND16: return width * 2; case SOC_MBUS_PACKING_1_5X8: return width * 3 / 2; case SOC_MBUS_PACKING_VARIABLE: return 0; case 
SOC_MBUS_PACKING_EXTEND32: return width * 4; } return -EINVAL; } EXPORT_SYMBOL(soc_mbus_bytes_per_line); s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, u32 bytes_per_line, u32 height) { if (mf->layout == SOC_MBUS_LAYOUT_PACKED) return bytes_per_line * height; switch (mf->packing) { case SOC_MBUS_PACKING_2X8_PADHI: case SOC_MBUS_PACKING_2X8_PADLO: return bytes_per_line * height * 2; case SOC_MBUS_PACKING_1_5X8: return bytes_per_line * height * 3 / 2; default: return -EINVAL; } } EXPORT_SYMBOL(soc_mbus_image_size); const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc( enum v4l2_mbus_pixelcode code, const struct soc_mbus_lookup *lookup, int n) { int i; for (i = 0; i < n; i++) if (lookup[i].code == code) return &lookup[i].fmt; return NULL; } EXPORT_SYMBOL(soc_mbus_find_fmtdesc); const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( enum v4l2_mbus_pixelcode code) { return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt)); } EXPORT_SYMBOL(soc_mbus_get_fmtdesc); unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg, unsigned int flags) { unsigned long common_flags; bool hsync = true, vsync = true, pclk, data, mode; bool mipi_lanes, mipi_clock; common_flags = cfg->flags & flags; switch (cfg->type) { case V4L2_MBUS_PARALLEL: hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW); vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW); case V4L2_MBUS_BT656: pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING); data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_LOW); mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE); return (!hsync || !vsync || !pclk || !data || !mode) ? 0 : common_flags; case V4L2_MBUS_CSI2: mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES; mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK | V4L2_MBUS_CSI2_CONTINUOUS_CLOCK); return (!mipi_lanes || !mipi_clock) ? 
0 : common_flags; } return 0; } EXPORT_SYMBOL(soc_mbus_config_compatible); static int __init soc_mbus_init(void) { return 0; } static void __exit soc_mbus_exit(void) { } module_init(soc_mbus_init); module_exit(soc_mbus_exit); MODULE_DESCRIPTION("soc-camera media bus interface"); MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); MODULE_LICENSE("GPL v2");
gpl-2.0
thicklizard/m9-patches
drivers/staging/speakup/fakekey.c
3364
2468
/* fakekey.c * Functions for simulating keypresses. * * Copyright (C) 2010 the Speakup Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/slab.h> #include <linux/preempt.h> #include <linux/percpu.h> #include <linux/input.h> #include "speakup.h" #define PRESSED 1 #define RELEASED 0 static DEFINE_PER_CPU(bool, reporting_keystroke); static struct input_dev *virt_keyboard; int speakup_add_virtual_keyboard(void) { int err; virt_keyboard = input_allocate_device(); if (!virt_keyboard) return -ENOMEM; virt_keyboard->name = "Speakup"; virt_keyboard->id.bustype = BUS_VIRTUAL; virt_keyboard->phys = "speakup/input0"; virt_keyboard->dev.parent = NULL; __set_bit(EV_KEY, virt_keyboard->evbit); __set_bit(KEY_DOWN, virt_keyboard->keybit); err = input_register_device(virt_keyboard); if (err) { input_free_device(virt_keyboard); virt_keyboard = NULL; } return err; } void speakup_remove_virtual_keyboard(void) { if (virt_keyboard != NULL) { input_unregister_device(virt_keyboard); virt_keyboard = NULL; } } /* * Send a simulated down-arrow to the application. 
*/ void speakup_fake_down_arrow(void) { unsigned long flags; /* disable keyboard interrupts */ local_irq_save(flags); /* don't change CPU */ preempt_disable(); __this_cpu_write(reporting_keystroke, true); input_report_key(virt_keyboard, KEY_DOWN, PRESSED); input_report_key(virt_keyboard, KEY_DOWN, RELEASED); __this_cpu_write(reporting_keystroke, false); /* reenable preemption */ preempt_enable(); /* reenable keyboard interrupts */ local_irq_restore(flags); } /* * Are we handling a simulated keypress on the current CPU? * Returns a boolean. */ bool speakup_fake_key_pressed(void) { return this_cpu_read(reporting_keystroke); }
gpl-2.0
alpinelinux/linux-stable-grsec
kernel/locking/percpu-rwsem.c
3876
5107
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

/*
 * Initialize @brw.  Allocates the per-cpu fast-path reader counter and
 * sets both shared counters to zero.  Returns 0 on success or -ENOMEM
 * if the per-cpu allocation fails (in which case @brw is unusable).
 */
int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	atomic_set(&brw->write_ctr, 0);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}

/*
 * Release the per-cpu counter allocated by __percpu_init_rwsem().
 * The caller must guarantee there are no remaining users of @brw.
 */
void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}

/*
 * This is the fast-path for down_read/up_read, it only needs to ensure
 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
 * serialize with the preempt-disabled section below.
 *
 * The nontrivial part is that we should guarantee acquire/release semantics
 * in case when
 *
 *	R_W: down_write() comes after up_read(), the writer should see all
 *	     changes done by the reader
 * or
 *	W_R: down_read() comes after up_write(), the reader should see all
 *	     changes done by the writer
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 *
 * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
 * reader inside the critical section. See the comments in down_write and
 * up_write below.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success = false;

	/* preemption off pairs with the writer's synchronize_sched_expedited() */
	preempt_disable();
	if (likely(!atomic_read(&brw->write_ctr))) {
		__this_cpu_add(*brw->fast_read_ctr, val);
		success = true;
	}
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read() this is not recursive, the writer can
 * come after the first percpu_down_read() and create the deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	/* fast path: bump the per-cpu counter if no writer is pending */
	if (likely(update_fast_ctr(brw, +1))) {
		rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
		return;
	}

	/* slow path: a writer is (or was) pending, fall back to the rwsem */
	down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	/* avoid up_read()->rwsem_release() */
	__up_read(&brw->rw_sem);
}

/*
 * Drop a reader reference taken by percpu_down_read().  Uses the fast
 * per-cpu decrement when possible; otherwise decrements the slow counter
 * and wakes a writer waiting in percpu_down_write().
 */
void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}

/*
 * Sum and zero every CPU's fast counter.  Only safe in the writer path
 * after synchronize_sched_expedited(): readers see write_ctr != 0 and
 * cannot touch fast_read_ctr (see the comment above update_fast_ctr()).
 */
static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

/*
 * A writer increments ->write_ctr to force the readers to switch to the
 * slow mode, note the atomic_read() check in update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
 * counter it represents the number of active readers.
 *
 * Finally the writer takes ->rw_sem for writing and blocks the new readers,
 * then waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/* tell update_fast_ctr() there is a pending writer */
	atomic_inc(&brw->write_ctr);
	/*
	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
	 *    so that update_fast_ctr() can't succeed.
	 *
	 * 2. Ensures we see the result of every previous this_cpu_add() in
	 *    update_fast_ctr().
	 *
	 * 3. Ensures that if any reader has exited its critical section via
	 *    fast-path, it executes a full memory barrier before we return.
	 *    See R_W case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}

/*
 * Release write ownership taken by percpu_down_write() and re-enable
 * the readers' fast path.
 */
void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Insert the barrier before the next fast-path in down_read,
	 * see W_R case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();
	/* the last writer unblocks update_fast_ctr() */
	atomic_dec(&brw->write_ctr);
}
gpl-2.0
mkasick/android_kernel_samsung_jfltevzw
lib/string_helpers.c
4388
1702
/* * Helpers for formatting and printing strings * * Copyright 31 August 2008 James Bottomley */ #include <linux/kernel.h> #include <linux/math64.h> #include <linux/export.h> #include <linux/string_helpers.h> /** * string_get_size - get the size in the specified units * @size: The size to be converted * @units: units to use (powers of 1000 or 1024) * @buf: buffer to format to * @len: length of buffer * * This function returns a string formatted to 3 significant figures * giving the size in the required units. Returns 0 on success or * error on failure. @buf is always zero terminated. * */ int string_get_size(u64 size, const enum string_size_units units, char *buf, int len) { const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", NULL}; const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", NULL }; const char **units_str[] = { [STRING_UNITS_10] = units_10, [STRING_UNITS_2] = units_2, }; const unsigned int divisor[] = { [STRING_UNITS_10] = 1000, [STRING_UNITS_2] = 1024, }; int i, j; u64 remainder = 0, sf_cap; char tmp[8]; tmp[0] = '\0'; i = 0; if (size >= divisor[units]) { while (size >= divisor[units] && units_str[units][i]) { remainder = do_div(size, divisor[units]); i++; } sf_cap = size; for (j = 0; sf_cap*10 < 1000; j++) sf_cap *= 10; if (j) { remainder *= 1000; do_div(remainder, divisor[units]); snprintf(tmp, sizeof(tmp), ".%03lld", (unsigned long long)remainder); tmp[j+1] = '\0'; } } snprintf(buf, len, "%lld%s %s", (unsigned long long)size, tmp, units_str[units][i]); return 0; } EXPORT_SYMBOL(string_get_size);
gpl-2.0
mdmower/android_kernel_htc_msm8960
drivers/gpu/drm/radeon/radeon_state.c
5668
95104
/* radeon_state.c -- State support for Radeon -*- linux-c -*- */ /* * Copyright 2000 VA Linux Systems, Inc., Fremont, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Gareth Hughes <gareth@valinux.com> * Kevin E. Martin <martin@valinux.com> */ #include "drmP.h" #include "drm.h" #include "drm_buffer.h" #include "drm_sarea.h" #include "radeon_drm.h" #include "radeon_drv.h" /* ================================================================ * Helper functions for client state checking and fixup */ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * dev_priv, struct drm_file * file_priv, u32 *offset) { u64 off = *offset; u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1; struct drm_radeon_driver_file_fields *radeon_priv; /* Hrm ... the story of the offset ... 
So this function converts * the various ideas of what userland clients might have for an * offset in the card address space into an offset into the card * address space :) So with a sane client, it should just keep * the value intact and just do some boundary checking. However, * not all clients are sane. Some older clients pass us 0 based * offsets relative to the start of the framebuffer and some may * assume the AGP aperture it appended to the framebuffer, so we * try to detect those cases and fix them up. * * Note: It might be a good idea here to make sure the offset lands * in some "allowed" area to protect things like the PCIE GART... */ /* First, the best case, the offset already lands in either the * framebuffer or the GART mapped space */ if (radeon_check_offset(dev_priv, off)) return 0; /* Ok, that didn't happen... now check if we have a zero based * offset that fits in the framebuffer + gart space, apply the * magic offset we get from SETPARAM or calculated from fb_location */ if (off < (dev_priv->fb_size + dev_priv->gart_size)) { radeon_priv = file_priv->driver_priv; off += radeon_priv->radeon_fb_delta; } /* Finally, assume we aimed at a GART offset if beyond the fb */ if (off > fb_end) off = off - fb_end - 1 + dev_priv->gart_vm_start; /* Now recheck and fail if out of bounds */ if (radeon_check_offset(dev_priv, off)) { DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off); *offset = off; return 0; } return -EINVAL; } static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * dev_priv, struct drm_file *file_priv, int id, struct drm_buffer *buf) { u32 *data; switch (id) { case RADEON_EMIT_PP_MISC: data = drm_buffer_pointer_to_dword(buf, (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4); if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR("Invalid depth buffer offset\n"); return -EINVAL; } dev_priv->have_z_offset = 1; break; case RADEON_EMIT_PP_CNTL: data = drm_buffer_pointer_to_dword(buf, (RADEON_RB3D_COLOROFFSET 
- RADEON_PP_CNTL) / 4); if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR("Invalid colour buffer offset\n"); return -EINVAL; } break; case R200_EMIT_PP_TXOFFSET_0: case R200_EMIT_PP_TXOFFSET_1: case R200_EMIT_PP_TXOFFSET_2: case R200_EMIT_PP_TXOFFSET_3: case R200_EMIT_PP_TXOFFSET_4: case R200_EMIT_PP_TXOFFSET_5: data = drm_buffer_pointer_to_dword(buf, 0); if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR("Invalid R200 texture offset\n"); return -EINVAL; } break; case RADEON_EMIT_PP_TXFILTER_0: case RADEON_EMIT_PP_TXFILTER_1: case RADEON_EMIT_PP_TXFILTER_2: data = drm_buffer_pointer_to_dword(buf, (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4); if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR("Invalid R100 texture offset\n"); return -EINVAL; } break; case R200_EMIT_PP_CUBIC_OFFSETS_0: case R200_EMIT_PP_CUBIC_OFFSETS_1: case R200_EMIT_PP_CUBIC_OFFSETS_2: case R200_EMIT_PP_CUBIC_OFFSETS_3: case R200_EMIT_PP_CUBIC_OFFSETS_4: case R200_EMIT_PP_CUBIC_OFFSETS_5:{ int i; for (i = 0; i < 5; i++) { data = drm_buffer_pointer_to_dword(buf, i); if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR ("Invalid R200 cubic texture offset\n"); return -EINVAL; } } break; } case RADEON_EMIT_PP_CUBIC_OFFSETS_T0: case RADEON_EMIT_PP_CUBIC_OFFSETS_T1: case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ int i; for (i = 0; i < 5; i++) { data = drm_buffer_pointer_to_dword(buf, i); if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR ("Invalid R100 cubic texture offset\n"); return -EINVAL; } } } break; case R200_EMIT_VAP_CTL:{ RING_LOCALS; BEGIN_RING(2); OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); ADVANCE_RING(); } break; case RADEON_EMIT_RB3D_COLORPITCH: case RADEON_EMIT_RE_LINE_PATTERN: case RADEON_EMIT_SE_LINE_WIDTH: case RADEON_EMIT_PP_LUM_MATRIX: case RADEON_EMIT_PP_ROT_MATRIX_0: case RADEON_EMIT_RB3D_STENCILREFMASK: case RADEON_EMIT_SE_VPORT_XSCALE: case RADEON_EMIT_SE_CNTL: case 
RADEON_EMIT_SE_CNTL_STATUS: case RADEON_EMIT_RE_MISC: case RADEON_EMIT_PP_BORDER_COLOR_0: case RADEON_EMIT_PP_BORDER_COLOR_1: case RADEON_EMIT_PP_BORDER_COLOR_2: case RADEON_EMIT_SE_ZBIAS_FACTOR: case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT: case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED: case R200_EMIT_PP_TXCBLEND_0: case R200_EMIT_PP_TXCBLEND_1: case R200_EMIT_PP_TXCBLEND_2: case R200_EMIT_PP_TXCBLEND_3: case R200_EMIT_PP_TXCBLEND_4: case R200_EMIT_PP_TXCBLEND_5: case R200_EMIT_PP_TXCBLEND_6: case R200_EMIT_PP_TXCBLEND_7: case R200_EMIT_TCL_LIGHT_MODEL_CTL_0: case R200_EMIT_TFACTOR_0: case R200_EMIT_VTX_FMT_0: case R200_EMIT_MATRIX_SELECT_0: case R200_EMIT_TEX_PROC_CTL_2: case R200_EMIT_TCL_UCP_VERT_BLEND_CTL: case R200_EMIT_PP_TXFILTER_0: case R200_EMIT_PP_TXFILTER_1: case R200_EMIT_PP_TXFILTER_2: case R200_EMIT_PP_TXFILTER_3: case R200_EMIT_PP_TXFILTER_4: case R200_EMIT_PP_TXFILTER_5: case R200_EMIT_VTE_CNTL: case R200_EMIT_OUTPUT_VTX_COMP_SEL: case R200_EMIT_PP_TAM_DEBUG3: case R200_EMIT_PP_CNTL_X: case R200_EMIT_RB3D_DEPTHXY_OFFSET: case R200_EMIT_RE_AUX_SCISSOR_CNTL: case R200_EMIT_RE_SCISSOR_TL_0: case R200_EMIT_RE_SCISSOR_TL_1: case R200_EMIT_RE_SCISSOR_TL_2: case R200_EMIT_SE_VAP_CNTL_STATUS: case R200_EMIT_SE_VTX_STATE_CNTL: case R200_EMIT_RE_POINTSIZE: case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0: case R200_EMIT_PP_CUBIC_FACES_0: case R200_EMIT_PP_CUBIC_FACES_1: case R200_EMIT_PP_CUBIC_FACES_2: case R200_EMIT_PP_CUBIC_FACES_3: case R200_EMIT_PP_CUBIC_FACES_4: case R200_EMIT_PP_CUBIC_FACES_5: case RADEON_EMIT_PP_TEX_SIZE_0: case RADEON_EMIT_PP_TEX_SIZE_1: case RADEON_EMIT_PP_TEX_SIZE_2: case R200_EMIT_RB3D_BLENDCOLOR: case R200_EMIT_TCL_POINT_SPRITE_CNTL: case RADEON_EMIT_PP_CUBIC_FACES_0: case RADEON_EMIT_PP_CUBIC_FACES_1: case RADEON_EMIT_PP_CUBIC_FACES_2: case R200_EMIT_PP_TRI_PERF_CNTL: case R200_EMIT_PP_AFS_0: case R200_EMIT_PP_AFS_1: case R200_EMIT_ATF_TFACTOR: case R200_EMIT_PP_TXCTLALL_0: case R200_EMIT_PP_TXCTLALL_1: case R200_EMIT_PP_TXCTLALL_2: case 
R200_EMIT_PP_TXCTLALL_3: case R200_EMIT_PP_TXCTLALL_4: case R200_EMIT_PP_TXCTLALL_5: case R200_EMIT_VAP_PVS_CNTL: /* These packets don't contain memory offsets */ break; default: DRM_ERROR("Unknown state packet ID %d\n", id); return -EINVAL; } return 0; } static int radeon_check_and_fixup_packet3(drm_radeon_private_t * dev_priv, struct drm_file *file_priv, drm_radeon_kcmd_buffer_t * cmdbuf, unsigned int *cmdsz) { u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); u32 offset, narrays; int count, i, k; count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16); *cmdsz = 2 + count; if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) { DRM_ERROR("Not a type 3 packet\n"); return -EINVAL; } if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) { DRM_ERROR("Packet size larger than size of data provided\n"); return -EINVAL; } switch (*cmd & 0xff00) { /* XXX Are there old drivers needing other packets? */ case RADEON_3D_DRAW_IMMD: case RADEON_3D_DRAW_VBUF: case RADEON_3D_DRAW_INDX: case RADEON_WAIT_FOR_IDLE: case RADEON_CP_NOP: case RADEON_3D_CLEAR_ZMASK: /* case RADEON_CP_NEXT_CHAR: case RADEON_CP_PLY_NEXTSCAN: case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? 
*/ /* these packets are safe */ break; case RADEON_CP_3D_DRAW_IMMD_2: case RADEON_CP_3D_DRAW_VBUF_2: case RADEON_CP_3D_DRAW_INDX_2: case RADEON_3D_CLEAR_HIZ: /* safe but r200 only */ if (dev_priv->microcode_version != UCODE_R200) { DRM_ERROR("Invalid 3d packet for r100-class chip\n"); return -EINVAL; } break; case RADEON_3D_LOAD_VBPNTR: if (count > 18) { /* 12 arrays max */ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count); return -EINVAL; } /* carefully check packet contents */ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); narrays = *cmd & ~0xc000; k = 0; i = 2; while ((k < narrays) && (i < (count + 2))) { i++; /* skip attribute field */ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) { DRM_ERROR ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", k, i); return -EINVAL; } k++; i++; if (k == narrays) break; /* have one more to process, they come in pairs */ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) { DRM_ERROR ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", k, i); return -EINVAL; } k++; i++; } /* do the counts match what we expect ? 
*/ if ((k != narrays) || (i != (count + 2))) { DRM_ERROR ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count + 1); return -EINVAL; } break; case RADEON_3D_RNDR_GEN_INDX_PRIM: if (dev_priv->microcode_version != UCODE_R100) { DRM_ERROR("Invalid 3d packet for r200-class chip\n"); return -EINVAL; } cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) { DRM_ERROR("Invalid rndr_gen_indx offset\n"); return -EINVAL; } break; case RADEON_CP_INDX_BUFFER: if (dev_priv->microcode_version != UCODE_R200) { DRM_ERROR("Invalid 3d packet for r100-class chip\n"); return -EINVAL; } cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); if ((*cmd & 0x8000ffff) != 0x80000810) { DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd); return -EINVAL; } cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2); if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) { DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd); return -EINVAL; } break; case RADEON_CNTL_HOSTDATA_BLT: case RADEON_CNTL_PAINT_MULTI: case RADEON_CNTL_BITBLT_MULTI: /* MSB of opcode: next DWORD GUI_CNTL */ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2); offset = *cmd2 << 10; if (radeon_check_and_fixup_offset (dev_priv, file_priv, &offset)) { DRM_ERROR("Invalid first packet offset\n"); return -EINVAL; } *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10; } if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); offset = *cmd3 << 10; if (radeon_check_and_fixup_offset (dev_priv, file_priv, &offset)) { DRM_ERROR("Invalid second packet offset\n"); return -EINVAL; } *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10; } break; default: DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00); 
return -EINVAL; } return 0; } /* ================================================================ * CP hardware state programming functions */ static void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, struct drm_clip_rect * box) { RING_LOCALS; DRM_DEBUG(" box: x1=%d y1=%d x2=%d y2=%d\n", box->x1, box->y1, box->x2, box->y2); BEGIN_RING(4); OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0)); OUT_RING((box->y1 << 16) | box->x1); OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0)); OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1)); ADVANCE_RING(); } /* Emit 1.1 state */ static int radeon_emit_state(drm_radeon_private_t * dev_priv, struct drm_file *file_priv, drm_radeon_context_regs_t * ctx, drm_radeon_texture_regs_t * tex, unsigned int dirty) { RING_LOCALS; DRM_DEBUG("dirty=0x%08x\n", dirty); if (dirty & RADEON_UPLOAD_CONTEXT) { if (radeon_check_and_fixup_offset(dev_priv, file_priv, &ctx->rb3d_depthoffset)) { DRM_ERROR("Invalid depth buffer offset\n"); return -EINVAL; } if (radeon_check_and_fixup_offset(dev_priv, file_priv, &ctx->rb3d_coloroffset)) { DRM_ERROR("Invalid depth buffer offset\n"); return -EINVAL; } BEGIN_RING(14); OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6)); OUT_RING(ctx->pp_misc); OUT_RING(ctx->pp_fog_color); OUT_RING(ctx->re_solid_color); OUT_RING(ctx->rb3d_blendcntl); OUT_RING(ctx->rb3d_depthoffset); OUT_RING(ctx->rb3d_depthpitch); OUT_RING(ctx->rb3d_zstencilcntl); OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2)); OUT_RING(ctx->pp_cntl); OUT_RING(ctx->rb3d_cntl); OUT_RING(ctx->rb3d_coloroffset); OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0)); OUT_RING(ctx->rb3d_colorpitch); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_VERTFMT) { BEGIN_RING(2); OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0)); OUT_RING(ctx->se_coord_fmt); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_LINE) { BEGIN_RING(5); OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1)); OUT_RING(ctx->re_line_pattern); OUT_RING(ctx->re_line_state); OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0)); OUT_RING(ctx->se_line_width); 
ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_BUMPMAP) { BEGIN_RING(5); OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0)); OUT_RING(ctx->pp_lum_matrix); OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1)); OUT_RING(ctx->pp_rot_matrix_0); OUT_RING(ctx->pp_rot_matrix_1); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_MASKS) { BEGIN_RING(4); OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2)); OUT_RING(ctx->rb3d_stencilrefmask); OUT_RING(ctx->rb3d_ropcntl); OUT_RING(ctx->rb3d_planemask); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_VIEWPORT) { BEGIN_RING(7); OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5)); OUT_RING(ctx->se_vport_xscale); OUT_RING(ctx->se_vport_xoffset); OUT_RING(ctx->se_vport_yscale); OUT_RING(ctx->se_vport_yoffset); OUT_RING(ctx->se_vport_zscale); OUT_RING(ctx->se_vport_zoffset); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_SETUP) { BEGIN_RING(4); OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0)); OUT_RING(ctx->se_cntl); OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0)); OUT_RING(ctx->se_cntl_status); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_MISC) { BEGIN_RING(2); OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0)); OUT_RING(ctx->re_misc); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_TEX0) { if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex[0].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 0\n"); return -EINVAL; } BEGIN_RING(9); OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5)); OUT_RING(tex[0].pp_txfilter); OUT_RING(tex[0].pp_txformat); OUT_RING(tex[0].pp_txoffset); OUT_RING(tex[0].pp_txcblend); OUT_RING(tex[0].pp_txablend); OUT_RING(tex[0].pp_tfactor); OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0)); OUT_RING(tex[0].pp_border_color); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_TEX1) { if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex[1].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 1\n"); return -EINVAL; } BEGIN_RING(9); OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5)); OUT_RING(tex[1].pp_txfilter); OUT_RING(tex[1].pp_txformat); 
OUT_RING(tex[1].pp_txoffset); OUT_RING(tex[1].pp_txcblend); OUT_RING(tex[1].pp_txablend); OUT_RING(tex[1].pp_tfactor); OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0)); OUT_RING(tex[1].pp_border_color); ADVANCE_RING(); } if (dirty & RADEON_UPLOAD_TEX2) { if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex[2].pp_txoffset)) { DRM_ERROR("Invalid texture offset for unit 2\n"); return -EINVAL; } BEGIN_RING(9); OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5)); OUT_RING(tex[2].pp_txfilter); OUT_RING(tex[2].pp_txformat); OUT_RING(tex[2].pp_txoffset); OUT_RING(tex[2].pp_txcblend); OUT_RING(tex[2].pp_txablend); OUT_RING(tex[2].pp_tfactor); OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0)); OUT_RING(tex[2].pp_border_color); ADVANCE_RING(); } return 0; } /* Emit 1.2 state */ static int radeon_emit_state2(drm_radeon_private_t * dev_priv, struct drm_file *file_priv, drm_radeon_state_t * state) { RING_LOCALS; if (state->dirty & RADEON_UPLOAD_ZBIAS) { BEGIN_RING(3); OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1)); OUT_RING(state->context2.se_zbias_factor); OUT_RING(state->context2.se_zbias_constant); ADVANCE_RING(); } return radeon_emit_state(dev_priv, file_priv, &state->context, state->tex, state->dirty); } /* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in * 1.3 cmdbuffers allow all previous state to be updated as well as * the tcl scalar and vector areas. 
 */

/* Table of the register packets a 1.3 command buffer may emit: the
 * first hardware register written, the number of consecutive dwords,
 * and a name used for error reporting.  NOTE(review): the numbered
 * entries (/* 61 *\/ etc.) suggest the table is addressed by packet id
 * taken from the command stream, so the order is userspace ABI --
 * do not reorder; confirm against the cmdbuf parser before changing.
 */
static struct {
	int start;		/* first register in the run */
	int len;		/* number of dwords written */
	const char *name;	/* for DRM_ERROR diagnostics */
} packet[RADEON_MAX_STATE_PACKETS] = {
	{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
	{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
	{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
	{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
	{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
	{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
	{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
	{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
	{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
	{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
	{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
	{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
	{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
	{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
	{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
	{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
	{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
	{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
	{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
	{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
	{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
		    "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
	{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
	{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
	{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
	{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
	{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
	{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
	{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
	{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
	{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
	{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
	{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
	{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
	{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
	{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
	{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
	{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
	{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
	{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
	{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
	{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
	{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
	{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
	{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
	{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
	{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
	{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
	{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
	{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
	{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
	{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
	{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
	{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
	 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
	{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
	{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
	{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
	{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
	{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
	{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
	{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
	{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
	{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
	{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
	{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
	{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
	{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
	{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
	{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
	{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},     /* 85 */
	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
	{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
	{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
	{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
	{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
	{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
	{R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
};

/* ================================================================
 * Performance monitoring functions
 */

/* Paint a small solid-colour box, offset relative to the first
 * cliprect, via a 2D PAINT_MULTI blit.  r/g/b are 8-bit components,
 * packed according to the current colour format.  Used only by the
 * performance-monitoring overlay below.
 */
static void radeon_clear_box(drm_radeon_private_t * dev_priv,
			     struct drm_radeon_master_private *master_priv,
			     int x, int y, int w, int h, int r, int g, int b)
{
	u32 color;
	RING_LOCALS;

	/* Position the box relative to the first clip rectangle. */
	x += master_priv->sarea_priv->boxes[0].x1;
	y += master_priv->sarea_priv->boxes[0].y1;

	switch (dev_priv->color_fmt) {
	case RADEON_COLOR_FORMAT_RGB565:
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case RADEON_COLOR_FORMAT_ARGB8888:
	default:
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	}

	/* Open up the full write mask before blitting. */
	BEGIN_RING(4);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
	OUT_RING(0xffffffff);
	ADVANCE_RING();

	BEGIN_RING(6);
	OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
	OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_SOLID_COLOR |
		 (dev_priv->color_fmt << 8) |
		 RADEON_GMC_SRC_DATATYPE_COLOR |
		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);

	/* NOTE(review): pfCurrentPage selects front vs back here --
	 * presumably so the box lands in the visible buffer after a
	 * page flip; confirm against the flip logic below.
	 */
	if (master_priv->sarea_priv->pfCurrentPage == 1) {
		OUT_RING(dev_priv->front_pitch_offset);
	} else {
		OUT_RING(dev_priv->back_pitch_offset);
	}

	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}

/* Draw the on-screen diagnostic boxes from the accumulated per-frame
 * statistics, then reset the statistics for the next frame.
 */
static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv,
					struct drm_radeon_master_private *master_priv)
{
	/* Collapse various things into a wait flag -- trying to
	 * guess if userspace slept -- better just to have them tell us.
	 */
	if (dev_priv->stats.last_frame_reads > 1 ||
	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	if (dev_priv->stats.freelist_loops) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	/* Purple box for page flipping
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
		radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);

	/* Red box if we have to wait for idle at any point
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
		radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);

	/* Blue box: lost context?
	 */

	/* Yellow box for texture swaps
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
		radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);

	/* Green box if hardware never idles (as far as we can tell)
	 */
	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
		radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);

	/* Draw bars indicating number of buffers allocated
	 * (not a great measure, easily confused)
	 */
	if (dev_priv->stats.requested_bufs) {
		if (dev_priv->stats.requested_bufs > 100)
			dev_priv->stats.requested_bufs = 100;

		radeon_clear_box(dev_priv, master_priv, 4, 16,
				 dev_priv->stats.requested_bufs, 4,
				 196, 128, 128);
	}

	memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
}

/* ================================================================
 * CP command dispatch functions
 */

/* Clear the requested combination of colour (front/back), depth and
 * stencil buffers for every cliprect in the SAREA.  Colour buffers
 * are cleared with a 2D fill; depth/stencil are cleared either with
 * the hyper-z fast path (RADEON_CLEAR_FASTZ) or by rendering a quad
 * into just those buffers.  depth_boxes holds the userspace-prepared
 * quad coordinates for the latter path.
 */
static void radeon_cp_dispatch_clear(struct drm_device * dev,
				     struct drm_master *master,
				     drm_radeon_clear_t * clear,
				     drm_radeon_clear_rect_t * depth_boxes)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
	int i;
	RING_LOCALS;
	DRM_DEBUG("flags = 0x%x\n", flags);

	dev_priv->stats.clears++;

	/* When page-flipped, "front" and "back" swap meaning. */
	if (sarea_priv->pfCurrentPage == 1) {
		unsigned int tmp = flags;

		flags &= ~(RADEON_FRONT | RADEON_BACK);
		if (tmp & RADEON_FRONT)
			flags |= RADEON_BACK;
		if (tmp & RADEON_BACK)
			flags |= RADEON_FRONT;
	}
	/* Refuse depth/stencil clears if no depth offset was ever
	 * supplied; buggy userspace used to request this anyway.
	 */
	if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
		if (!dev_priv->have_z_offset) {
			printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
			flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
		}
	}

	if (flags & (RADEON_FRONT | RADEON_BACK)) {

		BEGIN_RING(4);

		/* Ensure the 3D stream is idle before doing a
		 * 2D fill to clear the front or back buffer.
		 */
		RADEON_WAIT_UNTIL_3D_IDLE();

		OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
		OUT_RING(clear->color_mask);

		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		sarea_priv->ctx_owner = 0;

		for (i = 0; i < nbox; i++) {
			int x = pbox[i].x1;
			int y = pbox[i].y1;
			int w = pbox[i].x2 - x;
			int h = pbox[i].y2 - y;

			DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
				  x, y, w, h, flags);

			if (flags & RADEON_FRONT) {
				BEGIN_RING(6);

				OUT_RING(CP_PACKET3
					 (RADEON_CNTL_PAINT_MULTI, 4));
				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					 RADEON_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->color_fmt << 8) |
					 RADEON_GMC_SRC_DATATYPE_COLOR |
					 RADEON_ROP3_P |
					 RADEON_GMC_CLR_CMP_CNTL_DIS);

				OUT_RING(dev_priv->front_pitch_offset);
				OUT_RING(clear->clear_color);

				OUT_RING((x << 16) | y);
				OUT_RING((w << 16) | h);

				ADVANCE_RING();
			}

			if (flags & RADEON_BACK) {
				BEGIN_RING(6);

				OUT_RING(CP_PACKET3
					 (RADEON_CNTL_PAINT_MULTI, 4));
				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					 RADEON_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->color_fmt << 8) |
					 RADEON_GMC_SRC_DATATYPE_COLOR |
					 RADEON_ROP3_P |
					 RADEON_GMC_CLR_CMP_CNTL_DIS);

				OUT_RING(dev_priv->back_pitch_offset);
				OUT_RING(clear->clear_color);

				OUT_RING((x << 16) | y);
				OUT_RING((w << 16) | h);

				ADVANCE_RING();
			}
		}
	}

	/* hyper z clear */
	/* no docs available, based on reverse engineering by Stephane Marchesin */
	if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
	    && (flags & RADEON_CLEAR_FASTZ)) {

		int i;
		/* depth pitch in pixels: 16-bit Z is 2 bytes/pixel,
		 * everything else 4 bytes/pixel */
		int depthpixperline =
		    dev_priv->depth_fmt ==
		    RADEON_DEPTH_FORMAT_16BIT_INT_Z ?
		    (dev_priv->depth_pitch / 2) :
		    (dev_priv->depth_pitch / 4);

		u32 clearmask;

		u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
		    ((clear->depth_mask & 0xff) << 24);

		/* Make sure we restore the 3D state next time.
		 * we haven't touched any "normal" state - still need this?
		 */
		sarea_priv->ctx_owner = 0;

		if ((dev_priv->flags & RADEON_HAS_HIERZ)
		    && (flags & RADEON_USE_HIERZ)) {
			/* FIXME : reverse engineer that for Rx00 cards */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
			/* pattern seems to work for r100, though get slight
			   rendering errors with glxgears. If hierz is not enabled for r100,
			   only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
			   other ones are ignored, and the same clear mask can be used. That's
			   very different behaviour than R200 which needs different clear mask
			   and different number of tiles to clear if hierz is enabled or not !?! */
			clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
		} else {
			/* clear mask : chooses the clearing pattern.
			   rv250: could be used to clear only parts of macrotiles
			   (but that would get really complicated...)?
			   bit 0 and 1 (either or both of them ?!?!) are used to
			   not clear tile (or maybe one of the bits indicates if the tile is
			   compressed or not), bit 2 and 3 to not clear tile 1,...,.
			   Pattern is as follows:
			   | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
			   bits -------------------------------------------------
			   | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
			   rv100: clearmask covers 2x8 4x1 tiles, but one clear still
			   covers 256 pixels ?!? */
			clearmask = 0x0;
		}

		/* Program the z-mask clear value/offset and flush the
		 * z-cache before touching the tile buffer. */
		BEGIN_RING(8);
		RADEON_WAIT_UNTIL_2D_IDLE();
		OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
			     tempRB3D_DEPTHCLEARVALUE);
		/* what offset is this exactly ? */
		OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
		/* need ctlstat, otherwise get some strange black flickering */
		OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
			     RADEON_RB3D_ZC_FLUSH_ALL);
		ADVANCE_RING();

		/* Clear the z-mask tiles covering each cliprect; tile
		 * geometry differs per chip generation, hence the three
		 * variants below. */
		for (i = 0; i < nbox; i++) {
			int tileoffset, nrtilesx, nrtilesy, j;
			/* it looks like r200 needs rv-style clears, at
			 * least if hierz is not enabled? */
			if ((dev_priv->flags & RADEON_HAS_HIERZ)
			    && !(dev_priv->microcode_version == UCODE_R200)) {
				/* FIXME : figure this out for r200 (when hierz is enabled). Or
				   maybe r200 actually doesn't need to put the low-res z value into
				   the tile cache like r100, but just needs to clear the hi-level z-buffer?
				   Works for R100, both with hierz and without.
				   R100 seems to operate on 2x1 8x8 tiles, but...
				   odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
				   problematic with resolutions which are not 64 pix aligned? */
				tileoffset =
				    ((pbox[i].y1 >> 3) * depthpixperline +
				     pbox[i].x1) >> 6;
				nrtilesx =
				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;
				nrtilesy =
				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					/* first tile */
					OUT_RING(tileoffset * 8);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 4);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			} else if (dev_priv->microcode_version == UCODE_R200) {
				/* works for rv250. */
				/* find first macro tile (8x2 4x4 z-pixels on rv250) */
				tileoffset =
				    ((pbox[i].y1 >> 3) * depthpixperline +
				     pbox[i].x1) >> 5;
				nrtilesx =
				    (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
				nrtilesy =
				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					/* first tile */
					/* judging by the first tile offset needed, could possibly
					   directly address/clear 4x4 tiles instead of 8x2 * 4x4
					   macro tiles, though would still need clear mask for
					   right/bottom if truly 4x4 granularity is desired ? */
					OUT_RING(tileoffset * 16);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 1);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 5;
				}
			} else {	/* rv 100 */
				/* rv100 might not need 64 pix alignment, who knows */
				/* offsets are, hmm, weird */
				tileoffset =
				    ((pbox[i].y1 >> 4) * depthpixperline +
				     pbox[i].x1) >> 6;
				nrtilesx =
				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;
				nrtilesy =
				    (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					OUT_RING(tileoffset * 128);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 4);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			}
		}

		/* TODO don't always clear all hi-level z tiles */
		if ((dev_priv->flags & RADEON_HAS_HIERZ)
		    && (dev_priv->microcode_version == UCODE_R200)
		    && (flags & RADEON_USE_HIERZ))
			/* r100 and cards without hierarchical z-buffer
			 * have no high-level z-buffer */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
		{
			BEGIN_RING(4);
			OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
			OUT_RING(0x0);	/* First tile */
			OUT_RING(0x3cc0);
			OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
			ADVANCE_RING();
		}
	}

	/* We have to clear the depth and/or stencil buffers by
	 * rendering a quad into just those buffers.  Thus, we have to
	 * make sure the 3D engine is configured correctly.
	 */
	else if ((dev_priv->microcode_version == UCODE_R200) &&
		 (flags & (RADEON_DEPTH | RADEON_STENCIL))) {

		int tempPP_CNTL;
		int tempRE_CNTL;
		int tempRB3D_CNTL;
		int tempRB3D_ZSTENCILCNTL;
		int tempRB3D_STENCILREFMASK;
		int tempRB3D_PLANEMASK;

		int tempSE_CNTL;
		int tempSE_VTE_CNTL;
		int tempSE_VTX_FMT_0;
		int tempSE_VTX_FMT_1;
		int tempSE_VAP_CNTL;
		int tempRE_AUX_SCISSOR_CNTL;

		tempPP_CNTL = 0;
		tempRE_CNTL = 0;

		tempRB3D_CNTL = depth_clear->rb3d_cntl;

		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
		tempRB3D_STENCILREFMASK = 0x0;

		tempSE_CNTL = depth_clear->se_cntl;

		/* Disable TCL */

		tempSE_VAP_CNTL = (	/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
					  (0x9 <<
					   SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));

		/* Colour writes fully masked off: only z/stencil are touched. */
		tempRB3D_PLANEMASK = 0x0;

		tempRE_AUX_SCISSOR_CNTL = 0x0;

		tempSE_VTE_CNTL =
		    SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;

		/* Vertex format (X, Y, Z, W) */
		tempSE_VTX_FMT_0 =
		    SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
		    SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
		tempSE_VTX_FMT_1 = 0x0;

		/*
		 * Depth buffer specific enables
		 */
		if (flags & RADEON_DEPTH) {
			/* Enable depth buffer */
			tempRB3D_CNTL |= RADEON_Z_ENABLE;
		} else {
			/* Disable depth buffer */
			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
		}

		/*
		 * Stencil buffer specific enables
		 */
		if (flags & RADEON_STENCIL) {
			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = clear->depth_mask;
		} else {
			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
			    RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}

		BEGIN_RING(26);
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
		OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
		OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
			     tempRB3D_STENCILREFMASK);
		OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
		OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
		OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
		OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
		OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
		OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
		OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		sarea_priv->ctx_owner = 0;

		for (i = 0; i < nbox; i++) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

			/* Three vertices (x, y, z, w) forming a rect
			 * covering the cliprect; w is 1.0f (0x3f800000). */
			BEGIN_RING(14);
			OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
				  RADEON_PRIM_WALK_RING |
				  (3 << RADEON_NUM_VERTICES_SHIFT)));
			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			ADVANCE_RING();

		}
	} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
		/* Pre-R200 path: same quad-based clear with the
		 * older DRAW_IMMD packet. */

		int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;

		rb3d_cntl = depth_clear->rb3d_cntl;

		if (flags & RADEON_DEPTH) {
			rb3d_cntl |= RADEON_Z_ENABLE;
		} else {
			rb3d_cntl &= ~RADEON_Z_ENABLE;
		}

		if (flags & RADEON_STENCIL) {
			rb3d_cntl |= RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = clear->depth_mask;	/* misnamed field */
		} else {
			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
			    RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}

		BEGIN_RING(13);
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
		OUT_RING(0x00000000);
		OUT_RING(rb3d_cntl);

		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
		OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
		OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
		ADVANCE_RING();

		/* Make sure we restore the 3D state next time.
		 */
		sarea_priv->ctx_owner = 0;

		for (i = 0; i < nbox; i++) {

			/* Funny that this should be required --
			 *  sets top-left?
			 */
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

			BEGIN_RING(15);

			OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
			OUT_RING(RADEON_VTX_Z_PRESENT |
				 RADEON_VTX_PKCOLOR_PRESENT);
			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
				  RADEON_PRIM_WALK_RING |
				  RADEON_MAOS_ENABLE |
				  RADEON_VTX_FMT_RADEON_MODE |
				  (3 << RADEON_NUM_VERTICES_SHIFT)));

			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			ADVANCE_RING();
		}
	}

	/* Increment the clear counter.  The client-side 3D driver must
	 * wait on this value before performing the clear ioctl.  We
	 * need this because the card's so damned fast...
	 */
	sarea_priv->last_clear++;

	BEGIN_RING(4);

	RADEON_CLEAR_AGE(sarea_priv->last_clear);
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
}

/* Copy the back buffer to the front buffer (blit-based "swap") for
 * every cliprect, then bump and emit the frame-age counter userspace
 * throttles on.
 */
static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes)
		radeon_cp_performance_boxes(dev_priv, master_priv);

	/* Wait for the 3D stream to idle before dispatching the bitblt.
* This will prevent data corruption between the two streams. */ BEGIN_RING(2); RADEON_WAIT_UNTIL_3D_IDLE(); ADVANCE_RING(); for (i = 0; i < nbox; i++) { int x = pbox[i].x1; int y = pbox[i].y1; int w = pbox[i].x2 - x; int h = pbox[i].y2 - y; DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h); BEGIN_RING(9); OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0)); OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL | RADEON_GMC_BRUSH_NONE | (dev_priv->color_fmt << 8) | RADEON_GMC_SRC_DATATYPE_COLOR | RADEON_ROP3_S | RADEON_DP_SRC_SOURCE_MEMORY | RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS); /* Make this work even if front & back are flipped: */ OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); if (sarea_priv->pfCurrentPage == 0) { OUT_RING(dev_priv->back_pitch_offset); OUT_RING(dev_priv->front_pitch_offset); } else { OUT_RING(dev_priv->front_pitch_offset); OUT_RING(dev_priv->back_pitch_offset); } OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2)); OUT_RING((x << 16) | y); OUT_RING((x << 16) | y); OUT_RING((w << 16) | h); ADVANCE_RING(); } /* Increment the frame counter. The client-side 3D driver must * throttle the framerate by waiting for this value before * performing the swapbuffer ioctl. */ sarea_priv->last_frame++; BEGIN_RING(4); RADEON_FRAME_AGE(sarea_priv->last_frame); RADEON_WAIT_UNTIL_2D_IDLE(); ADVANCE_RING(); } void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = master->driver_priv; struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle; int offset = (master_priv->sarea_priv->pfCurrentPage == 1) ? dev_priv->front_offset : dev_priv->back_offset; RING_LOCALS; DRM_DEBUG("pfCurrentPage=%d\n", master_priv->sarea_priv->pfCurrentPage); /* Do some trivial performance monitoring... 
*/ if (dev_priv->do_boxes) { dev_priv->stats.boxes |= RADEON_BOX_FLIP; radeon_cp_performance_boxes(dev_priv, master_priv); } /* Update the frame offsets for both CRTCs */ BEGIN_RING(6); RADEON_WAIT_UNTIL_3D_IDLE(); OUT_RING_REG(RADEON_CRTC_OFFSET, ((sarea->frame.y * dev_priv->front_pitch + sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) + offset); OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base + offset); ADVANCE_RING(); /* Increment the frame counter. The client-side 3D driver must * throttle the framerate by waiting for this value before * performing the swapbuffer ioctl. */ master_priv->sarea_priv->last_frame++; master_priv->sarea_priv->pfCurrentPage = 1 - master_priv->sarea_priv->pfCurrentPage; BEGIN_RING(2); RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame); ADVANCE_RING(); } static int bad_prim_vertex_nr(int primitive, int nr) { switch (primitive & RADEON_PRIM_TYPE_MASK) { case RADEON_PRIM_TYPE_NONE: case RADEON_PRIM_TYPE_POINT: return nr < 1; case RADEON_PRIM_TYPE_LINE: return (nr & 1) || nr == 0; case RADEON_PRIM_TYPE_LINE_STRIP: return nr < 2; case RADEON_PRIM_TYPE_TRI_LIST: case RADEON_PRIM_TYPE_3VRT_POINT_LIST: case RADEON_PRIM_TYPE_3VRT_LINE_LIST: case RADEON_PRIM_TYPE_RECT_LIST: return nr % 3 || nr == 0; case RADEON_PRIM_TYPE_TRI_FAN: case RADEON_PRIM_TYPE_TRI_STRIP: return nr < 3; default: return 1; } } typedef struct { unsigned int start; unsigned int finish; unsigned int prim; unsigned int numverts; unsigned int offset; unsigned int vc_format; } drm_radeon_tcl_prim_t; static void radeon_cp_dispatch_vertex(struct drm_device * dev, struct drm_file *file_priv, struct drm_buf * buf, drm_radeon_tcl_prim_t * prim) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; int numverts = (int)prim->numverts; int nbox = 
sarea_priv->nbox; int i = 0; RING_LOCALS; DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n", prim->prim, prim->vc_format, prim->start, prim->finish, prim->numverts); if (bad_prim_vertex_nr(prim->prim, prim->numverts)) { DRM_ERROR("bad prim %x numverts %d\n", prim->prim, prim->numverts); return; } do { /* Emit the next cliprect */ if (i < nbox) { radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); } /* Emit the vertex buffer rendering commands */ BEGIN_RING(5); OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3)); OUT_RING(offset); OUT_RING(numverts); OUT_RING(prim->vc_format); OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST | RADEON_COLOR_ORDER_RGBA | RADEON_VTX_FMT_RADEON_MODE | (numverts << RADEON_NUM_VERTICES_SHIFT)); ADVANCE_RING(); i++; } while (i < nbox); } void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = master->driver_priv; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; RING_LOCALS; buf_priv->age = ++master_priv->sarea_priv->last_dispatch; /* Emit the vertex buffer age */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { BEGIN_RING(3); R600_DISPATCH_AGE(buf_priv->age); ADVANCE_RING(); } else { BEGIN_RING(2); RADEON_DISPATCH_AGE(buf_priv->age); ADVANCE_RING(); } buf->pending = 1; buf->used = 0; } static void radeon_cp_dispatch_indirect(struct drm_device * dev, struct drm_buf * buf, int start, int end) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end); if (start != end) { int offset = (dev_priv->gart_buffers_offset + buf->offset + start); int dwords = (end - start + 3) / sizeof(u32); /* Indirect buffer data must be an even number of * dwords, so if we've been given an odd number we must * pad the data with a Type-2 CP packet. 
*/ if (dwords & 1) { u32 *data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset + start); data[dwords++] = RADEON_CP_PACKET2; } /* Fire off the indirect buffer */ BEGIN_RING(3); OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1)); OUT_RING(offset); OUT_RING(dwords); ADVANCE_RING(); } } static void radeon_cp_dispatch_indices(struct drm_device *dev, struct drm_master *master, struct drm_buf * elt_buf, drm_radeon_tcl_prim_t * prim) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = master->driver_priv; drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; int offset = dev_priv->gart_buffers_offset + prim->offset; u32 *data; int dwords; int i = 0; int start = prim->start + RADEON_INDEX_PRIM_OFFSET; int count = (prim->finish - start) / sizeof(u16); int nbox = sarea_priv->nbox; DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n", prim->prim, prim->vc_format, prim->start, prim->finish, prim->offset, prim->numverts); if (bad_prim_vertex_nr(prim->prim, count)) { DRM_ERROR("bad prim %x count %d\n", prim->prim, count); return; } if (start >= prim->finish || (prim->start & 0x7)) { DRM_ERROR("buffer prim %d\n", prim->prim); return; } dwords = (prim->finish - prim->start + 3) / sizeof(u32); data = (u32 *) ((char *)dev->agp_buffer_map->handle + elt_buf->offset + prim->start); data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2); data[1] = offset; data[2] = prim->numverts; data[3] = prim->vc_format; data[4] = (prim->prim | RADEON_PRIM_WALK_IND | RADEON_COLOR_ORDER_RGBA | RADEON_VTX_FMT_RADEON_MODE | (count << RADEON_NUM_VERTICES_SHIFT)); do { if (i < nbox) radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); radeon_cp_dispatch_indirect(dev, elt_buf, prim->start, prim->finish); i++; } while (i < nbox); } #define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE static int radeon_cp_dispatch_texture(struct drm_device * dev, struct drm_file *file_priv, drm_radeon_texture_t * tex, drm_radeon_tex_image_t * image) { 
drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; u32 format; u32 *buffer; const u8 __user *data; int size, dwords, tex_width, blit_width, spitch; u32 height; int i; u32 texpitch, microtile; u32 offset, byte_offset; RING_LOCALS; if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) { DRM_ERROR("Invalid destination offset\n"); return -EINVAL; } dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; /* Flush the pixel cache. This ensures no pixel data gets mixed * up with the texture data from the host data blit, otherwise * part of the texture image may be corrupted. */ BEGIN_RING(4); RADEON_FLUSH_CACHE(); RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); /* The compiler won't optimize away a division by a variable, * even if the only legal values are powers of two. Thus, we'll * use a shift instead. */ switch (tex->format) { case RADEON_TXFORMAT_ARGB8888: case RADEON_TXFORMAT_RGBA8888: format = RADEON_COLOR_FORMAT_ARGB8888; tex_width = tex->width * 4; blit_width = image->width * 4; break; case RADEON_TXFORMAT_AI88: case RADEON_TXFORMAT_ARGB1555: case RADEON_TXFORMAT_RGB565: case RADEON_TXFORMAT_ARGB4444: case RADEON_TXFORMAT_VYUY422: case RADEON_TXFORMAT_YVYU422: format = RADEON_COLOR_FORMAT_RGB565; tex_width = tex->width * 2; blit_width = image->width * 2; break; case RADEON_TXFORMAT_I8: case RADEON_TXFORMAT_RGB332: format = RADEON_COLOR_FORMAT_CI8; tex_width = tex->width * 1; blit_width = image->width * 1; break; default: DRM_ERROR("invalid texture format %d\n", tex->format); return -EINVAL; } spitch = blit_width >> 6; if (spitch == 0 && image->height > 1) return -EINVAL; texpitch = tex->pitch; if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { microtile = 1; if (tex_width < 64) { texpitch &= ~(RADEON_DST_TILE_MICRO >> 22); /* we got tiled coordinates, untile them */ image->x *= 2; } } else microtile = 0; /* this might fail for zero-sized uploads - are those illegal? 
*/ if (!radeon_check_offset(dev_priv, tex->offset + image->height * blit_width - 1)) { DRM_ERROR("Invalid final destination offset\n"); return -EINVAL; } DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width); do { DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", tex->offset >> 10, tex->pitch, tex->format, image->x, image->y, image->width, image->height); /* Make a copy of some parameters in case we have to * update them for a multi-pass texture blit. */ height = image->height; data = (const u8 __user *)image->data; size = height * blit_width; if (size > RADEON_MAX_TEXTURE_SIZE) { height = RADEON_MAX_TEXTURE_SIZE / blit_width; size = height * blit_width; } else if (size < 4 && size > 0) { size = 4; } else if (size == 0) { return 0; } buf = radeon_freelist_get(dev); if (0 && !buf) { radeon_do_cp_idle(dev_priv); buf = radeon_freelist_get(dev); } if (!buf) { DRM_DEBUG("EAGAIN\n"); if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) return -EFAULT; return -EAGAIN; } /* Dispatch the indirect buffer. */ buffer = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); dwords = size / 4; #define RADEON_COPY_MT(_buf, _data, _width) \ do { \ if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ return -EFAULT; \ } \ } while(0) if (microtile) { /* texture micro tiling in use, minimum texture width is thus 16 bytes. however, we cannot use blitter directly for texture width < 64 bytes, since minimum tex pitch is 64 bytes and we need this to match the texture width, otherwise the blitter will tile it wrong. Thus, tiling manually in this case. Additionally, need to special case tex height = 1, since our actual image will have height 2 and we need to ensure we don't read beyond the texture size from user space. 
*/ if (tex->height == 1) { if (tex_width >= 64 || tex_width <= 16) { RADEON_COPY_MT(buffer, data, (int)(tex_width * sizeof(u32))); } else if (tex_width == 32) { RADEON_COPY_MT(buffer, data, 16); RADEON_COPY_MT(buffer + 8, data + 16, 16); } } else if (tex_width >= 64 || tex_width == 16) { RADEON_COPY_MT(buffer, data, (int)(dwords * sizeof(u32))); } else if (tex_width < 16) { for (i = 0; i < tex->height; i++) { RADEON_COPY_MT(buffer, data, tex_width); buffer += 4; data += tex_width; } } else if (tex_width == 32) { /* TODO: make sure this works when not fitting in one buffer (i.e. 32bytes x 2048...) */ for (i = 0; i < tex->height; i += 2) { RADEON_COPY_MT(buffer, data, 16); data += 16; RADEON_COPY_MT(buffer + 8, data, 16); data += 16; RADEON_COPY_MT(buffer + 4, data, 16); data += 16; RADEON_COPY_MT(buffer + 12, data, 16); data += 16; buffer += 16; } } } else { if (tex_width >= 32) { /* Texture image width is larger than the minimum, so we * can upload it directly. */ RADEON_COPY_MT(buffer, data, (int)(dwords * sizeof(u32))); } else { /* Texture image width is less than the minimum, so we * need to pad out each image scanline to the minimum * width. 
*/ for (i = 0; i < tex->height; i++) { RADEON_COPY_MT(buffer, data, tex_width); buffer += 8; data += tex_width; } } } #undef RADEON_COPY_MT byte_offset = (image->y & ~2047) * blit_width; buf->file_priv = file_priv; buf->used = size; offset = dev_priv->gart_buffers_offset + buf->offset; BEGIN_RING(9); OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5)); OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL | RADEON_GMC_BRUSH_NONE | (format << 8) | RADEON_GMC_SRC_DATATYPE_COLOR | RADEON_ROP3_S | RADEON_DP_SRC_SOURCE_MEMORY | RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS); OUT_RING((spitch << 22) | (offset >> 10)); OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10))); OUT_RING(0); OUT_RING((image->x << 16) | (image->y % 2048)); OUT_RING((image->width << 16) | height); RADEON_WAIT_UNTIL_2D_IDLE(); ADVANCE_RING(); COMMIT_RING(); radeon_cp_discard_buffer(dev, file_priv->master, buf); /* Update the input parameters for next time */ image->y += height; image->height -= height; image->data = (const u8 __user *)image->data + size; } while (image->height > 0); /* Flush the pixel cache after the blit completes. This ensures * the texture data is written out to memory before rendering * continues. 
 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();
	COMMIT_RING();
	return 0;
}

/* Load the 32x32 monochrome stipple pattern: one write to reset
 * RE_STIPPLE_ADDR, then a 32-dword table write of the pattern data.
 */
static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(35);

	OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
	OUT_RING(0x00000000);

	OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
	for (i = 0; i < 32; i++) {
		OUT_RING(stipple[i]);
	}

	ADVANCE_RING();
}

/* Push the cached software state for one hardware surface slot to the
 * surface registers.  The CP is idled first so the register update
 * cannot race with in-flight rendering; if the mmio map is not set up
 * yet this is a no-op.
 */
static void radeon_apply_surface_regs(int surf_index,
				      drm_radeon_private_t *dev_priv)
{
	if (!dev_priv->mmio)
		return;

	radeon_do_cp_idle(dev_priv);

	RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
		     dev_priv->surfaces[surf_index].flags);
	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
		     dev_priv->surfaces[surf_index].lower);
	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
		     dev_priv->surfaces[surf_index].upper);
}

/* Allocates a virtual surface
 * doesn't always allocate a real surface, will stretch an existing
 * surface when possible.
 *
 * Note that refcount can be at most 2, since during a free refcount=3
 * might mean we have to allocate a new surface which might not always
 * be available.
 * For example : we allocate three contiguous surfaces ABC. If B is
 * freed, we suddenly need two surfaces to store A and C, which might
 * not always be available.
 */
static int alloc_surface(drm_radeon_surface_alloc_t *new,
			 drm_radeon_private_t *dev_priv,
			 struct drm_file *file_priv)
{
	struct radeon_virt_surface *s;
	int i;
	int virt_surface_index;
	uint32_t new_upper, new_lower;

	new_lower = new->address;
	new_upper = new_lower + new->size - 1;

	/* sanity check: non-empty range, non-zero flags, and both bounds
	 * aligned per RADEON_SURF_ADDRESS_FIXED_MASK */
	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
	     RADEON_SURF_ADDRESS_FIXED_MASK) ||
	    ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
		return -1;

	/* make sure there is no overlap with existing surfaces */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if ((dev_priv->surfaces[i].refcount != 0) &&
		    (((new_lower >= dev_priv->surfaces[i].lower) &&
		      (new_lower < dev_priv->surfaces[i].upper)) ||
		     ((new_lower < dev_priv->surfaces[i].lower) &&
		      (new_upper > dev_priv->surfaces[i].lower)))) {
			return -1;
		}
	}

	/* find a free virtual surface slot */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
		if (dev_priv->virt_surfaces[i].file_priv == NULL)
			break;
	if (i == 2 * RADEON_MAX_SURFACES) {
		return -1;
	}
	virt_surface_index = i;

	/* try to reuse an existing surface */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		/* extend before: new range ends exactly where surface i
		 * begins, and the flags match */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].lower = s->lower;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}

		/* extend after: new range starts right past surface i */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].upper = s->upper;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* okay, we need a new one */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if (dev_priv->surfaces[i].refcount == 0) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->file_priv = file_priv;
			dev_priv->surfaces[i].refcount = 1;
			dev_priv->surfaces[i].lower = s->lower;
			dev_priv->surfaces[i].upper = s->upper;
			dev_priv->surfaces[i].flags = s->flags;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* we didn't find anything */
	return -1;
}

/* Release the virtual surface owned by file_priv whose lower bound
 * equals "lower": shrink the backing hardware surface away from the
 * freed range, drop its refcount, and clear its flags at refcount 0.
 * Returns 0 on success, 1 if no matching virtual surface was found.
 */
static int free_surface(struct drm_file *file_priv,
			drm_radeon_private_t * dev_priv,
			int lower)
{
	struct radeon_virt_surface *s;
	int i;

	/* find the virtual surface */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
		s = &(dev_priv->virt_surfaces[i]);
		if (s->file_priv) {
			if ((lower == s->lower) && (file_priv == s->file_priv)) {
				/* shrink the hardware surface bounds away
				 * from whichever end this virtual surface
				 * occupied */
				if (dev_priv->surfaces[s->surface_index].
				    lower == s->lower)
					dev_priv->surfaces[s->surface_index].
					    lower = s->upper;

				if (dev_priv->surfaces[s->surface_index].
				    upper == s->upper)
					dev_priv->surfaces[s->surface_index].
					    upper = s->lower;

				dev_priv->surfaces[s->surface_index].refcount--;
				if (dev_priv->surfaces[s->surface_index].
				    refcount == 0)
					dev_priv->surfaces[s->surface_index].
					    flags = 0;
				s->file_priv = NULL;
				radeon_apply_surface_regs(s->surface_index,
							  dev_priv);
				return 0;
			}
		}
	}
	return 1;
}

/* Free every virtual surface still owned by this client. */
static void radeon_surfaces_release(struct drm_file *file_priv,
				    drm_radeon_private_t * dev_priv)
{
	int i;
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
		if (dev_priv->virt_surfaces[i].file_priv == file_priv)
			free_surface(file_priv, dev_priv,
				     dev_priv->virt_surfaces[i].lower);
	}
}

/* ================================================================
 * IOCTL functions
 */
static int radeon_surface_alloc(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_surface_alloc_t *alloc = data;

	if (alloc_surface(alloc, dev_priv, file_priv) == -1)
		return -EINVAL;
	else
		return 0;
}

static int radeon_surface_free(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_surface_free_t *memfree = data;

	if (free_surface(file_priv, dev_priv, memfree->address))
		return -EINVAL;
	else
		return 0;
}

/* Clear ioctl: copy in the client's depth cliprects (clamped to the
 * SAREA cliprect limit) and hand off to the clear dispatcher.
 */
static int radeon_cp_clear(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	drm_radeon_clear_t *clear = data;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
			       sarea_priv->nbox * sizeof(depth_boxes[0])))
		return -EFAULT;

	radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);

	COMMIT_RING();
	return 0;
}

/* Not sure why this isn't set all the time:
 */
static int radeon_do_init_pageflip(struct drm_device *dev,
				   struct drm_master *master)
{
	drm_radeon_private_t *dev_priv =
	    dev->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;
	RING_LOCALS;

	DRM_DEBUG("\n");

	/* Enable flip-on-vblank control for both CRTCs. */
	BEGIN_RING(6);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	ADVANCE_RING();

	dev_priv->page_flipping = 1;

	if (master_priv->sarea_priv->pfCurrentPage != 1)
		master_priv->sarea_priv->pfCurrentPage = 0;

	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
static int radeon_cp_flip(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* Lazily enable page flipping the first time it is requested. */
	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip(dev, file_priv->master);

	radeon_cp_dispatch_flip(dev, file_priv->master);

	COMMIT_RING();
	return 0;
}

static int radeon_cp_swap(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	/* R600 and newer use their own swap dispatcher. */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_cp_dispatch_swap(dev, file_priv);
	else
		radeon_cp_dispatch_swap(dev, file_priv->master);
	sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}

/* Vertex ioctl: validate the client-supplied buffer index and
 * primitive type, emit any dirty SAREA state, then dispatch the
 * vertex buffer; optionally discard it afterwards.
 */
static int radeon_cp_vertex(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv =
	    file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_vertex_t *vertex = data;
	drm_radeon_tcl_prim_t prim;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	sarea_priv = master_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", vertex->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	/* Build up a prim_t record:
	 */
	if (vertex->count) {
		buf->used = vertex->count;	/* not used? */

		if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
			if (radeon_emit_state(dev_priv, file_priv,
					      &sarea_priv->context_state,
					      sarea_priv->tex_state,
					      sarea_priv->dirty)) {
				DRM_ERROR("radeon_emit_state failed\n");
				return -EINVAL;
			}

			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
					       RADEON_UPLOAD_TEX1IMAGES |
					       RADEON_UPLOAD_TEX2IMAGES |
					       RADEON_REQUIRE_QUIESCENCE);
		}

		prim.start = 0;
		prim.finish = vertex->count;	/* unused */
		prim.prim = vertex->prim;
		prim.numverts = vertex->count;
		prim.vc_format = sarea_priv->vc_format;

		radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
	}

	if (vertex->discard) {
		radeon_cp_discard_buffer(dev, file_priv->master, buf);
	}

	COMMIT_RING();
	return 0;
}

/* Indexed-primitive ioctl: like radeon_cp_vertex, but the buffer holds
 * u16 indices after a header; start/end are byte offsets into it.
 */
static int radeon_cp_indices(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_indices_t *elts = data;
	drm_radeon_tcl_prim_t prim;
	int count;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	sarea_priv = master_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
		  DRM_CURRENTPID, elts->idx, elts->start, elts->end,
		  elts->discard);

	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", elts->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts->idx);
		return -EINVAL;
	}

	count = (elts->end - elts->start) / sizeof(u16);
	elts->start -= RADEON_INDEX_PRIM_OFFSET;

	if (elts->start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
		return -EINVAL;
	}
	if (elts->start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
		return -EINVAL;
	}

	buf->used = elts->end;

	if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
		if (radeon_emit_state(dev_priv, file_priv,
				      &sarea_priv->context_state,
				      sarea_priv->tex_state,
				      sarea_priv->dirty)) {
			DRM_ERROR("radeon_emit_state failed\n");
			return -EINVAL;
		}

		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
				       RADEON_UPLOAD_TEX1IMAGES |
				       RADEON_UPLOAD_TEX2IMAGES |
				       RADEON_REQUIRE_QUIESCENCE);
	}

	/* Build up a prim_t record:
	 */
	prim.start = elts->start;
	prim.finish = elts->end;
	prim.prim = elts->prim;
	prim.offset = 0;	/* offset from start of dma buffers */
	prim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
	prim.vc_format = sarea_priv->vc_format;

	radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
	if (elts->discard) {
		radeon_cp_discard_buffer(dev, file_priv->master, buf);
	}

	COMMIT_RING();
	return 0;
}

/* Texture-upload ioctl: copy in the image descriptor and route to the
 * family-specific texture dispatcher (R600+ vs. earlier chips).
 */
static int radeon_cp_texture(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_texture_t *tex = data;
	drm_radeon_tex_image_t image;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (tex->image == NULL) {
		DRM_ERROR("null texture image!\n");
		return -EINVAL;
	}

	if (DRM_COPY_FROM_USER(&image,
			       (drm_radeon_tex_image_t __user *) tex->image,
			       sizeof(image)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		ret = r600_cp_dispatch_texture(dev, file_priv, tex, &image);
	else
		ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);

	return ret;
}

/* Stipple ioctl: copy in the 32-dword pattern and emit it. */
static int radeon_cp_stipple(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_stipple_t *stipple = data;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 *
			       sizeof(u32)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	radeon_cp_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}

/* Indirect-buffer ioctl: dispatch a client-filled buffer of raw CP
 * commands.  Only the byte range [start, end) is validated here; the
 * contents are trusted, which is why this path is restricted (see the
 * "insecure" comment below).
 */
static int radeon_cp_indirect(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_indirect_t *indirect = data;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
		  indirect->idx, indirect->start, indirect->end,
		  indirect->discard);

	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	buf = dma->buflist[indirect->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
		return -EINVAL;
	}

	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect->end;

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		r600_cp_dispatch_indirect(dev, buf, indirect->start,
					  indirect->end);
	else {
		/* Wait for the 3D stream to idle before the indirect buffer
		 * containing 2D acceleration commands is processed.
		 */
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_3D_IDLE();
		ADVANCE_RING();
		radeon_cp_dispatch_indirect(dev, buf, indirect->start,
					    indirect->end);
	}

	if (indirect->discard) {
		radeon_cp_discard_buffer(dev, file_priv->master, buf);
	}

	COMMIT_RING();
	return 0;
}

/* Vertex2 ioctl: newer vertex path where one buffer carries an array
 * of primitives, each optionally selecting one of a set of client
 * states; state is (re)emitted only when stateidx changes.
 */
static int radeon_cp_vertex2(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_sarea_t *sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_vertex2_t *vertex = data;
	int i;
	unsigned char laststate;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	sarea_priv = master_priv->sarea_priv;

	DRM_DEBUG("pid=%d index=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->discard);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}

	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		return -EINVAL;

	/* laststate starts at 0xff so the first primitive always emits
	 * its state.
	 * NOTE(review): prim.stateidx is copied from userspace and used
	 * to index vertex->state below without an explicit range check
	 * here - presumably bounded by the copy itself; confirm. */
	for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
		drm_radeon_prim_t prim;
		drm_radeon_tcl_prim_t tclprim;

		if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
			return -EFAULT;

		if (prim.stateidx != laststate) {
			drm_radeon_state_t state;

			if (DRM_COPY_FROM_USER(&state,
					       &vertex->state[prim.stateidx],
					       sizeof(state)))
				return -EFAULT;

			if (radeon_emit_state2(dev_priv, file_priv, &state)) {
				DRM_ERROR("radeon_emit_state2 failed\n");
				return -EINVAL;
			}

			laststate = prim.stateidx;
		}

		tclprim.start = prim.start;
		tclprim.finish = prim.finish;
		tclprim.prim = prim.prim;
		tclprim.vc_format = prim.vc_format;

		if (prim.prim & RADEON_PRIM_WALK_IND) {
			tclprim.offset = prim.numverts * 64;
			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* duh */

			radeon_cp_dispatch_indices(dev, file_priv->master,
						   buf, &tclprim);
		} else {
			tclprim.numverts = prim.numverts;
			tclprim.offset = 0;	/* not used */

			radeon_cp_dispatch_vertex(dev, file_priv,
						  buf, &tclprim);
		}

		if (sarea_priv->nbox == 1)
			sarea_priv->nbox = 0;
	}

	if (vertex->discard) {
		radeon_cp_discard_buffer(dev, file_priv->master, buf);
	}

	COMMIT_RING();
	return 0;
}

/* Emit one registered type-0 state packet from a client command
 * stream, after bounds- and content-checking it against the packet
 * table.
 */
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
			       struct drm_file *file_priv,
			       drm_radeon_cmd_header_t header,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int id = (int)header.packet.packet_id;
	int sz, reg;
	RING_LOCALS;

	if (id >= RADEON_MAX_STATE_PACKETS)
		return -EINVAL;

	sz = packet[id].len;
	reg = packet[id].start;

	if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
		DRM_ERROR("Packet size provided larger than data provided\n");
		return -EINVAL;
	}

	if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
					   cmdbuf->buffer)) {
		DRM_ERROR("Packet verification failed\n");
		return -EINVAL;
	}

	BEGIN_RING(sz + 1);
	OUT_RING(CP_PACKET0(reg, (sz - 1)));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}

/* Emit "sz" TCL scalar registers starting at "start" with the given
 * stride, straight from the command buffer.
 */
static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
					  drm_radeon_cmd_header_t header,
					  drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.scalars.count;
	int start = header.scalars.offset;
	int stride = header.scalars.stride;
	RING_LOCALS;

	BEGIN_RING(3 + sz);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}

/* God this is ugly
 *
 * Variant of radeon_emit_scalars with the scalar offset rebased by
 * 0x100.
 */
static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
					   drm_radeon_cmd_header_t header,
					   drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.scalars.count;
	int start = ((unsigned int)header.scalars.offset) + 0x100;
	int stride = header.scalars.stride;
	RING_LOCALS;

	BEGIN_RING(3 + sz);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}

/* Emit "sz" TCL vector registers from the command buffer, flushing
 * TCL state first.
 */
static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
					  drm_radeon_cmd_header_t header,
					  drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.vectors.count;
	int start = header.vectors.offset;
	int stride = header.vectors.stride;
	RING_LOCALS;

	BEGIN_RING(5 + sz);
	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}

/* Emit a linear run of TCL vectors; sz is in dwords (count is in
 * 4-dword vectors).
 */
static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
					    drm_radeon_cmd_header_t header,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.veclinear.count * 4;
	int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
	RING_LOCALS;

	if (!sz)
		return 0;
	/* sz dwords = sz * 4 bytes, checked against the bytes left */
	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;

	BEGIN_RING(5 + sz);
	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
	OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
	ADVANCE_RING();

	return 0;
}

/* Verify/fix up one type-3 packet from the command buffer and emit it
 * to the ring unmodified.
 */
static int radeon_emit_packet3(struct drm_device * dev,
			       struct drm_file *file_priv,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	unsigned int cmdsz;
	int ret;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	BEGIN_RING(cmdsz);
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
	ADVANCE_RING();

	return 0;
}

/* Emit a type-3 packet once per cliprect, setting the clip rect for
 * each box first.  With no boxes the packet is skipped but still
 * consumed from the buffer (the "out" path).
 */
static int radeon_emit_packet3_cliprect(struct drm_device *dev,
					struct drm_file *file_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					int orig_nbox)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	unsigned int cmdsz;
	int ret;
	struct drm_clip_rect __user *boxes = cmdbuf->boxes;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	if (!orig_nbox)
		goto out;

	do {
		if (i < cmdbuf->nbox) {
			if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
				return -EFAULT;
			/* FIXME The second and subsequent times round
			 * this loop, send a WAIT_UNTIL_3D_IDLE before
			 * calling emit_clip_rect(). This fixes a
			 * lockup on fast machines when sending
			 * several cliprects with a cmdbuf, as when
			 * waving a 2D window over a 3D
			 * window. Something in the commands from user
			 * space seems to hang the card when they're
			 * sent several times in a row. That would be
			 * the correct place to fix it but this works
			 * around it until I can figure that out - Tim
			 * Smith */
			if (i) {
				BEGIN_RING(2);
				RADEON_WAIT_UNTIL_3D_IDLE();
				ADVANCE_RING();
			}
			radeon_emit_clip_rect(dev_priv, &box);
		}

		BEGIN_RING(cmdsz);
		OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
		ADVANCE_RING();

	} while (++i < cmdbuf->nbox);
	if (cmdbuf->nbox == 1)
		cmdbuf->nbox = 0;

	return 0;
      out:
	drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
	return 0;
}

/* Emit an engine-idle wait for the 2D queue, 3D queue, or both. */
static int radeon_emit_wait(struct drm_device * dev, int flags)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%x\n", flags);
	switch (flags) {
	case RADEON_WAIT_2D:
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_3D:
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_3D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_2D | RADEON_WAIT_3D:
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_IDLE();
		ADVANCE_RING();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Command-buffer ioctl: copy the whole client command stream into a
 * kernel-side buffer, then decode and emit it command by command.
 */
static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	drm_radeon_cmd_header_t stack_header;
	int idx;
	drm_radeon_kcmd_buffer_t *cmdbuf = data;
	int orig_nbox;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
		return -EINVAL;
	}

	/* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
	 * races between checking values and using those values in other code,
	 * and simply to avoid a lot of function calls to copy in data.
	 */
	if (cmdbuf->bufsz != 0) {
		int rv;
		void __user *buffer = cmdbuf->buffer;
		rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
		if (rv)
			return rv;
		rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
					       cmdbuf->bufsz);
		if (rv) {
			drm_buffer_free(cmdbuf->buffer);
			return rv;
		}
	} else
		goto done;

	orig_nbox = cmdbuf->nbox;

	/* R300-class microcode takes an entirely separate decode path. */
	if (dev_priv->microcode_version == UCODE_R300) {
		int temp;
		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);

		drm_buffer_free(cmdbuf->buffer);

		return temp;
	}

	/* microcode_version != r300 */
	while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {

		drm_radeon_cmd_header_t *header;
		header = drm_buffer_read_object(cmdbuf->buffer,
						sizeof(stack_header),
						&stack_header);

		switch (header->header.cmd_type) {
		case RADEON_CMD_PACKET:
			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets
			    (dev_priv, file_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_packets failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS:
			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_VECTORS:
			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_vectors failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header->dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				goto err;
			}

			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				goto err;
			}

			radeon_cp_discard_buffer(dev, file_priv->master, buf);
			break;

		case RADEON_CMD_PACKET3:
			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
				DRM_ERROR("radeon_emit_packet3 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_PACKET3_CLIP:
			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect
			    (dev, file_priv, cmdbuf, orig_nbox)) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS2:
			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_WAIT:
			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait(dev, header->wait.flags)) {
				DRM_ERROR("radeon_emit_wait failed\n");
				goto err;
			}
			break;
		case RADEON_CMD_VECLINEAR:
			DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
			if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_veclinear failed\n");
				goto err;
			}
			break;

		default:
			DRM_ERROR("bad cmd_type %d at byte %d\n",
				  header->header.cmd_type,
				  cmdbuf->buffer->iterator);
			goto err;
		}
	}

	drm_buffer_free(cmdbuf->buffer);

      done:
	DRM_DEBUG("DONE\n");
	COMMIT_RING();
	return 0;

      err:
	drm_buffer_free(cmdbuf->buffer);
	return -EINVAL;
}

/* Getparam ioctl: report one driver/hardware parameter to userspace
 * as an int.
 * NOTE(review): several of these values (e.g. mmio->offset) may not
 * fit in an int on 64-bit kernels - historical ABI, confirm before
 * widening.
 */
static int radeon_cp_getparam(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_getparam_t *param = data;
	int value;

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param->param) {
	case RADEON_PARAM_GART_BUFFER_OFFSET:
		value = dev_priv->gart_buffers_offset;
		break;
	case RADEON_PARAM_LAST_FRAME:
		dev_priv->stats.last_frame_reads++;
		value = GET_SCRATCH(dev_priv, 0);
		break;
	case RADEON_PARAM_LAST_DISPATCH:
		value = GET_SCRATCH(dev_priv, 1);
		break;
	case RADEON_PARAM_LAST_CLEAR:
		dev_priv->stats.last_clear_reads++;
		value = GET_SCRATCH(dev_priv, 2);
		break;
	case RADEON_PARAM_IRQ_NR:
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			value = 0;
		else
			value = drm_dev_to_irq(dev);
		break;
	case RADEON_PARAM_GART_BASE:
		value = dev_priv->gart_vm_start;
		break;
	case RADEON_PARAM_REGISTER_HANDLE:
		value = dev_priv->mmio->offset;
		break;
	case RADEON_PARAM_STATUS_HANDLE:
		value = dev_priv->ring_rptr_offset;
		break;
#if BITS_PER_LONG == 32
	/*
	 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
	 * pointer which can't fit into an
	 * int-sized variable. According to
	 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
	 * not supporting it shouldn't be a problem. If the same functionality
	 * is needed on 64-bit platforms, a new ioctl() would have to be added,
	 * so backwards-compatibility for the embedded platforms can be
	 * maintained. --davidm 4-Feb-2004.
	 */
	case RADEON_PARAM_SAREA_HANDLE:
		/* The lock is the first dword in the sarea. */
		/* no users of this parameter */
		break;
#endif
	case RADEON_PARAM_GART_TEX_HANDLE:
		value = dev_priv->gart_textures_offset;
		break;
	case RADEON_PARAM_SCRATCH_OFFSET:
		if (!dev_priv->writeback_works)
			return -EINVAL;
		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
			value = R600_SCRATCH_REG_OFFSET;
		else
			value = RADEON_SCRATCH_REG_OFFSET;
		break;
	case RADEON_PARAM_CARD_TYPE:
		if (dev_priv->flags & RADEON_IS_PCIE)
			value = RADEON_CARD_PCIE;
		else if (dev_priv->flags & RADEON_IS_AGP)
			value = RADEON_CARD_AGP;
		else
			value = RADEON_CARD_PCI;
		break;
	case RADEON_PARAM_VBLANK_CRTC:
		value = radeon_vblank_crtc_get(dev);
		break;
	case RADEON_PARAM_FB_LOCATION:
		value = radeon_read_fb_location(dev_priv);
		break;
	case RADEON_PARAM_NUM_GB_PIPES:
		value = dev_priv->num_gb_pipes;
		break;
	case RADEON_PARAM_NUM_Z_PIPES:
		value = dev_priv->num_z_pipes;
		break;
	default:
		DRM_DEBUG("Invalid parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Setparam ioctl: let the client (typically the X server) set one
 * driver parameter.
 */
static int radeon_cp_setparam(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	drm_radeon_setparam_t *sp = data;
	struct drm_radeon_driver_file_fields *radeon_priv;

	switch (sp->param) {
	case RADEON_SETPARAM_FB_LOCATION:
		radeon_priv = file_priv->driver_priv;
		radeon_priv->radeon_fb_delta = dev_priv->fb_location -
		    sp->value;
		break;
	case RADEON_SETPARAM_SWITCH_TILING:
		/* values other than 0/1 are silently ignored */
		if (sp->value == 0) {
			DRM_DEBUG("color tiling disabled\n");
			dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
			dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->tiling_enabled = 0;
		} else if (sp->value == 1) {
			DRM_DEBUG("color tiling enabled\n");
			dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
			dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->tiling_enabled = 1;
		}
		break;
	case RADEON_SETPARAM_PCIGART_LOCATION:
		dev_priv->pcigart_offset = sp->value;
		dev_priv->pcigart_offset_set = 1;
		break;
	case RADEON_SETPARAM_NEW_MEMMAP:
		dev_priv->new_memmap = sp->value;
		break;
	case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
		dev_priv->gart_info.table_size = sp->value;
		if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
			dev_priv->gart_info.table_size =
			    RADEON_PCIGART_TABLE_SIZE;
		break;
	case RADEON_SETPARAM_VBLANK_CRTC:
		return radeon_vblank_crtc_set(dev, sp->value);
		break;	/* NOTE(review): unreachable after the return above */
	default:
		DRM_DEBUG("Invalid parameter %d\n", sp->param);
		return -EINVAL;
	}

	return 0;
}

/* When a client dies:
 *    - Check for and clean up flipped page state
 *    - Free any alloced GART memory.
 *    - Free any alloced radeon surfaces.
 *
 * DRM infrastructure takes care of reclaiming dma buffers.
 */
void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	if (dev->dev_private) {
		drm_radeon_private_t *dev_priv = dev->dev_private;
		dev_priv->page_flipping = 0;
		radeon_mem_release(file_priv, dev_priv->gart_heap);
		radeon_mem_release(file_priv, dev_priv->fb_heap);
		radeon_surfaces_release(file_priv, dev_priv);
	}
}

void radeon_driver_lastclose(struct drm_device *dev)
{
	radeon_surfaces_release(PCIGART_FILE_PRIV, dev->dev_private);

	radeon_do_release(dev);
}

/* Per-open setup: allocate the per-file driver state and record the
 * current framebuffer location as the client's delta baseline.
 */
int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_driver_file_fields *radeon_priv;

	DRM_DEBUG("\n");
	radeon_priv = kmalloc(sizeof(*radeon_priv), GFP_KERNEL);

	if (!radeon_priv)
		return -ENOMEM;

	file_priv->driver_priv = radeon_priv;

	if (dev_priv)
		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
	else
		radeon_priv->radeon_fb_delta = 0;
	return 0;
}

void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_radeon_driver_file_fields *radeon_priv =
	    file_priv->driver_priv;

	kfree(radeon_priv);
}

/* ioctl dispatch table (continues beyond this excerpt) */
struct drm_ioctl_desc radeon_ioctls[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) }; int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
gpl-2.0
davidmueller13/android_kernel_lge_msm8974
arch/unicore32/mm/mmu.c
5924
13116
/* * linux/arch/unicore32/mm/mmu.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/mman.h> #include <linux/nodemask.h> #include <linux/memblock.h> #include <linux/fs.h> #include <linux/bootmem.h> #include <linux/io.h> #include <asm/cputype.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/sizes.h> #include <asm/tlb.h> #include <asm/memblock.h> #include <mach/map.h> #include "mm.h" /* * empty_zero_page is a special page that is used for * zero-initialized data and COW. */ struct page *empty_zero_page; EXPORT_SYMBOL(empty_zero_page); /* * The pmd table for the upper-most set of pages. */ pmd_t *top_pmd; pgprot_t pgprot_user; EXPORT_SYMBOL(pgprot_user); pgprot_t pgprot_kernel; EXPORT_SYMBOL(pgprot_kernel); static int __init noalign_setup(char *__unused) { cr_alignment &= ~CR_A; cr_no_alignment &= ~CR_A; set_cr(cr_alignment); return 1; } __setup("noalign", noalign_setup); void adjust_cr(unsigned long mask, unsigned long set) { unsigned long flags; mask &= ~CR_A; set &= mask; local_irq_save(flags); cr_no_alignment = (cr_no_alignment & ~mask) | set; cr_alignment = (cr_alignment & ~mask) | set; set_cr((get_cr() & ~mask) | set); local_irq_restore(flags); } struct map_desc { unsigned long virtual; unsigned long pfn; unsigned long length; unsigned int type; }; #define PROT_PTE_DEVICE (PTE_PRESENT | PTE_YOUNG | \ PTE_DIRTY | PTE_READ | PTE_WRITE) #define PROT_SECT_DEVICE (PMD_TYPE_SECT | PMD_PRESENT | \ PMD_SECT_READ | PMD_SECT_WRITE) static struct mem_type mem_types[] = { [MT_DEVICE] = { /* Strongly ordered */ .prot_pte = PROT_PTE_DEVICE, .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT, .prot_sect = PROT_SECT_DEVICE, }, /* * 
MT_KUSER: pte for vecpage -- cacheable, * and sect for unigfx mmap -- noncacheable */ [MT_KUSER] = { .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_CACHEABLE | PTE_READ | PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT, .prot_sect = PROT_SECT_DEVICE, }, [MT_HIGH_VECTORS] = { .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_CACHEABLE | PTE_READ | PTE_WRITE | PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT, }, [MT_MEMORY] = { .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_WRITE | PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT, .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE | PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC, }, [MT_ROM] = { .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE | PMD_SECT_READ, }, }; const struct mem_type *get_mem_type(unsigned int type) { return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL; } EXPORT_SYMBOL(get_mem_type); /* * Adjust the PMD section entries according to the CPU in use. */ static void __init build_mem_type_table(void) { pgprot_user = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE); pgprot_kernel = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_READ | PTE_WRITE | PTE_EXEC | PTE_CACHEABLE); } #define vectors_base() (vectors_high() ? 
0xffff0000 : 0) static void __init *early_alloc(unsigned long sz) { void *ptr = __va(memblock_alloc(sz, sz)); memset(ptr, 0, sz); return ptr; } static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) { if (pmd_none(*pmd)) { pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t)); __pmd_populate(pmd, __pa(pte) | prot); } BUG_ON(pmd_bad(*pmd)); return pte_offset_kernel(pmd, addr); } static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, const struct mem_type *type) { pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); do { set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte))); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); } static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long phys, const struct mem_type *type) { pmd_t *pmd = pmd_offset((pud_t *)pgd, addr); /* * Try a section mapping - end, addr and phys must all be aligned * to a section boundary. */ if (((addr | end | phys) & ~SECTION_MASK) == 0) { pmd_t *p = pmd; do { set_pmd(pmd, __pmd(phys | type->prot_sect)); phys += SECTION_SIZE; } while (pmd++, addr += SECTION_SIZE, addr != end); flush_pmd_entry(p); } else { /* * No need to loop; pte's aren't interested in the * individual L1 entries. */ alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); } } /* * Create the page directory entries and any necessary * page tables for the mapping specified by `md'. We * are able to cope here with varying sizes and address * offsets, and we take full advantage of sections. 
*/ static void __init create_mapping(struct map_desc *md) { unsigned long phys, addr, length, end; const struct mem_type *type; pgd_t *pgd; if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { printk(KERN_WARNING "BUG: not creating mapping for " "0x%08llx at 0x%08lx in user region\n", __pfn_to_phys((u64)md->pfn), md->virtual); return; } if ((md->type == MT_DEVICE || md->type == MT_ROM) && md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx " "overlaps vmalloc space\n", __pfn_to_phys((u64)md->pfn), md->virtual); } type = &mem_types[md->type]; addr = md->virtual & PAGE_MASK; phys = (unsigned long)__pfn_to_phys(md->pfn); length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " "be mapped using pages, ignoring.\n", __pfn_to_phys(md->pfn), addr); return; } pgd = pgd_offset_k(addr); end = addr + length; do { unsigned long next = pgd_addr_end(addr, end); alloc_init_section(pgd, addr, next, phys, type); phys += next - addr; addr = next; } while (pgd++, addr != end); } static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M); /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. This can be used to increase (or decrease) the vmalloc * area - the default is 128m. 
*/ static int __init early_vmalloc(char *arg) { unsigned long vmalloc_reserve = memparse(arg, NULL); if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; printk(KERN_WARNING "vmalloc area too small, limiting to %luMB\n", vmalloc_reserve >> 20); } if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); printk(KERN_WARNING "vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); return 0; } early_param("vmalloc", early_vmalloc); static phys_addr_t lowmem_limit __initdata = SZ_1G; static void __init sanity_check_meminfo(void) { int i, j; lowmem_limit = __pa(vmalloc_min - 1) + 1; memblock_set_current_limit(lowmem_limit); for (i = 0, j = 0; i < meminfo.nr_banks; i++) { struct membank *bank = &meminfo.bank[j]; *bank = meminfo.bank[i]; j++; } meminfo.nr_banks = j; } static inline void prepare_page_table(void) { unsigned long addr; phys_addr_t end; /* * Clear out all the mappings below the kernel image. */ for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); /* * Find the end of the first block of lowmem. */ end = memblock.memory.regions[0].base + memblock.memory.regions[0].size; if (end >= lowmem_limit) end = lowmem_limit; /* * Clear out all the kernel space mappings, except for the first * memory bank, up to the end of the vmalloc region. */ for (addr = __phys_to_virt(end); addr < VMALLOC_END; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); } /* * Reserve the special regions of memory */ void __init uc32_mm_memblock_reserve(void) { /* * Reserve the page tables. These are already in use, * and can only be in node 0. */ memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); } /* * Set up device the mappings. 
Since we clear out the page tables for all * mappings above VMALLOC_END, we will remove any debug device mappings. * This means you have to be careful how you debug this function, or any * called function. This means you can't use any function or debugging * method which may touch any device, otherwise the kernel _will_ crash. */ static void __init devicemaps_init(void) { struct map_desc map; unsigned long addr; void *vectors; /* * Allocate the vector page early. */ vectors = early_alloc(PAGE_SIZE); for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); /* * Create a mapping for the machine vectors at the high-vectors * location (0xffff0000). If we aren't using high-vectors, also * create a mapping at the low-vectors virtual address. */ map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map.virtual = VECTORS_BASE; map.length = PAGE_SIZE; map.type = MT_HIGH_VECTORS; create_mapping(&map); /* * Create a mapping for the kuser page at the special * location (0xbfff0000) to the same vectors location. */ map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map.virtual = KUSER_VECPAGE_BASE; map.length = PAGE_SIZE; map.type = MT_KUSER; create_mapping(&map); /* * Finally flush the caches and tlb to ensure that we're in a * consistent state wrt the writebuffer. This also ensures that * any write-allocated cache lines in the vector page are written * back. After this point, we can start to touch devices again. */ local_flush_tlb_all(); flush_cache_all(); } static void __init map_lowmem(void) { struct memblock_region *reg; /* Map all the lowmem memory banks. 
*/ for_each_memblock(memory, reg) { phys_addr_t start = reg->base; phys_addr_t end = start + reg->size; struct map_desc map; if (end > lowmem_limit) end = lowmem_limit; if (start >= end) break; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); map.length = end - start; map.type = MT_MEMORY; create_mapping(&map); } } /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ void __init paging_init(void) { void *zero_page; build_mem_type_table(); sanity_check_meminfo(); prepare_page_table(); map_lowmem(); devicemaps_init(); top_pmd = pmd_off_k(0xffff0000); /* allocate the zero page. */ zero_page = early_alloc(PAGE_SIZE); bootmem_init(); empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); } /* * In order to soft-boot, we need to insert a 1:1 mapping in place of * the user-mode pages. This will then ensure that we have predictable * results when turning the mmu off */ void setup_mm_for_reboot(char mode) { unsigned long base_pmdval; pgd_t *pgd; int i; /* * We need to access to user-mode page tables here. For kernel threads * we don't have any user-mode mappings so we use the context that we * "borrowed". */ pgd = current->active_mm->pgd; base_pmdval = PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT; for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) { unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval; pmd_t *pmd; pmd = pmd_off(pgd, i << PGDIR_SHIFT); set_pmd(pmd, __pmd(pmdval)); flush_pmd_entry(pmd); } local_flush_tlb_all(); } /* * Take care of architecture specific things when placing a new PTE into * a page table, or changing an existing PTE. Basically, there are two * things that we need to take care of: * * 1. If PG_dcache_clean is not set for the page, we need to ensure * that any cache entries for the kernels virtual memory * range are written back to the page. * 2. 
If we have multiple shared mappings of the same space in * an object, we need to deal with the cache aliasing issues. * * Note that the pte lock will be held. */ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long pfn = pte_pfn(*ptep); struct address_space *mapping; struct page *page; if (!pfn_valid(pfn)) return; /* * The zero page is never written to, so never has any dirty * cache lines, and therefore never needs to be flushed. */ page = pfn_to_page(pfn); if (page == ZERO_PAGE(0)) return; mapping = page_mapping(page); if (!test_and_set_bit(PG_dcache_clean, &page->flags)) __flush_dcache_page(mapping, page); if (mapping) if (vma->vm_flags & VM_EXEC) __flush_icache_all(); }
gpl-2.0
ffosilva/android_kernel_sony_msm8974
arch/arm/plat-mxc/devices/platform-mxc_rtc.c
7716
1032
/* * Copyright (C) 2010-2011 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_mxc_rtc_data_entry_single(soc) \ { \ .iobase = soc ## _RTC_BASE_ADDR, \ .irq = soc ## _INT_RTC, \ } #ifdef CONFIG_SOC_IMX31 const struct imx_mxc_rtc_data imx31_mxc_rtc_data __initconst = imx_mxc_rtc_data_entry_single(MX31); #endif /* ifdef CONFIG_SOC_IMX31 */ struct platform_device *__init imx_add_mxc_rtc( const struct imx_mxc_rtc_data *data) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + SZ_16K - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device("mxc_rtc", -1, res, ARRAY_SIZE(res), NULL, 0); }
gpl-2.0
NoelMacwan/SXDNickiSS
arch/mips/loongson/lemote-2f/machtype.c
8740
1639
/* * Copyright (C) 2009 Lemote Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <asm/bootinfo.h> #include <loongson.h> void __init mach_prom_init_machtype(void) { /* We share the same kernel image file among Lemote 2F family * of machines, and provide the machtype= kernel command line * to users to indicate their machine, this command line will * be passed by the latest PMON automatically. and fortunately, * up to now, we can get the machine type from the PMON_VER= * commandline directly except the NAS machine, In the old * machines, this will help the users a lot. * * If no "machtype=" passed, get machine type from "PMON_VER=". * PMON_VER=LM8089 Lemote 8.9'' netbook * LM8101 Lemote 10.1'' netbook * (The above two netbooks have the same kernel support) * LM6XXX Lemote FuLoong(2F) box series * LM9XXX Lemote LynLoong PC series */ if (strstr(arcs_cmdline, "PMON_VER=LM")) { if (strstr(arcs_cmdline, "PMON_VER=LM8")) mips_machtype = MACH_LEMOTE_YL2F89; else if (strstr(arcs_cmdline, "PMON_VER=LM6")) mips_machtype = MACH_LEMOTE_FL2F; else if (strstr(arcs_cmdline, "PMON_VER=LM9")) mips_machtype = MACH_LEMOTE_LL2F; else mips_machtype = MACH_LEMOTE_NAS; strcat(arcs_cmdline, " machtype="); strcat(arcs_cmdline, get_system_type()); strcat(arcs_cmdline, " "); } }
gpl-2.0
loglud/acclaim_kernel
arch/mips/loongson/lemote-2f/machtype.c
8740
1639
/* * Copyright (C) 2009 Lemote Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <asm/bootinfo.h> #include <loongson.h> void __init mach_prom_init_machtype(void) { /* We share the same kernel image file among Lemote 2F family * of machines, and provide the machtype= kernel command line * to users to indicate their machine, this command line will * be passed by the latest PMON automatically. and fortunately, * up to now, we can get the machine type from the PMON_VER= * commandline directly except the NAS machine, In the old * machines, this will help the users a lot. * * If no "machtype=" passed, get machine type from "PMON_VER=". * PMON_VER=LM8089 Lemote 8.9'' netbook * LM8101 Lemote 10.1'' netbook * (The above two netbooks have the same kernel support) * LM6XXX Lemote FuLoong(2F) box series * LM9XXX Lemote LynLoong PC series */ if (strstr(arcs_cmdline, "PMON_VER=LM")) { if (strstr(arcs_cmdline, "PMON_VER=LM8")) mips_machtype = MACH_LEMOTE_YL2F89; else if (strstr(arcs_cmdline, "PMON_VER=LM6")) mips_machtype = MACH_LEMOTE_FL2F; else if (strstr(arcs_cmdline, "PMON_VER=LM9")) mips_machtype = MACH_LEMOTE_LL2F; else mips_machtype = MACH_LEMOTE_NAS; strcat(arcs_cmdline, " machtype="); strcat(arcs_cmdline, get_system_type()); strcat(arcs_cmdline, " "); } }
gpl-2.0
pedestre/Kernel-Apolo-JB-4.1.2
drivers/rtc/rtc-au1xxx.c
10020
3636
/* * Au1xxx counter0 (aka Time-Of-Year counter) RTC interface driver. * * Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ /* All current Au1xxx SoCs have 2 counters fed by an external 32.768 kHz * crystal. Counter 0, which keeps counting during sleep/powerdown, is * used to count seconds since the beginning of the unix epoch. * * The counters must be configured and enabled by bootloader/board code; * no checks as to whether they really get a proper 32.768kHz clock are * made as this would take far too long. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <asm/mach-au1x00/au1000.h> /* 32kHz clock enabled and detected */ #define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S) static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned long t; t = au_readl(SYS_TOYREAD); rtc_time_to_tm(t, tm); return rtc_valid_tm(tm); } static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm) { unsigned long t; rtc_tm_to_time(tm, &t); au_writel(t, SYS_TOYWRITE); au_sync(); /* wait for the pending register write to succeed. This can * take up to 6 seconds... 
*/ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S) msleep(1); return 0; } static struct rtc_class_ops au1xtoy_rtc_ops = { .read_time = au1xtoy_rtc_read_time, .set_time = au1xtoy_rtc_set_time, }; static int __devinit au1xtoy_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtcdev; unsigned long t; int ret; t = au_readl(SYS_COUNTER_CNTRL); if (!(t & CNTR_OK)) { dev_err(&pdev->dev, "counters not working; aborting.\n"); ret = -ENODEV; goto out_err; } ret = -ETIMEDOUT; /* set counter0 tickrate to 1Hz if necessary */ if (au_readl(SYS_TOYTRIM) != 32767) { /* wait until hardware gives access to TRIM register */ t = 0x00100000; while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S) && --t) msleep(1); if (!t) { /* timed out waiting for register access; assume * counters are unusable. */ dev_err(&pdev->dev, "timeout waiting for access\n"); goto out_err; } /* set 1Hz TOY tick rate */ au_writel(32767, SYS_TOYTRIM); au_sync(); } /* wait until the hardware allows writes to the counter reg */ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S) msleep(1); rtcdev = rtc_device_register("rtc-au1xxx", &pdev->dev, &au1xtoy_rtc_ops, THIS_MODULE); if (IS_ERR(rtcdev)) { ret = PTR_ERR(rtcdev); goto out_err; } platform_set_drvdata(pdev, rtcdev); return 0; out_err: return ret; } static int __devexit au1xtoy_rtc_remove(struct platform_device *pdev) { struct rtc_device *rtcdev = platform_get_drvdata(pdev); rtc_device_unregister(rtcdev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver au1xrtc_driver = { .driver = { .name = "rtc-au1xxx", .owner = THIS_MODULE, }, .remove = __devexit_p(au1xtoy_rtc_remove), }; static int __init au1xtoy_rtc_init(void) { return platform_driver_probe(&au1xrtc_driver, au1xtoy_rtc_probe); } static void __exit au1xtoy_rtc_exit(void) { platform_driver_unregister(&au1xrtc_driver); } module_init(au1xtoy_rtc_init); module_exit(au1xtoy_rtc_exit); MODULE_DESCRIPTION("Au1xxx TOY-counter-based RTC driver"); MODULE_AUTHOR("Manuel Lauss 
<manuel.lauss@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:rtc-au1xxx");
gpl-2.0
cattleprod/for-sense
drivers/rtc/rtc-au1xxx.c
10020
3636
/* * Au1xxx counter0 (aka Time-Of-Year counter) RTC interface driver. * * Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ /* All current Au1xxx SoCs have 2 counters fed by an external 32.768 kHz * crystal. Counter 0, which keeps counting during sleep/powerdown, is * used to count seconds since the beginning of the unix epoch. * * The counters must be configured and enabled by bootloader/board code; * no checks as to whether they really get a proper 32.768kHz clock are * made as this would take far too long. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <asm/mach-au1x00/au1000.h> /* 32kHz clock enabled and detected */ #define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S) static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned long t; t = au_readl(SYS_TOYREAD); rtc_time_to_tm(t, tm); return rtc_valid_tm(tm); } static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm) { unsigned long t; rtc_tm_to_time(tm, &t); au_writel(t, SYS_TOYWRITE); au_sync(); /* wait for the pending register write to succeed. This can * take up to 6 seconds... 
*/ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S) msleep(1); return 0; } static struct rtc_class_ops au1xtoy_rtc_ops = { .read_time = au1xtoy_rtc_read_time, .set_time = au1xtoy_rtc_set_time, }; static int __devinit au1xtoy_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtcdev; unsigned long t; int ret; t = au_readl(SYS_COUNTER_CNTRL); if (!(t & CNTR_OK)) { dev_err(&pdev->dev, "counters not working; aborting.\n"); ret = -ENODEV; goto out_err; } ret = -ETIMEDOUT; /* set counter0 tickrate to 1Hz if necessary */ if (au_readl(SYS_TOYTRIM) != 32767) { /* wait until hardware gives access to TRIM register */ t = 0x00100000; while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S) && --t) msleep(1); if (!t) { /* timed out waiting for register access; assume * counters are unusable. */ dev_err(&pdev->dev, "timeout waiting for access\n"); goto out_err; } /* set 1Hz TOY tick rate */ au_writel(32767, SYS_TOYTRIM); au_sync(); } /* wait until the hardware allows writes to the counter reg */ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S) msleep(1); rtcdev = rtc_device_register("rtc-au1xxx", &pdev->dev, &au1xtoy_rtc_ops, THIS_MODULE); if (IS_ERR(rtcdev)) { ret = PTR_ERR(rtcdev); goto out_err; } platform_set_drvdata(pdev, rtcdev); return 0; out_err: return ret; } static int __devexit au1xtoy_rtc_remove(struct platform_device *pdev) { struct rtc_device *rtcdev = platform_get_drvdata(pdev); rtc_device_unregister(rtcdev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver au1xrtc_driver = { .driver = { .name = "rtc-au1xxx", .owner = THIS_MODULE, }, .remove = __devexit_p(au1xtoy_rtc_remove), }; static int __init au1xtoy_rtc_init(void) { return platform_driver_probe(&au1xrtc_driver, au1xtoy_rtc_probe); } static void __exit au1xtoy_rtc_exit(void) { platform_driver_unregister(&au1xrtc_driver); } module_init(au1xtoy_rtc_init); module_exit(au1xtoy_rtc_exit); MODULE_DESCRIPTION("Au1xxx TOY-counter-based RTC driver"); MODULE_AUTHOR("Manuel Lauss 
<manuel.lauss@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:rtc-au1xxx");
gpl-2.0
GioneeDevTeam/android_kernel_gionee_msm8974
drivers/staging/tidspbridge/dynload/tramp_table_c6000.c
11300
3059
/* * tramp_table_c6000.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include "dload_internal.h" /* These are defined in coff.h, but may not be available on all platforms so we'll go ahead and define them here. */ #ifndef R_C60LO16 #define R_C60LO16 0x54 /* C60: MVK Low Half Register */ #define R_C60HI16 0x55 /* C60: MVKH/MVKLH High Half Register */ #endif #define C6X_TRAMP_WORD_COUNT 8 #define C6X_TRAMP_MAX_RELOS 8 /* THIS HASH FUNCTION MUST MATCH THE ONE reloc_table_c6000.c */ #define HASH_FUNC(zz) (((((zz) + 1) * 1845UL) >> 11) & 63) /* THIS MUST MATCH reloc_record_t FOR A SYMBOL BASED RELO */ struct c6000_relo_record { s32 vaddr; s32 symndx; #ifndef _BIG_ENDIAN u16 disp; u16 type; #else u16 type; u16 disp; #endif }; struct c6000_gen_code { struct tramp_gen_code_hdr hdr; u32 tramp_instrs[C6X_TRAMP_WORD_COUNT]; struct c6000_relo_record relos[C6X_TRAMP_MAX_RELOS]; }; /* Hash mapping for relos that can cause trampolines. 
*/ static const u16 tramp_map[] = { 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 0, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535 }; static const struct c6000_gen_code tramp_gen_info[] = { /* Tramp caused by R_C60PCR21 */ { /* Header - 8 instructions, 2 relos */ { sizeof(u32) * C6X_TRAMP_WORD_COUNT, 2, FIELD_OFFSET(struct c6000_gen_code, relos) }, /* Trampoline instructions */ { 0x053C54F7, /* STW.D2T2 B10, *sp--[2] */ 0x0500002A, /* || MVK.S2 <blank>, B10 */ 0x0500006A, /* MVKH.S2 <blank>, B10 */ 0x00280362, /* B.S2 B10 */ 0x053C52E6, /* LDW.D2T2 *++sp[2], B10 */ 0x00006000, /* NOP 4 */ 0x00000000, /* NOP */ 0x00000000 /* NOP */ }, /* Relocations */ { {4, 0, 0, R_C60LO16}, {8, 0, 0, R_C60HI16}, {0, 0, 0, 0x0000}, {0, 0, 0, 0x0000}, {0, 0, 0, 0x0000}, {0, 0, 0, 0x0000}, {0, 0, 0, 0x0000}, {0, 0, 0, 0x0000} } } }; /* TARGET SPECIFIC FUNCTIONS THAT MUST BE DEFINED */ static u32 tramp_size_get(void) { return sizeof(u32) * C6X_TRAMP_WORD_COUNT; } static u32 tramp_img_pkt_size_get(void) { return sizeof(struct c6000_gen_code); }
gpl-2.0
aopp/android_kernel_asus_grouper
arch/alpha/kernel/io.c
13860
12416
/*
 * Alpha IO and memory functions.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/io.h>

/* Out-of-line versions of the i/o routines that redirect into the
   platform-specific version.  Note that "platform-specific" may mean
   "generic", which bumps through the machine vector.  */

/*
 * NOTE(review): every non-__raw accessor below is followed by mb(), so
 * device accesses made through these helpers stay ordered with respect
 * to normal memory accesses.  The __raw_* variants deliberately omit
 * the barrier and leave ordering to the caller.
 */

unsigned int ioread8(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

unsigned int ioread16(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

unsigned int ioread32(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

void iowrite8(u8 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
	mb();
}

void iowrite16(u16 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
	mb();
}

void iowrite32(u32 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
	mb();
}

EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite32);

/* Port I/O: implemented on top of the ioread/iowrite accessors via
   ioport_map(), so they inherit the mb() ordering above.  */

u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}

void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}

EXPORT_SYMBOL(inb);
EXPORT_SYMBOL(inw);
EXPORT_SYMBOL(inl);
EXPORT_SYMBOL(outb);
EXPORT_SYMBOL(outw);
EXPORT_SYMBOL(outl);

/* Raw MMIO accessors: straight redirection into the platform routines,
   no barrier.  */

u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

EXPORT_SYMBOL(__raw_readb);
EXPORT_SYMBOL(__raw_readw);
EXPORT_SYMBOL(__raw_readl);
EXPORT_SYMBOL(__raw_readq);
EXPORT_SYMBOL(__raw_writeb);
EXPORT_SYMBOL(__raw_writew);
EXPORT_SYMBOL(__raw_writel);
EXPORT_SYMBOL(__raw_writeq);

/* Ordered MMIO accessors: raw access plus a trailing mb().  */

u8 readb(const volatile void __iomem *addr)
{
	u8 ret = __raw_readb(addr);
	mb();
	return ret;
}

u16 readw(const volatile void __iomem *addr)
{
	u16 ret = __raw_readw(addr);
	mb();
	return ret;
}

u32 readl(const volatile void __iomem *addr)
{
	u32 ret = __raw_readl(addr);
	mb();
	return ret;
}

u64 readq(const volatile void __iomem *addr)
{
	u64 ret = __raw_readq(addr);
	mb();
	return ret;
}

void writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
	mb();
}

void writew(u16 b, volatile void __iomem *addr)
{
	__raw_writew(b, addr);
	mb();
}

void writel(u32 b, volatile void __iomem *addr)
{
	__raw_writel(b, addr);
	mb();
}

void writeq(u64 b, volatile void __iomem *addr)
{
	__raw_writeq(b, addr);
	mb();
}

EXPORT_SYMBOL(readb);
EXPORT_SYMBOL(readw);
EXPORT_SYMBOL(readl);
EXPORT_SYMBOL(readq);
EXPORT_SYMBOL(writeb);
EXPORT_SYMBOL(writew);
EXPORT_SYMBOL(writel);
EXPORT_SYMBOL(writeq);

/*
 * Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
 */
void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
{
	/* Byte-copy until dst is 4-byte aligned ... */
	while ((unsigned long)dst & 0x3) {
		if (!count)
			return;
		count--;
		*(unsigned char *)dst = ioread8(port);
		dst += 1;
	}
	/* ... then assemble four port bytes per 32-bit memory store ... */
	while (count >= 4) {
		unsigned int w;
		count -= 4;
		w = ioread8(port);
		w |= ioread8(port) << 8;
		w |= ioread8(port) << 16;
		w |= ioread8(port) << 24;
		*(unsigned int *)dst = w;
		dst += 4;
	}
	/* ... and byte-copy the tail. */
	while (count) {
		--count;
		*(unsigned char *)dst = ioread8(port);
		dst += 1;
	}
}

void insb(unsigned long port, void *dst, unsigned long count)
{
	ioread8_rep(ioport_map(port, 1), dst, count);
}

EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(insb);

/*
 * Read COUNT 16-bit words from port PORT into memory starting at
 * SRC.  SRC must be at least short aligned.  This is used by the
 * IDE driver to read disk sectors.  Performance is important, but
 * the interfaces seems to be slow: just using the inlined version
 * of the inw() breaks things.
 */
void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
{
	/* At most one leading short is needed to reach 4-byte alignment;
	   odd (byte-misaligned) buffers are a caller bug -> BUG_ON.  */
	if (unlikely((unsigned long)dst & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)dst & 0x1);
		count--;
		*(unsigned short *)dst = ioread16(port);
		dst += 2;
	}
	while (count >= 2) {
		unsigned int w;
		count -= 2;
		w = ioread16(port);
		w |= ioread16(port) << 16;
		*(unsigned int *)dst = w;
		dst += 4;
	}
	if (count) {
		*(unsigned short*)dst = ioread16(port);
	}
}

void insw(unsigned long port, void *dst, unsigned long count)
{
	ioread16_rep(ioport_map(port, 2), dst, count);
}

EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(insw);

/*
 * Read COUNT 32-bit words from port PORT into memory starting at
 * SRC. Now works with any alignment in SRC. Performance is important,
 * but the interfaces seems to be slow: just using the inlined version
 * of the inl() breaks things.
 */
void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
{
	if (unlikely((unsigned long)dst & 0x3)) {
		while (count--) {
			/* packed member forces the compiler to emit an
			   unaligned-safe 32-bit store */
			struct S { int x __attribute__((packed)); };
			((struct S *)dst)->x = ioread32(port);
			dst += 4;
		}
	} else {
		/* Buffer 32-bit aligned.  */
		while (count--) {
			*(unsigned int *)dst = ioread32(port);
			dst += 4;
		}
	}
}

void insl(unsigned long port, void *dst, unsigned long count)
{
	ioread32_rep(ioport_map(port, 4), dst, count);
}

EXPORT_SYMBOL(ioread32_rep);
EXPORT_SYMBOL(insl);

/*
 * Like insb but in the opposite direction.
 * Don't worry as much about doing aligned memory transfers:
 * doing byte reads the "slow" way isn't nearly as slow as
 * doing byte writes the slow way (no r-m-w cycle).
 */
void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
{
	const unsigned char *src = xsrc;
	while (count--)
		iowrite8(*src++, port);
}

void outsb(unsigned long port, const void *src, unsigned long count)
{
	iowrite8_rep(ioport_map(port, 1), src, count);
}

EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(outsb);

/*
 * Like insw but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Performance is important, but the
 * interfaces seems to be slow: just using the inlined version of the
 * outw() breaks things.
 */
void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
{
	/* Mirror of ioread16_rep: one leading short for alignment, then
	   read 32 bits of source memory per two port writes.  */
	if (unlikely((unsigned long)src & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)src & 0x1);
		iowrite16(*(unsigned short *)src, port);
		src += 2;
		--count;
	}
	while (count >= 2) {
		unsigned int w;
		count -= 2;
		w = *(unsigned int *)src;
		src += 4;
		iowrite16(w >> 0, port);
		iowrite16(w >> 16, port);
	}
	if (count) {
		iowrite16(*(unsigned short *)src, port);
	}
}

void outsw(unsigned long port, const void *src, unsigned long count)
{
	iowrite16_rep(ioport_map(port, 2), src, count);
}

EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(outsw);

/*
 * Like insl but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Works with any alignment in SRC.
 * Performance is important, but the interfaces seems to be slow:
 * just using the inlined version of the outl() breaks things.
 */
void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
{
	if (unlikely((unsigned long)src & 0x3)) {
		while (count--) {
			/* packed member -> unaligned-safe 32-bit load */
			struct S { int x __attribute__((packed)); };
			iowrite32(((struct S *)src)->x, port);
			src += 4;
		}
	} else {
		/* Buffer 32-bit aligned.  */
		while (count--) {
			iowrite32(*(unsigned int *)src, port);
			src += 4;
		}
	}
}

void outsl(unsigned long port, const void *src, unsigned long count)
{
	iowrite32_rep(ioport_map(port, 4), src, count);
}

EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(outsl);

/*
 * Copy data from IO memory space to "real" memory space.
 * This needs to be optimized.
 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time. */
	/* Each do/while pre-subtracts the width so the loop condition
	   (count >= 0) means "at least one more full unit remains";
	   the width is added back afterwards for the next stage.  */

	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			*(u64 *)to = __raw_readq(from);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			*(u32 *)to = __raw_readl(from);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			*(u16 *)to = __raw_readw(from);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	while (count > 0) {
		*(u8 *) to = __raw_readb(from);
		count--;
		to++;
		from++;
	}
	mb();
}

EXPORT_SYMBOL(memcpy_fromio);

/*
 * Copy data from "real" memory space to IO memory space.
 * This needs to be optimized.
 */
void memcpy_toio(volatile void __iomem *to, const void *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time. */
	/* FIXME -- align FROM.  */

	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			__raw_writeq(*(const u64 *)from, to);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			__raw_writel(*(const u32 *)from, to);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			__raw_writew(*(const u16 *)from, to);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	while (count > 0) {
		__raw_writeb(*(const u8 *) from, to);
		count--;
		to++;
		from++;
	}
	mb();
}

EXPORT_SYMBOL(memcpy_toio);

/*
 * "memset" on IO memory space.
 */
void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
{
	/* Handle any initial odd byte */
	if (count > 0 && ((u64)to & 1)) {
		__raw_writeb(c, to);
		to++;
		count--;
	}

	/* Handle any initial odd halfword */
	if (count >= 2 && ((u64)to & 2)) {
		__raw_writew(c, to);
		to += 2;
		count -= 2;
	}

	/* Handle any initial odd word */
	if (count >= 4 && ((u64)to & 4)) {
		__raw_writel(c, to);
		to += 4;
		count -= 4;
	}

	/* Handle all full-sized quadwords: we're aligned
	   (or have a small count) */
	count -= 8;
	if (count >= 0) {
		do {
			__raw_writeq(c, to);
			to += 8;
			count -= 8;
		} while (count >= 0);
	}
	count += 8;

	/* The tail is word-aligned if we still have count >= 4 */
	if (count >= 4) {
		__raw_writel(c, to);
		to += 4;
		count -= 4;
	}

	/* The tail is half-word aligned if we have count >= 2 */
	if (count >= 2) {
		__raw_writew(c, to);
		to += 2;
		count -= 2;
	}

	/* And finally, one last byte.. */
	if (count) {
		__raw_writeb(c, to);
	}
	mb();
}

EXPORT_SYMBOL(_memset_c_io);

/* A version of memcpy used by the vga console routines to move data around
   arbitrarily between screen and main memory.  */

void
scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
	/* NOTE(review): count is in bytes; the io-to-io path divides it
	   by 2 to iterate in u16 units.  */
	const u16 __iomem *ios = (const u16 __iomem *) s;
	u16 __iomem *iod = (u16 __iomem *) d;
	int s_isio = __is_ioaddr(s);
	int d_isio = __is_ioaddr(d);

	if (s_isio) {
		if (d_isio) {
			/* FIXME: Should handle unaligned ops and
			   operation widening.  */

			count /= 2;
			while (count--) {
				u16 tmp = __raw_readw(ios++);
				__raw_writew(tmp, iod++);
			}
		}
		else
			memcpy_fromio(d, ios, count);
	} else {
		if (d_isio)
			memcpy_toio(iod, s, count);
		else
			memcpy(d, s, count);
	}
}

EXPORT_SYMBOL(scr_memcpyw);

void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

void ioport_unmap(void __iomem *addr)
{
}

EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
gpl-2.0
dasago13/android_kernel_lenovo_s650
arch/alpha/kernel/io.c
13860
12416
/* * Alpha IO and memory functions. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #include <asm/io.h> /* Out-of-line versions of the i/o routines that redirect into the platform-specific version. Note that "platform-specific" may mean "generic", which bumps through the machine vector. */ unsigned int ioread8(void __iomem *addr) { unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); mb(); return ret; } unsigned int ioread16(void __iomem *addr) { unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); mb(); return ret; } unsigned int ioread32(void __iomem *addr) { unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); mb(); return ret; } void iowrite8(u8 b, void __iomem *addr) { IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); mb(); } void iowrite16(u16 b, void __iomem *addr) { IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); mb(); } void iowrite32(u32 b, void __iomem *addr) { IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); mb(); } EXPORT_SYMBOL(ioread8); EXPORT_SYMBOL(ioread16); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(iowrite8); EXPORT_SYMBOL(iowrite16); EXPORT_SYMBOL(iowrite32); u8 inb(unsigned long port) { return ioread8(ioport_map(port, 1)); } u16 inw(unsigned long port) { return ioread16(ioport_map(port, 2)); } u32 inl(unsigned long port) { return ioread32(ioport_map(port, 4)); } void outb(u8 b, unsigned long port) { iowrite8(b, ioport_map(port, 1)); } void outw(u16 b, unsigned long port) { iowrite16(b, ioport_map(port, 2)); } void outl(u32 b, unsigned long port) { iowrite32(b, ioport_map(port, 4)); } EXPORT_SYMBOL(inb); EXPORT_SYMBOL(inw); EXPORT_SYMBOL(inl); EXPORT_SYMBOL(outb); EXPORT_SYMBOL(outw); EXPORT_SYMBOL(outl); u8 __raw_readb(const volatile void __iomem *addr) { return IO_CONCAT(__IO_PREFIX,readb)(addr); } u16 __raw_readw(const volatile void __iomem *addr) { return IO_CONCAT(__IO_PREFIX,readw)(addr); } u32 __raw_readl(const volatile void __iomem *addr) { return IO_CONCAT(__IO_PREFIX,readl)(addr); } u64 
__raw_readq(const volatile void __iomem *addr) { return IO_CONCAT(__IO_PREFIX,readq)(addr); } void __raw_writeb(u8 b, volatile void __iomem *addr) { IO_CONCAT(__IO_PREFIX,writeb)(b, addr); } void __raw_writew(u16 b, volatile void __iomem *addr) { IO_CONCAT(__IO_PREFIX,writew)(b, addr); } void __raw_writel(u32 b, volatile void __iomem *addr) { IO_CONCAT(__IO_PREFIX,writel)(b, addr); } void __raw_writeq(u64 b, volatile void __iomem *addr) { IO_CONCAT(__IO_PREFIX,writeq)(b, addr); } EXPORT_SYMBOL(__raw_readb); EXPORT_SYMBOL(__raw_readw); EXPORT_SYMBOL(__raw_readl); EXPORT_SYMBOL(__raw_readq); EXPORT_SYMBOL(__raw_writeb); EXPORT_SYMBOL(__raw_writew); EXPORT_SYMBOL(__raw_writel); EXPORT_SYMBOL(__raw_writeq); u8 readb(const volatile void __iomem *addr) { u8 ret = __raw_readb(addr); mb(); return ret; } u16 readw(const volatile void __iomem *addr) { u16 ret = __raw_readw(addr); mb(); return ret; } u32 readl(const volatile void __iomem *addr) { u32 ret = __raw_readl(addr); mb(); return ret; } u64 readq(const volatile void __iomem *addr) { u64 ret = __raw_readq(addr); mb(); return ret; } void writeb(u8 b, volatile void __iomem *addr) { __raw_writeb(b, addr); mb(); } void writew(u16 b, volatile void __iomem *addr) { __raw_writew(b, addr); mb(); } void writel(u32 b, volatile void __iomem *addr) { __raw_writel(b, addr); mb(); } void writeq(u64 b, volatile void __iomem *addr) { __raw_writeq(b, addr); mb(); } EXPORT_SYMBOL(readb); EXPORT_SYMBOL(readw); EXPORT_SYMBOL(readl); EXPORT_SYMBOL(readq); EXPORT_SYMBOL(writeb); EXPORT_SYMBOL(writew); EXPORT_SYMBOL(writel); EXPORT_SYMBOL(writeq); /* * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. 
*/ void ioread8_rep(void __iomem *port, void *dst, unsigned long count) { while ((unsigned long)dst & 0x3) { if (!count) return; count--; *(unsigned char *)dst = ioread8(port); dst += 1; } while (count >= 4) { unsigned int w; count -= 4; w = ioread8(port); w |= ioread8(port) << 8; w |= ioread8(port) << 16; w |= ioread8(port) << 24; *(unsigned int *)dst = w; dst += 4; } while (count) { --count; *(unsigned char *)dst = ioread8(port); dst += 1; } } void insb(unsigned long port, void *dst, unsigned long count) { ioread8_rep(ioport_map(port, 1), dst, count); } EXPORT_SYMBOL(ioread8_rep); EXPORT_SYMBOL(insb); /* * Read COUNT 16-bit words from port PORT into memory starting at * SRC. SRC must be at least short aligned. This is used by the * IDE driver to read disk sectors. Performance is important, but * the interfaces seems to be slow: just using the inlined version * of the inw() breaks things. */ void ioread16_rep(void __iomem *port, void *dst, unsigned long count) { if (unlikely((unsigned long)dst & 0x3)) { if (!count) return; BUG_ON((unsigned long)dst & 0x1); count--; *(unsigned short *)dst = ioread16(port); dst += 2; } while (count >= 2) { unsigned int w; count -= 2; w = ioread16(port); w |= ioread16(port) << 16; *(unsigned int *)dst = w; dst += 4; } if (count) { *(unsigned short*)dst = ioread16(port); } } void insw(unsigned long port, void *dst, unsigned long count) { ioread16_rep(ioport_map(port, 2), dst, count); } EXPORT_SYMBOL(ioread16_rep); EXPORT_SYMBOL(insw); /* * Read COUNT 32-bit words from port PORT into memory starting at * SRC. Now works with any alignment in SRC. Performance is important, * but the interfaces seems to be slow: just using the inlined version * of the inl() breaks things. */ void ioread32_rep(void __iomem *port, void *dst, unsigned long count) { if (unlikely((unsigned long)dst & 0x3)) { while (count--) { struct S { int x __attribute__((packed)); }; ((struct S *)dst)->x = ioread32(port); dst += 4; } } else { /* Buffer 32-bit aligned. 
*/ while (count--) { *(unsigned int *)dst = ioread32(port); dst += 4; } } } void insl(unsigned long port, void *dst, unsigned long count) { ioread32_rep(ioport_map(port, 4), dst, count); } EXPORT_SYMBOL(ioread32_rep); EXPORT_SYMBOL(insl); /* * Like insb but in the opposite direction. * Don't worry as much about doing aligned memory transfers: * doing byte reads the "slow" way isn't nearly as slow as * doing byte writes the slow way (no r-m-w cycle). */ void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count) { const unsigned char *src = xsrc; while (count--) iowrite8(*src++, port); } void outsb(unsigned long port, const void *src, unsigned long count) { iowrite8_rep(ioport_map(port, 1), src, count); } EXPORT_SYMBOL(iowrite8_rep); EXPORT_SYMBOL(outsb); /* * Like insw but in the opposite direction. This is used by the IDE * driver to write disk sectors. Performance is important, but the * interfaces seems to be slow: just using the inlined version of the * outw() breaks things. */ void iowrite16_rep(void __iomem *port, const void *src, unsigned long count) { if (unlikely((unsigned long)src & 0x3)) { if (!count) return; BUG_ON((unsigned long)src & 0x1); iowrite16(*(unsigned short *)src, port); src += 2; --count; } while (count >= 2) { unsigned int w; count -= 2; w = *(unsigned int *)src; src += 4; iowrite16(w >> 0, port); iowrite16(w >> 16, port); } if (count) { iowrite16(*(unsigned short *)src, port); } } void outsw(unsigned long port, const void *src, unsigned long count) { iowrite16_rep(ioport_map(port, 2), src, count); } EXPORT_SYMBOL(iowrite16_rep); EXPORT_SYMBOL(outsw); /* * Like insl but in the opposite direction. This is used by the IDE * driver to write disk sectors. Works with any alignment in SRC. * Performance is important, but the interfaces seems to be slow: * just using the inlined version of the outl() breaks things. 
*/ void iowrite32_rep(void __iomem *port, const void *src, unsigned long count) { if (unlikely((unsigned long)src & 0x3)) { while (count--) { struct S { int x __attribute__((packed)); }; iowrite32(((struct S *)src)->x, port); src += 4; } } else { /* Buffer 32-bit aligned. */ while (count--) { iowrite32(*(unsigned int *)src, port); src += 4; } } } void outsl(unsigned long port, const void *src, unsigned long count) { iowrite32_rep(ioport_map(port, 4), src, count); } EXPORT_SYMBOL(iowrite32_rep); EXPORT_SYMBOL(outsl); /* * Copy data from IO memory space to "real" memory space. * This needs to be optimized. */ void memcpy_fromio(void *to, const volatile void __iomem *from, long count) { /* Optimize co-aligned transfers. Everything else gets handled a byte at a time. */ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { count -= 8; do { *(u64 *)to = __raw_readq(from); count -= 8; to += 8; from += 8; } while (count >= 0); count += 8; } if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { count -= 4; do { *(u32 *)to = __raw_readl(from); count -= 4; to += 4; from += 4; } while (count >= 0); count += 4; } if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { count -= 2; do { *(u16 *)to = __raw_readw(from); count -= 2; to += 2; from += 2; } while (count >= 0); count += 2; } while (count > 0) { *(u8 *) to = __raw_readb(from); count--; to++; from++; } mb(); } EXPORT_SYMBOL(memcpy_fromio); /* * Copy data from "real" memory space to IO memory space. * This needs to be optimized. */ void memcpy_toio(volatile void __iomem *to, const void *from, long count) { /* Optimize co-aligned transfers. Everything else gets handled a byte at a time. */ /* FIXME -- align FROM. 
*/ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { count -= 8; do { __raw_writeq(*(const u64 *)from, to); count -= 8; to += 8; from += 8; } while (count >= 0); count += 8; } if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { count -= 4; do { __raw_writel(*(const u32 *)from, to); count -= 4; to += 4; from += 4; } while (count >= 0); count += 4; } if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { count -= 2; do { __raw_writew(*(const u16 *)from, to); count -= 2; to += 2; from += 2; } while (count >= 0); count += 2; } while (count > 0) { __raw_writeb(*(const u8 *) from, to); count--; to++; from++; } mb(); } EXPORT_SYMBOL(memcpy_toio); /* * "memset" on IO memory space. */ void _memset_c_io(volatile void __iomem *to, unsigned long c, long count) { /* Handle any initial odd byte */ if (count > 0 && ((u64)to & 1)) { __raw_writeb(c, to); to++; count--; } /* Handle any initial odd halfword */ if (count >= 2 && ((u64)to & 2)) { __raw_writew(c, to); to += 2; count -= 2; } /* Handle any initial odd word */ if (count >= 4 && ((u64)to & 4)) { __raw_writel(c, to); to += 4; count -= 4; } /* Handle all full-sized quadwords: we're aligned (or have a small count) */ count -= 8; if (count >= 0) { do { __raw_writeq(c, to); to += 8; count -= 8; } while (count >= 0); } count += 8; /* The tail is word-aligned if we still have count >= 4 */ if (count >= 4) { __raw_writel(c, to); to += 4; count -= 4; } /* The tail is half-word aligned if we have count >= 2 */ if (count >= 2) { __raw_writew(c, to); to += 2; count -= 2; } /* And finally, one last byte.. */ if (count) { __raw_writeb(c, to); } mb(); } EXPORT_SYMBOL(_memset_c_io); /* A version of memcpy used by the vga console routines to move data around arbitrarily between screen and main memory. 
*/ void scr_memcpyw(u16 *d, const u16 *s, unsigned int count) { const u16 __iomem *ios = (const u16 __iomem *) s; u16 __iomem *iod = (u16 __iomem *) d; int s_isio = __is_ioaddr(s); int d_isio = __is_ioaddr(d); if (s_isio) { if (d_isio) { /* FIXME: Should handle unaligned ops and operation widening. */ count /= 2; while (count--) { u16 tmp = __raw_readw(ios++); __raw_writew(tmp, iod++); } } else memcpy_fromio(d, ios, count); } else { if (d_isio) memcpy_toio(iod, s, count); else memcpy(d, s, count); } } EXPORT_SYMBOL(scr_memcpyw); void __iomem *ioport_map(unsigned long port, unsigned int size) { return IO_CONCAT(__IO_PREFIX,ioportmap) (port); } void ioport_unmap(void __iomem *addr) { } EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap);
gpl-2.0
FAlinux-SoftwareinLife/silfa
OS/uboot/board/keymile/common/ivm.c
37
9159
/*
 * (C) Copyright 2011
 * Holger Brunck, Keymile GmbH Hannover, holger.brunck@keymile.com
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <hush.h>
#include <i2c.h>
#include "common.h"

/*
 * Compute the 16-bit inventory CRC over 'len' bytes of 'buf',
 * nibble-wise via a 16-entry lookup table.  Returns the CRC in
 * the low 16 bits of the result.
 */
int ivm_calc_crc(unsigned char *buf, int len)
{
	const unsigned short crc_tab[16] = {
		0x0000, 0xCC01, 0xD801, 0x1400,
		0xF001, 0x3C00, 0x2800, 0xE401,
		0xA001, 0x6C00, 0x7800, 0xB401,
		0x5000, 0x9C01, 0x8801, 0x4400};
	unsigned short crc     = 0;   /* final result */
	unsigned short r1      = 0;   /* temp */
	unsigned char  byte    = 0;   /* input buffer */
	int	i;

	/* calculate CRC from array data */
	for (i = 0; i < len; i++) {
		byte = buf[i];

		/* lower 4 bits */
		r1 = crc_tab[crc & 0xF];
		crc = ((crc) >> 4) & 0x0FFF;
		crc = crc ^ r1 ^ crc_tab[byte & 0xF];

		/* upper 4 bits */
		r1 = crc_tab[crc & 0xF];
		crc = (crc >> 4) & 0x0FFF;
		crc = crc ^ r1 ^ crc_tab[(byte >> 4) & 0xF];
	}
	return crc;
}

/*
 * Set (value != NULL) or unset (value == NULL) a hush local variable.
 * Returns the result of set_local_var(), or 0 on unset.
 */
static int ivm_set_value(char *name, char *value)
{
	char tempbuf[256];

	if (value != NULL) {
		sprintf(tempbuf, "%s=%s", name, value);
		return set_local_var(tempbuf, 0);
	} else {
		unset_local_var(name);
	}
	return 0;
}

/*
 * Extract a little-endian 16-bit value stored (redundantly, three
 * copies two bytes apart) at offset 'off' of 'buf', publish it as hex
 * in local variable 'name' and return it.  'val' is unsigned short,
 * so the "-1" error marker actually reads back as 0xffff -- callers
 * compare against 0xffff.  If 'check' is set, a value of 0 is also
 * treated as invalid.
 */
static int ivm_get_value(unsigned char *buf, int len, char *name, int off,
				int check)
{
	unsigned short val;
	unsigned char valbuf[30];

	/* redundancy check over the three stored copies */
	if ((buf[off + 0] != buf[off + 2]) &&
	    (buf[off + 2] != buf[off + 4])) {
		printf("%s Error corrupted %s\n", __func__, name);
		val = -1;
	} else {
		val = buf[off + 0] + (buf[off + 1] << 8);
		if ((val == 0) && (check == 1))
			val = -1;
	}
	sprintf((char *)valbuf, "%x", val);
	ivm_set_value(name, (char *)valbuf);
	return val;
}

#define INV_BLOCKSIZE		0x100
#define INV_DATAADDRESS		0x21
#define INVENTORYDATASIZE	(INV_BLOCKSIZE - INV_DATAADDRESS - 3)

#define IVM_POS_SHORT_TEXT		0
#define IVM_POS_MANU_ID			1
#define IVM_POS_MANU_SERIAL		2
#define IVM_POS_PART_NUMBER		3
#define IVM_POS_BUILD_STATE		4
#define IVM_POS_SUPPLIER_PART_NUMBER	5
#define IVM_POS_DELIVERY_DATE		6
#define IVM_POS_SUPPLIER_BUILD_STATE	7
#define IVM_POS_CUSTOMER_ID		8
#define IVM_POS_CUSTOMER_PROD_ID	9
#define IVM_POS_HISTORY			10
#define IVM_POS_SYMBOL_ONLY		11

/* Map non-printable characters to '.' for safe display. */
static char convert_char(char c)
{
	return (c < ' ' || c > '~') ? '.' : c;
}

/*
 * Locate inventory field number 'type' (fields are CR-separated) in
 * 'buf' and copy it, printable-sanitized, into 'string' (at most
 * maxlen-1 chars plus NUL).  For IVM_POS_SYMBOL_ONLY the copy also
 * stops at the first ';'.  Returns 0 on success, -1 on corrupted or
 * truncated inventory data.
 */
static int ivm_findinventorystring(int type,
				unsigned char *const string,
				unsigned long maxlen,
				unsigned char *buf)
{
	int xcode = 0;
	unsigned long cr = 0;
	unsigned long addr = INV_DATAADDRESS;
	unsigned long size = 0;
	unsigned long nr = type;
	int stop = 0;	/* stop on semicolon */

	memset(string, '\0', maxlen);
	switch (type) {
	case IVM_POS_SYMBOL_ONLY:
		nr = 0;
		stop = 1;
		break;
	default:
		nr = type;
		stop = 0;
	}

	/* Look for the requested number of CR. */
	while ((cr != nr) && (addr < INVENTORYDATASIZE)) {
		if ((buf[addr] == '\r'))
			cr++;
		addr++;
	}

	/*
	 * the expected number of CR was found until the end of the IVM
	 * content --> fill string
	 */
	if (addr < INVENTORYDATASIZE) {
		/* Copy the IVM string in the corresponding string */
		for (; (buf[addr] != '\r') &&
			((buf[addr] != ';') || (!stop)) &&
			(size < (maxlen - 1) &&
			(addr < INVENTORYDATASIZE)); addr++) {
			size += sprintf((char *)string + size, "%c",
					convert_char(buf[addr]));
		}

		/*
		 * copy phase is done: check if everything is ok. If not,
		 * the inventory data is most probably corrupted: tell
		 * the world there is a problem!
		 */
		if (addr == INVENTORYDATASIZE) {
			xcode = -1;
			printf("Error end of string not found\n");
		} else if ((size > (maxlen - 1)) &&
			   (buf[addr] != '\r')) {
			xcode = -1;
			printf("string too long till next CR\n");
		}
	} else {
		/*
		 * some CR are missing...
		 * the inventory data is most probably corrupted
		 */
		xcode = -1;
		printf("not enough cr found\n");
	}
	return xcode;
}

/* Publish inventory field 'which' as local variable 'name'; expects
   'valbuf' and 'buf' in the calling scope. */
#define GET_STRING(name, which, len) \
	if (ivm_findinventorystring(which, valbuf, len, buf) == 0) { \
		ivm_set_value(name, (char *)valbuf); \
	}

/*
 * Verify the CRC of one EEPROM page.  The expected CRC is stored in
 * the last two bytes of the page.  Returns 0 on match, -1 otherwise
 * (printing a diagnostic only for block 0).
 */
static int ivm_check_crc(unsigned char *buf, int block)
{
	unsigned long crc;
	unsigned long crceeprom;

	crc = ivm_calc_crc(buf, CONFIG_SYS_IVM_EEPROM_PAGE_LEN - 2);
	crceeprom = (buf[CONFIG_SYS_IVM_EEPROM_PAGE_LEN - 1] +
			buf[CONFIG_SYS_IVM_EEPROM_PAGE_LEN - 2] * 256);
	if (crc != crceeprom) {
		if (block == 0)
			/*
			 * Fixed: the message used to contain a backslash
			 * line-splice inside the string literal, which
			 * injected raw source indentation into the output.
			 */
			printf("Error CRC Block: %d EEprom: calculated: %lx EEprom: %lx\n",
			       block, crc, crceeprom);
		return -1;
	}
	return 0;
}

/*
 * Add 'offset' to the low three MAC bytes stored at buf[4..6] (the MAC
 * itself starts at buf[1]) and regenerate the printable MAC string in
 * 'valbuf'.  A zero offset leaves both buffers untouched.
 */
static int calculate_mac_offset(unsigned char *valbuf, unsigned char *buf,
				int offset)
{
	unsigned long val = (buf[4] << 16) + (buf[5] << 8) + buf[6];

	if (offset == 0)
		return 0;

	val += offset;
	buf[4] = (val >> 16) & 0xff;
	buf[5] = (val >> 8) & 0xff;
	buf[6] = val & 0xff;

	sprintf((char *)valbuf, "%pM", buf + 1);
	return 0;
}

/*
 * Parse block 2 of the inventory EEPROM: MAC address (+ optional
 * board-specific offset) and MAC count, exported as IVM_MacAddress /
 * IVM_MacCount and the ethaddr environment variable(s).
 */
static int ivm_analyze_block2(unsigned char *buf, int len)
{
	unsigned char	valbuf[CONFIG_SYS_IVM_EEPROM_PAGE_LEN];
	unsigned long	count;

	/* IVM_MAC address begins at offset 1 */
	sprintf((char *)valbuf, "%pM", buf + 1);
	ivm_set_value("IVM_MacAddress", (char *)valbuf);
	/*
	 * if an offset is defined, add it.
	 * BUGFIX: arguments were passed as (buf, valbuf, ...), i.e.
	 * swapped against the calculate_mac_offset(valbuf, buf, offset)
	 * signature, so a non-zero offset read MAC bytes out of the
	 * string buffer and sprintf'ed the MAC string over the EEPROM
	 * image buffer.
	 */
	calculate_mac_offset(valbuf, buf, CONFIG_PIGGY_MAC_ADRESS_OFFSET);
#ifdef MACH_TYPE_KM_KIRKWOOD
	setenv((char *)"ethaddr", (char *)valbuf);
#else
	if (getenv("ethaddr") == NULL)
		setenv((char *)"ethaddr", (char *)valbuf);
#endif
#ifdef CONFIG_KMVECT1
/* KMVECT1 has two ethernet interfaces */
	if (getenv("eth1addr") == NULL) {
		/* BUGFIX: same swapped-argument call as above. */
		calculate_mac_offset(valbuf, buf, 1);
		setenv((char *)"eth1addr", (char *)valbuf);
	}
#endif

	/* IVM_MacCount: 32-bit big-endian count; 0xffffffff means "unset",
	   defaulted to 1. */
	count = (buf[10] << 24) +
		   (buf[11] << 16) +
		   (buf[12] << 8) +
		    buf[13];
	if (count == 0xffffffff)
		count = 1;
	sprintf((char *)valbuf, "%lx", count);
	ivm_set_value("IVM_MacCount", (char *)valbuf);
	return 0;
}

/*
 * Parse the full inventory EEPROM image 'buf' (at least three pages of
 * CONFIG_SYS_IVM_EEPROM_PAGE_LEN bytes) and publish all IVM_* values.
 * Returns -1 if the first page fails its CRC check, 0 otherwise (a bad
 * block-2 CRC only skips the MAC information).
 */
int ivm_analyze_eeprom(unsigned char *buf, int len)
{
	unsigned short	val;
	unsigned char	valbuf[CONFIG_SYS_IVM_EEPROM_PAGE_LEN];
	unsigned char	*tmp;

	if (ivm_check_crc(buf, 0) != 0)
		return -1;

	ivm_get_value(buf, CONFIG_SYS_IVM_EEPROM_PAGE_LEN, "IVM_BoardId",
			0, 1);
	val = ivm_get_value(buf, CONFIG_SYS_IVM_EEPROM_PAGE_LEN, "IVM_HWKey",
			6, 1);
	if (val != 0xffff) {
		/* HWKey decimal digits encode variant and version */
		sprintf((char *)valbuf, "%x", ((val / 100) % 10));
		ivm_set_value("IVM_HWVariant", (char *)valbuf);
		sprintf((char *)valbuf, "%x", (val % 100));
		ivm_set_value("IVM_HWVersion", (char *)valbuf);
	}
	ivm_get_value(buf, CONFIG_SYS_IVM_EEPROM_PAGE_LEN,
		"IVM_Functions", 12, 0);

	GET_STRING("IVM_Symbol", IVM_POS_SYMBOL_ONLY, 8)
	GET_STRING("IVM_DeviceName", IVM_POS_SHORT_TEXT, 64)
	tmp = (unsigned char *) getenv("IVM_DeviceName");
	if (tmp) {
		/* renamed from 'len': it shadowed the parameter */
		int tlen = strlen((char *)tmp);
		int	i = 0;

		/* IVM_ShortText is the part after the first ';', if any */
		while (i < tlen) {
			if (tmp[i] == ';') {
				ivm_set_value("IVM_ShortText",
					(char *)&tmp[i + 1]);
				break;
			}
			i++;
		}
		if (i >= tlen)
			ivm_set_value("IVM_ShortText", NULL);
	} else {
		ivm_set_value("IVM_ShortText", NULL);
	}
	GET_STRING("IVM_ManufacturerID", IVM_POS_MANU_ID, 32)
	GET_STRING("IVM_ManufacturerSerialNumber", IVM_POS_MANU_SERIAL, 20)
	GET_STRING("IVM_ManufacturerPartNumber", IVM_POS_PART_NUMBER, 32)
	GET_STRING("IVM_ManufacturerBuildState", IVM_POS_BUILD_STATE, 32)
	GET_STRING("IVM_SupplierPartNumber", IVM_POS_SUPPLIER_PART_NUMBER, 32)
	GET_STRING("IVM_DelieveryDate", IVM_POS_DELIVERY_DATE, 32)
	GET_STRING("IVM_SupplierBuildState", IVM_POS_SUPPLIER_BUILD_STATE, 32)
	GET_STRING("IVM_CustomerID", IVM_POS_CUSTOMER_ID, 32)
	GET_STRING("IVM_CustomerProductID", IVM_POS_CUSTOMER_PROD_ID, 32)

	if (ivm_check_crc(&buf[CONFIG_SYS_IVM_EEPROM_PAGE_LEN * 2], 2) != 0)
		return 0;
	ivm_analyze_block2(&buf[CONFIG_SYS_IVM_EEPROM_PAGE_LEN * 2],
			CONFIG_SYS_IVM_EEPROM_PAGE_LEN);

	return 0;
}

/*
 * Read the inventory EEPROM over I2C (selecting the bus via an I2C mux
 * when CONFIG_I2C_MUX is set and the "EEprom_ivm" variable names a mux
 * path) and hand the image to ivm_analyze_eeprom().  Returns -1 on mux
 * setup failure, -2 on I2C read failure, otherwise the analyzer result.
 */
int ivm_read_eeprom(void)
{
#if defined(CONFIG_I2C_MUX)
	I2C_MUX_DEVICE *dev = NULL;
#endif
	uchar i2c_buffer[CONFIG_SYS_IVM_EEPROM_MAX_LEN];
	uchar	*buf;
	unsigned long dev_addr = CONFIG_SYS_IVM_EEPROM_ADR;
	int ret;

#if defined(CONFIG_I2C_MUX)
	/* First init the Bus, select the Bus */
	buf = (unsigned char *) getenv("EEprom_ivm");
	if (buf != NULL)
		dev = i2c_mux_ident_muxstring(buf);
	if (dev == NULL) {
		printf("Error couldnt add Bus for IVM\n");
		return -1;
	}
	i2c_set_bus_num(dev->busid);
#endif

	/* add deblocking here */
	i2c_make_abort();

	ret = i2c_read(dev_addr, 0, 1, i2c_buffer,
		CONFIG_SYS_IVM_EEPROM_MAX_LEN);
	if (ret != 0) {
		printf("Error reading EEprom\n");
		return -2;
	}

	return ivm_analyze_eeprom(i2c_buffer, CONFIG_SYS_IVM_EEPROM_MAX_LEN);
}
gpl-2.0
12thmantec/linux-3.5
arch/x86/kernel/reboot.c
37
21190
#include <linux/module.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/pm.h> #include <linux/efi.h> #include <linux/dmi.h> #include <linux/sched.h> #include <linux/tboot.h> #include <linux/delay.h> #include <acpi/reboot.h> #include <asm/io.h> #include <asm/apic.h> #include <asm/desc.h> #include <asm/hpet.h> #include <asm/pgtable.h> #include <asm/proto.h> #include <asm/reboot_fixups.h> #include <asm/reboot.h> #include <asm/pci_x86.h> #include <asm/virtext.h> #include <asm/cpu.h> #include <asm/nmi.h> #ifdef CONFIG_X86_32 # include <linux/ctype.h> # include <linux/mc146818rtc.h> # include <asm/realmode.h> #else # include <asm/x86_init.h> #endif /* * Power off function, if any */ void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); static const struct desc_ptr no_idt = {}; static int reboot_mode; enum reboot_type reboot_type = BOOT_ACPI; int reboot_force; /* * This variable is used privately to keep track of whether or not * reboot_type is still set to its default value (i.e., reboot= hasn't * been set on the command line). This is needed so that we can * suppress DMI scanning for reboot quirks. Without it, it's * impossible to override a faulty reboot quirk without recompiling. */ static int reboot_default = 1; #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) static int reboot_cpu = -1; #endif /* * This is set if we need to go through the 'emergency' path. 
* When machine_emergency_restart() is called, we may be on * an inconsistent state and won't be able to do a clean cleanup */ static int reboot_emergency; /* This is set by the PCI code if either type 1 or type 2 PCI is detected */ bool port_cf9_safe = false; /* * reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci] * warm Don't set the cold reboot flag * cold Set the cold reboot flag * bios Reboot by jumping through the BIOS (only for X86_32) * smp Reboot by executing reset on BSP or other CPU (only for X86_32) * triple Force a triple fault (init) * kbd Use the keyboard controller. cold reset (default) * acpi Use the RESET_REG in the FADT * efi Use efi reset_system runtime service * pci Use the so-called "PCI reset register", CF9 * force Avoid anything that could hang. */ static int __init reboot_setup(char *str) { for (;;) { /* * Having anything passed on the command line via * reboot= will cause us to disable DMI checking * below. */ reboot_default = 0; switch (*str) { case 'w': reboot_mode = 0x1234; break; case 'c': reboot_mode = 0; break; #ifdef CONFIG_X86_32 #ifdef CONFIG_SMP case 's': if (isdigit(*(str+1))) { reboot_cpu = (int) (*(str+1) - '0'); if (isdigit(*(str+2))) reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0'); } /* * We will leave sorting out the final value * when we are ready to reboot, since we might not * have detected BSP APIC ID or smp_num_cpu */ break; #endif /* CONFIG_SMP */ case 'b': #endif case 'a': case 'k': case 't': case 'e': case 'p': reboot_type = *str; break; case 'f': reboot_force = 1; break; } str = strchr(str, ','); if (str) str++; else break; } return 1; } __setup("reboot=", reboot_setup); #ifdef CONFIG_X86_32 /* * Reboot options and system auto-detection code provided by * Dell Inc. so their systems "just work". :-) */ /* * Some machines require the "reboot=b" or "reboot=k" commandline options, * this quirk makes that automatic. 
*/ static int __init set_bios_reboot(const struct dmi_system_id *d) { if (reboot_type != BOOT_BIOS) { reboot_type = BOOT_BIOS; printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident); } return 0; } void machine_real_restart(unsigned int type) { void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int)) real_mode_header->machine_real_restart_asm; local_irq_disable(); /* * Write zero to CMOS register number 0x0f, which the BIOS POST * routine will recognize as telling it to do a proper reboot. (Well * that's what this book in front of me says -- it may only apply to * the Phoenix BIOS though, it's not clear). At the same time, * disable NMIs by setting the top bit in the CMOS address register, * as we're about to do peculiar things to the CPU. I'm not sure if * `outb_p' is needed instead of just `outb'. Use it to be on the * safe side. (Yes, CMOS_WRITE does outb_p's. - Paul G.) */ spin_lock(&rtc_lock); CMOS_WRITE(0x00, 0x8f); spin_unlock(&rtc_lock); /* * Switch back to the initial page table. */ load_cr3(initial_page_table); /* * Write 0x1234 to absolute memory location 0x472. The BIOS reads * this on booting to tell it to "Bypass memory test (also warm * boot)". This seems like a fairly standard thing that gets set by * REBOOT.COM programs, and the previous reset routine did this * too. */ *((unsigned short *)0x472) = reboot_mode; /* Jump to the identity-mapped low memory code */ restart_lowmem(type); } #ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(machine_real_restart); #endif #endif /* CONFIG_X86_32 */ /* * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot */ static int __init set_pci_reboot(const struct dmi_system_id *d) { if (reboot_type != BOOT_CF9) { reboot_type = BOOT_CF9; printk(KERN_INFO "%s series board detected. 
" "Selecting PCI-method for reboots.\n", d->ident); } return 0; } static int __init set_kbd_reboot(const struct dmi_system_id *d) { if (reboot_type != BOOT_KBD) { reboot_type = BOOT_KBD; printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident); } return 0; } /* * This is a single dmi_table handling all reboot quirks. Note that * REBOOT_BIOS is only available for 32bit */ static struct dmi_system_id __initdata reboot_dmi_table[] = { #ifdef CONFIG_X86_32 { /* Handle problems with rebooting on Dell E520's */ .callback = set_bios_reboot, .ident = "Dell E520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"), }, }, { /* Handle problems with rebooting on Dell 1300's */ .callback = set_bios_reboot, .ident = "Dell PowerEdge 1300", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"), }, }, { /* Handle problems with rebooting on Dell 300's */ .callback = set_bios_reboot, .ident = "Dell PowerEdge 300", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"), }, }, { /* Handle problems with rebooting on Dell Optiplex 745's SFF */ .callback = set_bios_reboot, .ident = "Dell OptiPlex 745", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), }, }, { /* Handle problems with rebooting on Dell Optiplex 745's DFF */ .callback = set_bios_reboot, .ident = "Dell OptiPlex 745", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), DMI_MATCH(DMI_BOARD_NAME, "0MM599"), }, }, { /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */ .callback = set_bios_reboot, .ident = "Dell OptiPlex 745", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), DMI_MATCH(DMI_BOARD_NAME, "0KW626"), }, }, { /* Handle problems with rebooting on Dell Optiplex 330 with 
0KP561 */ .callback = set_bios_reboot, .ident = "Dell OptiPlex 330", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"), DMI_MATCH(DMI_BOARD_NAME, "0KP561"), }, }, { /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */ .callback = set_bios_reboot, .ident = "Dell OptiPlex 360", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"), DMI_MATCH(DMI_BOARD_NAME, "0T656F"), }, }, { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */ .callback = set_bios_reboot, .ident = "Dell OptiPlex 760", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"), DMI_MATCH(DMI_BOARD_NAME, "0G919G"), }, }, { /* Handle problems with rebooting on Dell 2400's */ .callback = set_bios_reboot, .ident = "Dell PowerEdge 2400", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), }, }, { /* Handle problems with rebooting on Dell T5400's */ .callback = set_bios_reboot, .ident = "Dell Precision T5400", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"), }, }, { /* Handle problems with rebooting on Dell T7400's */ .callback = set_bios_reboot, .ident = "Dell Precision T7400", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T7400"), }, }, { /* Handle problems with rebooting on HP laptops */ .callback = set_bios_reboot, .ident = "HP Compaq Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), }, }, { /* Handle problems with rebooting on Dell XPS710 */ .callback = set_bios_reboot, .ident = "Dell XPS710", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), }, }, { /* Handle problems with rebooting on Dell DXP061 */ .callback = set_bios_reboot, .ident = "Dell DXP061", 
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"), }, }, { /* Handle problems with rebooting on Sony VGN-Z540N */ .callback = set_bios_reboot, .ident = "Sony VGN-Z540N", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), }, }, { /* Handle problems with rebooting on CompuLab SBC-FITPC2 */ .callback = set_bios_reboot, .ident = "CompuLab SBC-FITPC2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"), DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), }, }, { /* Handle problems with rebooting on ASUS P4S800 */ .callback = set_bios_reboot, .ident = "ASUS P4S800", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "P4S800"), }, }, #endif /* CONFIG_X86_32 */ { /* Handle reboot issue on Acer Aspire one */ .callback = set_kbd_reboot, .ident = "Acer Aspire One A110", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), }, }, { /* Handle problems with rebooting on Apple MacBook5 */ .callback = set_pci_reboot, .ident = "Apple MacBook5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), }, }, { /* Handle problems with rebooting on Apple MacBookPro5 */ .callback = set_pci_reboot, .ident = "Apple MacBookPro5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"), }, }, { /* Handle problems with rebooting on Apple Macmini3,1 */ .callback = set_pci_reboot, .ident = "Apple Macmini3,1", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"), }, }, { /* Handle problems with rebooting on the iMac9,1. */ .callback = set_pci_reboot, .ident = "Apple iMac9,1", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"), }, }, { /* Handle problems with rebooting on the Latitude E6320. 
*/ .callback = set_pci_reboot, .ident = "Dell Latitude E6320", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), }, }, { /* Handle problems with rebooting on the Latitude E5420. */ .callback = set_pci_reboot, .ident = "Dell Latitude E5420", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"), }, }, { /* Handle problems with rebooting on the Latitude E6420. */ .callback = set_pci_reboot, .ident = "Dell Latitude E6420", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), }, }, { /* Handle problems with rebooting on the OptiPlex 990. */ .callback = set_pci_reboot, .ident = "Dell OptiPlex 990", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), }, }, { /* Handle problems with rebooting on the Precision M6600. */ .callback = set_pci_reboot, .ident = "Dell OptiPlex 990", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), }, }, { } }; static int __init reboot_init(void) { /* * Only do the DMI check if reboot_type hasn't been overridden * on the command line */ if (reboot_default) dmi_check_system(reboot_dmi_table); return 0; } core_initcall(reboot_init); static inline void kb_wait(void) { int i; for (i = 0; i < 0x10000; i++) { if ((inb(0x64) & 0x02) == 0) break; udelay(2); } } static void vmxoff_nmi(int cpu, struct pt_regs *regs) { cpu_emergency_vmxoff(); } /* Use NMIs as IPIs to tell all CPUs to disable virtualization */ static void emergency_vmx_disable_all(void) { /* Just make sure we won't change CPUs while doing this */ local_irq_disable(); /* * We need to disable VMX on all CPUs before rebooting, otherwise * we risk hanging up the machine, because the CPU ignore INIT * signals when VMX is enabled. 
* * We can't take any locks and we may be on an inconsistent * state, so we use NMIs as IPIs to tell the other CPUs to disable * VMX and halt. * * For safety, we will avoid running the nmi_shootdown_cpus() * stuff unnecessarily, but we don't have a way to check * if other CPUs have VMX enabled. So we will call it only if the * CPU we are running on has VMX enabled. * * We will miss cases where VMX is not enabled on all CPUs. This * shouldn't do much harm because KVM always enable VMX on all * CPUs anyway. But we can miss it on the small window where KVM * is still enabling VMX. */ if (cpu_has_vmx() && cpu_vmx_enabled()) { /* Disable VMX on this CPU. */ cpu_vmxoff(); /* Halt and disable VMX on the other CPUs */ nmi_shootdown_cpus(vmxoff_nmi); } } void __attribute__((weak)) mach_reboot_fixups(void) { } /* * Windows compatible x86 hardware expects the following on reboot: * * 1) If the FADT has the ACPI reboot register flag set, try it * 2) If still alive, write to the keyboard controller * 3) If still alive, write to the ACPI reboot register again * 4) If still alive, write to the keyboard controller again * * If the machine is still alive at this stage, it gives up. 
We default to * following the same pattern, except that if we're still alive after (4) we'll * try to force a triple fault and then cycle between hitting the keyboard * controller and doing that */ static void native_machine_emergency_restart(void) { int i; int attempt = 0; int orig_reboot_type = reboot_type; if (reboot_emergency) emergency_vmx_disable_all(); tboot_shutdown(TB_SHUTDOWN_REBOOT); /* Tell the BIOS if we want cold or warm reboot */ *((unsigned short *)__va(0x472)) = reboot_mode; for (;;) { /* Could also try the reset bit in the Hammer NB */ switch (reboot_type) { case BOOT_KBD: mach_reboot_fixups(); /* For board specific fixups */ for (i = 0; i < 10; i++) { kb_wait(); udelay(50); outb(0xfe, 0x64); /* Pulse reset low */ udelay(50); } if (attempt == 0 && orig_reboot_type == BOOT_ACPI) { attempt = 1; reboot_type = BOOT_ACPI; } else { reboot_type = BOOT_TRIPLE; } break; case BOOT_TRIPLE: load_idt(&no_idt); __asm__ __volatile__("int3"); reboot_type = BOOT_KBD; break; #ifdef CONFIG_X86_32 case BOOT_BIOS: machine_real_restart(MRR_BIOS); reboot_type = BOOT_KBD; break; #endif case BOOT_ACPI: acpi_reboot(); reboot_type = BOOT_KBD; break; case BOOT_EFI: if (efi_enabled) efi.reset_system(reboot_mode ? 
EFI_RESET_WARM : EFI_RESET_COLD, EFI_SUCCESS, 0, NULL); reboot_type = BOOT_KBD; break; case BOOT_CF9: port_cf9_safe = true; /* Fall through */ case BOOT_CF9_COND: if (port_cf9_safe) { u8 cf9 = inb(0xcf9) & ~6; outb(cf9|2, 0xcf9); /* Request hard reset */ udelay(50); outb(cf9|6, 0xcf9); /* Actually do the reset */ udelay(50); } reboot_type = BOOT_KBD; break; } } } void native_machine_shutdown(void) { /* Stop the cpus and apics */ #ifdef CONFIG_SMP /* The boot cpu is always logical cpu 0 */ int reboot_cpu_id = 0; #ifdef CONFIG_X86_32 /* See if there has been given a command line override */ if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && cpu_online(reboot_cpu)) reboot_cpu_id = reboot_cpu; #endif /* Make certain the cpu I'm about to reboot on is online */ if (!cpu_online(reboot_cpu_id)) reboot_cpu_id = smp_processor_id(); /* Make certain I only run on the appropriate processor */ set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); /* * O.K Now that I'm on the appropriate processor, stop all of the * others. Also disable the local irq to not receive the per-cpu * timer interrupt which may trigger scheduler's load balance. 
*/ local_irq_disable(); stop_other_cpus(); #endif lapic_shutdown(); #ifdef CONFIG_X86_IO_APIC disable_IO_APIC(); #endif #ifdef CONFIG_HPET_TIMER hpet_disable(); #endif #ifdef CONFIG_X86_64 x86_platform.iommu_shutdown(); #endif } static void __machine_emergency_restart(int emergency) { reboot_emergency = emergency; machine_ops.emergency_restart(); } static void native_machine_restart(char *__unused) { printk("machine restart\n"); if (!reboot_force) machine_shutdown(); __machine_emergency_restart(0); } static void native_machine_halt(void) { /* Stop other cpus and apics */ machine_shutdown(); tboot_shutdown(TB_SHUTDOWN_HALT); stop_this_cpu(NULL); } static void native_machine_power_off(void) { if (pm_power_off) { if (!reboot_force) machine_shutdown(); pm_power_off(); } /* A fallback in case there is no PM info available */ tboot_shutdown(TB_SHUTDOWN_HALT); } struct machine_ops machine_ops = { .power_off = native_machine_power_off, .shutdown = native_machine_shutdown, .emergency_restart = native_machine_emergency_restart, .restart = native_machine_restart, .halt = native_machine_halt, #ifdef CONFIG_KEXEC .crash_shutdown = native_machine_crash_shutdown, #endif }; void machine_power_off(void) { machine_ops.power_off(); } void machine_shutdown(void) { machine_ops.shutdown(); } void machine_emergency_restart(void) { __machine_emergency_restart(1); } void machine_restart(char *cmd) { machine_ops.restart(cmd); } void machine_halt(void) { machine_ops.halt(); } #ifdef CONFIG_KEXEC void machine_crash_shutdown(struct pt_regs *regs) { machine_ops.crash_shutdown(regs); } #endif #if defined(CONFIG_SMP) /* This keeps a track of which one is crashing cpu. */ static int crashing_cpu; static nmi_shootdown_cb shootdown_callback; static atomic_t waiting_for_crash_ipi; static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) { int cpu; cpu = raw_smp_processor_id(); /* * Don't do anything if this handler is invoked on crashing cpu. * Otherwise, system will completely hang. 
Crashing cpu can get * an NMI if system was initially booted with nmi_watchdog parameter. */ if (cpu == crashing_cpu) return NMI_HANDLED; local_irq_disable(); shootdown_callback(cpu, regs); atomic_dec(&waiting_for_crash_ipi); /* Assume hlt works */ halt(); for (;;) cpu_relax(); return NMI_HANDLED; } static void smp_send_nmi_allbutself(void) { apic->send_IPI_allbutself(NMI_VECTOR); } /* * Halt all other CPUs, calling the specified function on each of them * * This function can be used to halt all other CPUs on crash * or emergency reboot time. The function passed as parameter * will be called inside a NMI handler on all CPUs. */ void nmi_shootdown_cpus(nmi_shootdown_cb callback) { unsigned long msecs; local_irq_disable(); /* Make a note of crashing cpu. Will be used in NMI callback. */ crashing_cpu = safe_smp_processor_id(); shootdown_callback = callback; atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); /* Would it be better to replace the trap vector here? */ if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback, NMI_FLAG_FIRST, "crash")) return; /* Return what? */ /* * Ensure the new callback function is set before sending * out the NMI */ wmb(); smp_send_nmi_allbutself(); msecs = 1000; /* Wait at most a second for the other cpus to stop */ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { mdelay(1); msecs--; } /* Leave the nmi callback set */ } #else /* !CONFIG_SMP */ void nmi_shootdown_cpus(nmi_shootdown_cb callback) { /* No other CPUs to shoot down */ } #endif
gpl-2.0
brink182/cfg-loader-mod
lib/libext2fs/source/e2p/iod.c
37
1519
/* * iod.c - Iterate a function on each entry of a directory * * Copyright (C) 1993, 1994 Remy Card <card@masi.ibp.fr> * Laboratoire MASI, Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * %Begin-Header% * This file may be redistributed under the terms of the GNU Library * General Public License, version 2. * %End-Header% */ /* * History: * 93/10/30 - Creation */ #include "e2p.h" #if HAVE_UNISTD_H #include <unistd.h> #endif #include <stdlib.h> #include <string.h> int iterate_on_dir (const char * dir_name, int (*func) (const char *, struct dirent *, void *), void * private) { DIR * dir; struct dirent *de, *dep; int max_len = -1, len, ret = 0; #if HAVE_PATHCONF && defined(_PC_NAME_MAX) max_len = pathconf(dir_name, _PC_NAME_MAX); #endif if (max_len == -1) { #ifdef _POSIX_NAME_MAX max_len = _POSIX_NAME_MAX; #else #ifdef NAME_MAX max_len = NAME_MAX; #else max_len = 256; #endif /* NAME_MAX */ #endif /* _POSIX_NAME_MAX */ } max_len += sizeof(struct dirent); de = malloc(max_len+1); if (!de) return -1; memset(de, 0, max_len+1); dir = opendir (dir_name); if (dir == NULL) { free(de); return -1; } while ((dep = readdir (dir))) { #ifdef HAVE_RECLEN_DIRENT len = dep->d_reclen; if (len > max_len) len = max_len; #else len = sizeof(struct dirent); #endif memcpy(de, dep, len); if ((*func)(dir_name, de, private)) ret++; } free(de); closedir(dir); return ret; }
gpl-2.0
motley-git/Kernel-GT-P7310
net/core/link_watch.c
293
5548
/* * Linux network device link state notification * * Author: * Stefan Rompf <sux@loplof.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/if.h> #include <net/sock.h> #include <net/pkt_sched.h> #include <linux/rtnetlink.h> #include <linux/jiffies.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/bitops.h> #include <asm/types.h> enum lw_bits { LW_URGENT = 0, }; static unsigned long linkwatch_flags; static unsigned long linkwatch_nextevent; static void linkwatch_event(struct work_struct *dummy); static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); static LIST_HEAD(lweventlist); static DEFINE_SPINLOCK(lweventlist_lock); static unsigned char default_operstate(const struct net_device *dev) { if (!netif_carrier_ok(dev)) return (dev->ifindex != dev->iflink ? 
IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN); if (netif_dormant(dev)) return IF_OPER_DORMANT; return IF_OPER_UP; } static void rfc2863_policy(struct net_device *dev) { unsigned char operstate = default_operstate(dev); if (operstate == dev->operstate) return; write_lock_bh(&dev_base_lock); switch(dev->link_mode) { case IF_LINK_MODE_DORMANT: if (operstate == IF_OPER_UP) operstate = IF_OPER_DORMANT; break; case IF_LINK_MODE_DEFAULT: default: break; } dev->operstate = operstate; write_unlock_bh(&dev_base_lock); } static bool linkwatch_urgent_event(struct net_device *dev) { return netif_running(dev) && netif_carrier_ok(dev) && qdisc_tx_changing(dev); } static void linkwatch_add_event(struct net_device *dev) { unsigned long flags; spin_lock_irqsave(&lweventlist_lock, flags); if (list_empty(&dev->link_watch_list)) { list_add_tail(&dev->link_watch_list, &lweventlist); dev_hold(dev); } spin_unlock_irqrestore(&lweventlist_lock, flags); } static void linkwatch_schedule_work(int urgent) { unsigned long delay = linkwatch_nextevent - jiffies; if (test_bit(LW_URGENT, &linkwatch_flags)) return; /* Minimise down-time: drop delay for up event. */ if (urgent) { if (test_and_set_bit(LW_URGENT, &linkwatch_flags)) return; delay = 0; } /* If we wrap around we'll delay it by at most HZ. */ if (delay > HZ) delay = 0; /* * This is true if we've scheduled it immeditately or if we don't * need an immediate execution and it's already pending. */ if (schedule_delayed_work(&linkwatch_work, delay) == !delay) return; /* Don't bother if there is nothing urgent. */ if (!test_bit(LW_URGENT, &linkwatch_flags)) return; /* It's already running which is good enough. */ if (!cancel_delayed_work(&linkwatch_work)) return; /* Otherwise we reschedule it again for immediate exection. */ schedule_delayed_work(&linkwatch_work, 0); } static void linkwatch_do_dev(struct net_device *dev) { /* * Make sure the above read is complete since it can be * rewritten as soon as we clear the bit below. 
*/ smp_mb__before_clear_bit(); /* We are about to handle this device, * so new events can be accepted */ clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); rfc2863_policy(dev); if (dev->flags & IFF_UP) { if (netif_carrier_ok(dev)) dev_activate(dev); else dev_deactivate(dev); netdev_state_change(dev); } dev_put(dev); } static void __linkwatch_run_queue(int urgent_only) { struct net_device *dev; LIST_HEAD(wrk); /* * Limit the number of linkwatch events to one * per second so that a runaway driver does not * cause a storm of messages on the netlink * socket. This limit does not apply to up events * while the device qdisc is down. */ if (!urgent_only) linkwatch_nextevent = jiffies + HZ; /* Limit wrap-around effect on delay. */ else if (time_after(linkwatch_nextevent, jiffies + HZ)) linkwatch_nextevent = jiffies; clear_bit(LW_URGENT, &linkwatch_flags); spin_lock_irq(&lweventlist_lock); list_splice_init(&lweventlist, &wrk); while (!list_empty(&wrk)) { dev = list_first_entry(&wrk, struct net_device, link_watch_list); list_del_init(&dev->link_watch_list); if (urgent_only && !linkwatch_urgent_event(dev)) { list_add_tail(&dev->link_watch_list, &lweventlist); continue; } spin_unlock_irq(&lweventlist_lock); linkwatch_do_dev(dev); spin_lock_irq(&lweventlist_lock); } if (!list_empty(&lweventlist)) linkwatch_schedule_work(0); spin_unlock_irq(&lweventlist_lock); } void linkwatch_forget_dev(struct net_device *dev) { unsigned long flags; int clean = 0; spin_lock_irqsave(&lweventlist_lock, flags); if (!list_empty(&dev->link_watch_list)) { list_del_init(&dev->link_watch_list); clean = 1; } spin_unlock_irqrestore(&lweventlist_lock, flags); if (clean) linkwatch_do_dev(dev); } /* Must be called with the rtnl semaphore held */ void linkwatch_run_queue(void) { __linkwatch_run_queue(0); } static void linkwatch_event(struct work_struct *dummy) { rtnl_lock(); __linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies)); rtnl_unlock(); } void linkwatch_fire_event(struct net_device 
*dev) { bool urgent = linkwatch_urgent_event(dev); if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { linkwatch_add_event(dev); } else if (!urgent) return; linkwatch_schedule_work(urgent); } EXPORT_SYMBOL(linkwatch_fire_event);
gpl-2.0
bhb27/android_kernel_motorola_apq8084
sound/core/seq/seq_timer.c
293
12316
/* * ALSA sequencer Timer * Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl> * Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <sound/core.h> #include <linux/slab.h> #include "seq_timer.h" #include "seq_queue.h" #include "seq_info.h" /* allowed sequencer timer frequencies, in Hz */ #define MIN_FREQUENCY 10 #define MAX_FREQUENCY 6250 #define DEFAULT_FREQUENCY 1000 #define SKEW_BASE 0x10000 /* 16bit shift */ static void snd_seq_timer_set_tick_resolution(struct snd_seq_timer *tmr) { if (tmr->tempo < 1000000) tmr->tick.resolution = (tmr->tempo * 1000) / tmr->ppq; else { /* might overflow.. 
 */
        unsigned int s;
        s = tmr->tempo % tmr->ppq;
        s = (s * 1000) / tmr->ppq;
        tmr->tick.resolution = (tmr->tempo / tmr->ppq) * 1000;
        tmr->tick.resolution += s;
    }
    if (tmr->tick.resolution <= 0)
        tmr->tick.resolution = 1;
    snd_seq_timer_update_tick(&tmr->tick, 0);
}

/* create new timer (constructor) */
struct snd_seq_timer *snd_seq_timer_new(void)
{
    struct snd_seq_timer *tmr;

    tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
    if (tmr == NULL) {
        snd_printd("malloc failed for snd_seq_timer_new() \n");
        return NULL;
    }
    spin_lock_init(&tmr->lock);

    /* reset setup to defaults */
    snd_seq_timer_defaults(tmr);

    /* reset time */
    snd_seq_timer_reset(tmr);

    return tmr;
}

/* delete timer (destructor) */
void snd_seq_timer_delete(struct snd_seq_timer **tmr)
{
    struct snd_seq_timer *t = *tmr;

    /* clear the caller's pointer first so no one re-uses the dying timer */
    *tmr = NULL;

    if (t == NULL) {
        snd_printd("oops: snd_seq_timer_delete() called with NULL timer\n");
        return;
    }
    t->running = 0;

    /* reset time */
    snd_seq_timer_stop(t);
    snd_seq_timer_reset(t);

    kfree(t);
}

/*
 * Reset the timer to its default setup: 96 PPQ at 120 BPM, ALSA timer
 * backend selected from the module-level seq_default_timer_* parameters.
 * Called under no lock by the constructor; takes tmr->lock itself.
 */
void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
{
    unsigned long flags;

    spin_lock_irqsave(&tmr->lock, flags);
    /* setup defaults */
    tmr->ppq = 96;              /* 96 PPQ */
    tmr->tempo = 500000;        /* 120 BPM (microseconds per quarter note) */
    snd_seq_timer_set_tick_resolution(tmr);
    tmr->running = 0;

    tmr->type = SNDRV_SEQ_TIMER_ALSA;
    tmr->alsa_id.dev_class = seq_default_timer_class;
    tmr->alsa_id.dev_sclass = seq_default_timer_sclass;
    tmr->alsa_id.card = seq_default_timer_card;
    tmr->alsa_id.device = seq_default_timer_device;
    tmr->alsa_id.subdevice = seq_default_timer_subdevice;
    tmr->preferred_resolution = seq_default_timer_resolution;

    tmr->skew = tmr->skew_base = SKEW_BASE;
    spin_unlock_irqrestore(&tmr->lock, flags);
}

/* lock-free core of the reset; caller must hold tmr->lock */
static void seq_timer_reset(struct snd_seq_timer *tmr)
{
    /* reset time & songposition */
    tmr->cur_time.tv_sec = 0;
    tmr->cur_time.tv_nsec = 0;

    tmr->tick.cur_tick = 0;
    tmr->tick.fraction = 0;
}

/* locked wrapper around seq_timer_reset() */
void snd_seq_timer_reset(struct snd_seq_timer *tmr)
{
    unsigned long flags;

    spin_lock_irqsave(&tmr->lock, flags);
    seq_timer_reset(tmr);
    spin_unlock_irqrestore(&tmr->lock, flags);
}

/*
 * Called by the timer interrupt routine; the period time since the
 * previous invocation is passed (resolution is ns per tick, ticks is
 * the number of elapsed ticks).  Advances the queue's real time and
 * tick counters and dispatches due events.
 */
static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
                                    unsigned long resolution,
                                    unsigned long ticks)
{
    unsigned long flags;
    struct snd_seq_queue *q = timeri->callback_data;
    struct snd_seq_timer *tmr;

    if (q == NULL)
        return;
    tmr = q->timer;
    if (tmr == NULL)
        return;
    spin_lock_irqsave(&tmr->lock, flags);
    if (!tmr->running) {
        spin_unlock_irqrestore(&tmr->lock, flags);
        return;
    }

    resolution *= ticks;
    if (tmr->skew != tmr->skew_base) {
        /* FIXME: assuming skew_base = 0x10000 */
        resolution = (resolution >> 16) * tmr->skew +
            (((resolution & 0xffff) * tmr->skew) >> 16);
    }

    /* update timer */
    snd_seq_inc_time_nsec(&tmr->cur_time, resolution);

    /* calculate current tick */
    snd_seq_timer_update_tick(&tmr->tick, resolution);

    /* register actual time of this timer update */
    do_gettimeofday(&tmr->last_update);

    spin_unlock_irqrestore(&tmr->lock, flags);

    /* check queues and dispatch events (outside the lock) */
    snd_seq_check_queue(q, 1, 0);
}

/* set current tempo (us per quarter note); rejects non-positive values */
int snd_seq_timer_set_tempo(struct snd_seq_timer * tmr, int tempo)
{
    unsigned long flags;

    if (snd_BUG_ON(!tmr))
        return -EINVAL;
    if (tempo <= 0)
        return -EINVAL;
    spin_lock_irqsave(&tmr->lock, flags);
    /* tempo was range-checked above, so the unsigned compare is safe */
    if ((unsigned int)tempo != tmr->tempo) {
        tmr->tempo = tempo;
        snd_seq_timer_set_tick_resolution(tmr);
    }
    spin_unlock_irqrestore(&tmr->lock, flags);
    return 0;
}

/* set current ppq; refused while the timer runs (would shift song position) */
int snd_seq_timer_set_ppq(struct snd_seq_timer * tmr, int ppq)
{
    unsigned long flags;

    if (snd_BUG_ON(!tmr))
        return -EINVAL;
    if (ppq <= 0)
        return -EINVAL;
    spin_lock_irqsave(&tmr->lock, flags);
    if (tmr->running && (ppq != tmr->ppq)) {
        /* refuse to change ppq on running timers */
        /* because it will upset the song position (ticks) */
        spin_unlock_irqrestore(&tmr->lock, flags);
        snd_printd("seq: cannot change ppq of a running timer\n");
        return -EBUSY;
    }
    tmr->ppq = ppq;
    snd_seq_timer_set_tick_resolution(tmr);
    spin_unlock_irqrestore(&tmr->lock, flags);
    return 0;
}

/* set current tick position */
int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr,
                                    snd_seq_tick_time_t position)
{
    unsigned long flags;

    if (snd_BUG_ON(!tmr))
        return -EINVAL;

    spin_lock_irqsave(&tmr->lock, flags);
    tmr->tick.cur_tick = position;
    tmr->tick.fraction = 0;
    spin_unlock_irqrestore(&tmr->lock, flags);
    return 0;
}

/* set current real-time position (normalized before storing) */
int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr,
                                    snd_seq_real_time_t position)
{
    unsigned long flags;

    if (snd_BUG_ON(!tmr))
        return -EINVAL;

    snd_seq_sanity_real_time(&position);
    spin_lock_irqsave(&tmr->lock, flags);
    tmr->cur_time = position;
    spin_unlock_irqrestore(&tmr->lock, flags);
    return 0;
}

/* set timer skew; only the default base is accepted for now */
int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew,
                           unsigned int base)
{
    unsigned long flags;

    if (snd_BUG_ON(!tmr))
        return -EINVAL;

    /* FIXME */
    if (base != SKEW_BASE) {
        snd_printd("invalid skew base 0x%x\n", base);
        return -EINVAL;
    }
    spin_lock_irqsave(&tmr->lock, flags);
    tmr->skew = skew;
    spin_unlock_irqrestore(&tmr->lock, flags);
    return 0;
}

/*
 * Open an ALSA timer instance for the queue.  Tries the configured
 * timer id first, then falls back to the global system timer.  The
 * instance is published under tmr->lock so readers never see a
 * half-initialized pointer.
 */
int snd_seq_timer_open(struct snd_seq_queue *q)
{
    struct snd_timer_instance *t;
    struct snd_seq_timer *tmr;
    char str[32];
    int err;

    tmr = q->timer;
    if (snd_BUG_ON(!tmr))
        return -EINVAL;
    if (tmr->timeri)
        return -EBUSY;
    sprintf(str, "sequencer queue %i", q->queue);
    if (tmr->type != SNDRV_SEQ_TIMER_ALSA)      /* standard ALSA timer */
        return -EINVAL;
    if (tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
        tmr->alsa_id.dev_sclass = SNDRV_TIMER_SCLASS_SEQUENCER;
    err = snd_timer_open(&t, str, &tmr->alsa_id, q->queue);
    if (err < 0 && tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_SLAVE) {
        if (tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_GLOBAL ||
            tmr->alsa_id.device != SNDRV_TIMER_GLOBAL_SYSTEM) {
            /* retry with the system timer as a fallback */
            struct snd_timer_id tid;
            memset(&tid, 0, sizeof(tid));
            tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
            tid.dev_sclass = SNDRV_TIMER_SCLASS_SEQUENCER;
            tid.card = -1;
            tid.device = SNDRV_TIMER_GLOBAL_SYSTEM;
            err = snd_timer_open(&t, str, &tid, q->queue);
        }
    }
    if (err < 0) {
        snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err);
        return err;
    }
    t->callback = snd_seq_timer_interrupt;
    t->callback_data = q;
    t->flags |= SNDRV_TIMER_IFLG_AUTO;
    spin_lock_irq(&tmr->lock);
    tmr->timeri = t;
    spin_unlock_irq(&tmr->lock);
    return 0;
}

/*
 * Detach and close the queue's timer instance.  The pointer is cleared
 * under the lock before snd_timer_close() runs, so the interrupt path
 * cannot race with the teardown.
 */
int snd_seq_timer_close(struct snd_seq_queue *q)
{
    struct snd_seq_timer *tmr;
    struct snd_timer_instance *t;

    tmr = q->timer;
    if (snd_BUG_ON(!tmr))
        return -EINVAL;
    spin_lock_irq(&tmr->lock);
    t = tmr->timeri;
    tmr->timeri = NULL;
    spin_unlock_irq(&tmr->lock);
    if (t)
        snd_timer_close(t);
    return 0;
}

/* lock-free stop; caller must hold tmr->lock */
static int seq_timer_stop(struct snd_seq_timer *tmr)
{
    if (! tmr->timeri)
        return -EINVAL;
    if (!tmr->running)
        return 0;
    tmr->running = 0;
    snd_timer_pause(tmr->timeri);
    return 0;
}

/* locked wrapper around seq_timer_stop() */
int snd_seq_timer_stop(struct snd_seq_timer *tmr)
{
    unsigned long flags;
    int err;

    spin_lock_irqsave(&tmr->lock, flags);
    err = seq_timer_stop(tmr);
    spin_unlock_irqrestore(&tmr->lock, flags);
    return err;
}

/*
 * Compute the hardware tick count matching the preferred resolution
 * (clamped to [MIN_FREQUENCY, MAX_FREQUENCY]).  Slave timers always
 * use a single tick.  Caller must hold tmr->lock and own a valid
 * tmr->timeri.
 */
static int initialize_timer(struct snd_seq_timer *tmr)
{
    struct snd_timer *t;
    unsigned long freq;

    t = tmr->timeri->timer;
    if (snd_BUG_ON(!t))
        return -EINVAL;

    freq = tmr->preferred_resolution;
    if (!freq)
        freq = DEFAULT_FREQUENCY;
    else if (freq < MIN_FREQUENCY)
        freq = MIN_FREQUENCY;
    else if (freq > MAX_FREQUENCY)
        freq = MAX_FREQUENCY;

    tmr->ticks = 1;
    if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
        unsigned long r = t->hw.resolution;
        if (! r && t->hw.c_resolution)
            r = t->hw.c_resolution(t);
        if (r) {
            tmr->ticks = (unsigned int)(1000000000uL / (r * freq));
            if (! tmr->ticks)
                tmr->ticks = 1;
        }
    }
    tmr->initialized = 1;
    return 0;
}

/* lock-free start; caller must hold tmr->lock */
static int seq_timer_start(struct snd_seq_timer *tmr)
{
    if (! tmr->timeri)
        return -EINVAL;
    if (tmr->running)
        seq_timer_stop(tmr);
    seq_timer_reset(tmr);
    if (initialize_timer(tmr) < 0)
        return -EINVAL;
    snd_timer_start(tmr->timeri, tmr->ticks);
    tmr->running = 1;
    do_gettimeofday(&tmr->last_update);
    return 0;
}

/* locked wrapper around seq_timer_start() */
int snd_seq_timer_start(struct snd_seq_timer *tmr)
{
    unsigned long flags;
    int err;

    spin_lock_irqsave(&tmr->lock, flags);
    err = seq_timer_start(tmr);
    spin_unlock_irqrestore(&tmr->lock, flags);
    return err;
}

/* lock-free continue (resume without resetting position);
 * caller must hold tmr->lock */
static int seq_timer_continue(struct snd_seq_timer *tmr)
{
    if (! tmr->timeri)
        return -EINVAL;
    if (tmr->running)
        return -EBUSY;
    if (! tmr->initialized) {
        seq_timer_reset(tmr);
        if (initialize_timer(tmr) < 0)
            return -EINVAL;
    }
    snd_timer_start(tmr->timeri, tmr->ticks);
    tmr->running = 1;
    do_gettimeofday(&tmr->last_update);
    return 0;
}

/* locked wrapper around seq_timer_continue() */
int snd_seq_timer_continue(struct snd_seq_timer *tmr)
{
    unsigned long flags;
    int err;

    spin_lock_irqsave(&tmr->lock, flags);
    err = seq_timer_continue(tmr);
    spin_unlock_irqrestore(&tmr->lock, flags);
    return err;
}

/* return current 'real' time. use timeofday() to get better granularity. */
snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
{
    snd_seq_real_time_t cur_time;
    unsigned long flags;

    spin_lock_irqsave(&tmr->lock, flags);
    cur_time = tmr->cur_time;
    if (tmr->running) {
        /* interpolate the wall-clock time elapsed since the last
         * timer interrupt updated cur_time */
        struct timeval tm;
        int usec;
        do_gettimeofday(&tm);
        usec = (int)(tm.tv_usec - tmr->last_update.tv_usec);
        if (usec < 0) {
            cur_time.tv_nsec += (1000000 + usec) * 1000;
            cur_time.tv_sec += tm.tv_sec - tmr->last_update.tv_sec - 1;
        } else {
            cur_time.tv_nsec += usec * 1000;
            cur_time.tv_sec += tm.tv_sec - tmr->last_update.tv_sec;
        }
        snd_seq_sanity_real_time(&cur_time);
    }
    spin_unlock_irqrestore(&tmr->lock, flags);
    return cur_time;
}

/* TODO: use interpolation on tick queue (will only be useful for very
 * high PPQ values) */
snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
{
    return tmr->tick.cur_tick;
}


#ifdef CONFIG_PROC_FS
/* exported to seq_info.c: dump per-queue timer info into /proc */
void snd_seq_info_timer_read(struct snd_info_entry *entry,
                             struct snd_info_buffer *buffer)
{
    int idx;
    struct snd_seq_queue *q;
    struct snd_seq_timer *tmr;
    struct snd_timer_instance *ti;
    unsigned long resolution;

    for (idx = 0; idx < SNDRV_SEQ_MAX_QUEUES; idx++) {
        q = queueptr(idx);
        if (q == NULL)
            continue;
        if ((tmr = q->timer) == NULL ||
            (ti = tmr->timeri) == NULL) {
            queuefree(q);
            continue;
        }
        snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
        resolution = snd_timer_resolution(ti) * tmr->ticks;
        snd_iprintf(buffer, " Period time : %lu.%09lu\n", resolution / 1000000000, resolution % 1000000000);
        snd_iprintf(buffer, " Skew : %u / %u\n", tmr->skew, tmr->skew_base);
        queuefree(q);
    }
}
#endif /* CONFIG_PROC_FS */
gpl-2.0
uwehermann/easybox-904-xdsl-firmware
linux/linux-2.6.32.32/drivers/video/console/fbcon_cw.c
805
10761
/*
 *  linux/drivers/video/console/fbcon_cw.c -- Software Rotation - 90 degrees
 *
 *      Copyright (C) 2005 Antonino Daplas <adaplas @pol.net>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive for
 *  more details.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <asm/types.h>
#include "fbcon.h"
#include "fbcon_rotate.h"

/*
 * Rotation 90 degrees
 */

/*
 * Apply underline/bold/reverse attributes to a (rotated) glyph,
 * copying from src to dst.  The glyph is stored column-major after
 * rotation, so the underline bits live in the first byte of each row.
 */
static inline void cw_update_attr(u8 *dst, u8 *src, int attribute,
                                  struct vc_data *vc)
{
    int i, j, offset = (vc->vc_font.height < 10) ? 1 : 2;
    int width = (vc->vc_font.height + 7) >> 3;
    u8 c, t = 0, msk = ~(0xff >> offset);

    for (i = 0; i < vc->vc_font.width; i++) {
        for (j = 0; j < width; j++) {
            c = *src;
            if (attribute & FBCON_ATTRIBUTE_UNDERLINE && !j)
                c |= msk;
            if (attribute & FBCON_ATTRIBUTE_BOLD && i)
                c |= *(src-width);
            if (attribute & FBCON_ATTRIBUTE_REVERSE)
                c = ~c;
            src++;
            *dst++ = c;
            t = c;
        }
    }
}

/* block move: translate character-cell coordinates into rotated pixels */
static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
                     int sx, int dy, int dx, int height, int width)
{
    struct fbcon_ops *ops = info->fbcon_par;
    struct fb_copyarea area;
    u32 vxres = GETVXRES(ops->p->scrollmode, info);

    area.sx = vxres - ((sy + height) * vc->vc_font.height);
    area.sy = sx * vc->vc_font.width;
    area.dx = vxres - ((dy + height) * vc->vc_font.height);
    area.dy = dx * vc->vc_font.width;
    area.width = height * vc->vc_font.height;
    area.height = width * vc->vc_font.width;

    info->fbops->fb_copyarea(info, &area);
}

/* clear a character-cell rectangle with the background color */
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
                     int sx, int height, int width)
{
    struct fbcon_ops *ops = info->fbcon_par;
    struct fb_fillrect region;
    int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
    u32 vxres = GETVXRES(ops->p->scrollmode, info);

    region.color = attr_bgcol_ec(bgshift,vc,info);
    region.dx = vxres - ((sy + height) * vc->vc_font.height);
    region.dy = sx * vc->vc_font.width;
    region.height = width * vc->vc_font.width;
    region.width = height * vc->vc_font.height;
    region.rop = ROP_COPY;

    info->fbops->fb_fillrect(info, &region);
}

/*
 * Blit cnt glyphs from the rotated font buffer into one fb_image,
 * applying attributes through the scratch buffer buf when needed.
 */
static inline void cw_putcs_aligned(struct vc_data *vc, struct fb_info *info,
                                    const u16 *s, u32 attr, u32 cnt,
                                    u32 d_pitch, u32 s_pitch, u32 cellsize,
                                    struct fb_image *image, u8 *buf, u8 *dst)
{
    struct fbcon_ops *ops = info->fbcon_par;
    u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
    u32 idx = (vc->vc_font.height + 7) >> 3;
    u8 *src;

    while (cnt--) {
        src = ops->fontbuffer + (scr_readw(s++) & charmask)*cellsize;

        if (attr) {
            cw_update_attr(buf, src, attr, vc);
            src = buf;
        }

        if (likely(idx == 1))
            __fb_pad_aligned_buffer(dst, d_pitch, src, idx,
                                    vc->vc_font.width);
        else
            fb_pad_aligned_buffer(dst, d_pitch, src, idx,
                                  vc->vc_font.width);

        dst += d_pitch * vc->vc_font.width;
    }

    info->fbops->fb_imageblit(info, image);
}

/*
 * Draw a string of characters at cell (yy, xx), batching up to maxcnt
 * glyphs per imageblit through the pixmap staging buffer.
 */
static void cw_putcs(struct vc_data *vc, struct fb_info *info,
                     const unsigned short *s, int count, int yy, int xx,
                     int fg, int bg)
{
    struct fb_image image;
    struct fbcon_ops *ops = info->fbcon_par;
    u32 width = (vc->vc_font.height + 7)/8;
    u32 cellsize = width * vc->vc_font.width;
    u32 maxcnt = info->pixmap.size/cellsize;
    u32 scan_align = info->pixmap.scan_align - 1;
    u32 buf_align = info->pixmap.buf_align - 1;
    u32 cnt, pitch, size;
    u32 attribute = get_attribute(info, scr_readw(s));
    u8 *dst, *buf = NULL;
    u32 vxres = GETVXRES(ops->p->scrollmode, info);

    if (!ops->fontbuffer)
        return;

    image.fg_color = fg;
    image.bg_color = bg;
    image.dx = vxres - ((yy + 1) * vc->vc_font.height);
    image.dy = xx * vc->vc_font.width;
    image.width = vc->vc_font.height;
    image.depth = 1;

    if (attribute) {
        buf = kmalloc(cellsize, GFP_KERNEL);
        if (!buf)
            return;
    }

    while (count) {
        if (count > maxcnt)
            cnt = maxcnt;
        else
            cnt = count;

        image.height = vc->vc_font.width * cnt;
        pitch = ((image.width + 7) >> 3) + scan_align;
        pitch &= ~scan_align;
        size = pitch * image.height + buf_align;
        size &= ~buf_align;
        dst = fb_get_buffer_offset(info, &info->pixmap, size);
        image.data = dst;
        cw_putcs_aligned(vc, info, s, attribute, cnt, pitch,
                         width, cellsize, &image, buf, dst);
        image.dy += image.height;
        count -= cnt;
        s += cnt;
    }

    /* buf is always NULL except when in monochrome mode, so in this case
       it's a gain to check buf against NULL even though kfree() handles
       NULL pointers just fine */
    if (unlikely(buf))
        kfree(buf);
}

/* clear the right/bottom margins that no character cell covers */
static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
                             int bottom_only)
{
    unsigned int cw = vc->vc_font.width;
    unsigned int ch = vc->vc_font.height;
    unsigned int rw = info->var.yres - (vc->vc_cols*cw);
    unsigned int bh = info->var.xres - (vc->vc_rows*ch);
    unsigned int rs = info->var.yres - rw;
    struct fb_fillrect region;
    int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;

    region.color = attr_bgcol_ec(bgshift,vc,info);
    region.rop = ROP_COPY;

    if (rw && !bottom_only) {
        region.dx = 0;
        region.dy = info->var.yoffset + rs;
        region.height = rw;
        region.width = info->var.xres_virtual;
        info->fbops->fb_fillrect(info, &region);
    }

    if (bh) {
        region.dx = info->var.xoffset;
        region.dy = info->var.yoffset;
        region.height = info->var.yres;
        region.width = bh;
        info->fbops->fb_fillrect(info, &region);
    }
}

/*
 * Draw or erase the cursor.  Rebuilds the cursor image, colormap,
 * size, position and shape mask only when they changed (tracked in
 * ops->cursor_state), then hands the result to the driver's
 * fb_cursor hook or falls back to soft_cursor().
 */
static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
                      int softback_lines, int fg, int bg)
{
    struct fb_cursor cursor;
    struct fbcon_ops *ops = info->fbcon_par;
    unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
    int w = (vc->vc_font.height + 7) >> 3, c;
    int y = real_y(ops->p, vc->vc_y);
    int attribute, use_sw = (vc->vc_cursor_type & 0x10);
    int err = 1, dx, dy;
    char *src;
    u32 vxres = GETVXRES(ops->p->scrollmode, info);

    if (!ops->fontbuffer)
        return;

    cursor.set = 0;

    if (softback_lines) {
        if (y + softback_lines >= vc->vc_rows) {
            mode = CM_ERASE;
            ops->cursor_flash = 0;
            return;
        } else
            y += softback_lines;
    }

    c = scr_readw((u16 *) vc->vc_pos);
    attribute = get_attribute(info, c);
    src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));

    if (ops->cursor_state.image.data != src ||
        ops->cursor_reset) {
        ops->cursor_state.image.data = src;
        cursor.set |= FB_CUR_SETIMAGE;
    }

    if (attribute) {
        u8 *dst;

        dst = kmalloc(w * vc->vc_font.width, GFP_ATOMIC);
        if (!dst)
            return;
        kfree(ops->cursor_data);
        ops->cursor_data = dst;
        cw_update_attr(dst, src, attribute, vc);
        src = dst;
    }

    if (ops->cursor_state.image.fg_color != fg ||
        ops->cursor_state.image.bg_color != bg ||
        ops->cursor_reset) {
        ops->cursor_state.image.fg_color = fg;
        ops->cursor_state.image.bg_color = bg;
        cursor.set |= FB_CUR_SETCMAP;
    }

    /* rotated 90 degrees: image height/width swap font width/height */
    if (ops->cursor_state.image.height != vc->vc_font.width ||
        ops->cursor_state.image.width != vc->vc_font.height ||
        ops->cursor_reset) {
        ops->cursor_state.image.height = vc->vc_font.width;
        ops->cursor_state.image.width = vc->vc_font.height;
        cursor.set |= FB_CUR_SETSIZE;
    }

    dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height);
    dy = vc->vc_x * vc->vc_font.width;

    if (ops->cursor_state.image.dx != dx ||
        ops->cursor_state.image.dy != dy ||
        ops->cursor_reset) {
        ops->cursor_state.image.dx = dx;
        ops->cursor_state.image.dy = dy;
        cursor.set |= FB_CUR_SETPOS;
    }

    if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
        ops->cursor_reset) {
        ops->cursor_state.hot.x = cursor.hot.y = 0;
        cursor.set |= FB_CUR_SETHOT;
    }

    if (cursor.set & FB_CUR_SETSIZE ||
        vc->vc_cursor_type != ops->p->cursor_shape ||
        ops->cursor_state.mask == NULL ||
        ops->cursor_reset) {
        /* build the cursor shape mask upright in tmp, then rotate it */
        char *tmp, *mask = kmalloc(w*vc->vc_font.width, GFP_ATOMIC);
        int cur_height, size, i = 0;
        int width = (vc->vc_font.width + 7)/8;

        if (!mask)
            return;

        tmp = kmalloc(width * vc->vc_font.height, GFP_ATOMIC);

        if (!tmp) {
            kfree(mask);
            return;
        }

        kfree(ops->cursor_state.mask);
        ops->cursor_state.mask = mask;

        ops->p->cursor_shape = vc->vc_cursor_type;
        cursor.set |= FB_CUR_SETSHAPE;

        switch (ops->p->cursor_shape & CUR_HWMASK) {
        case CUR_NONE:
            cur_height = 0;
            break;
        case CUR_UNDERLINE:
            cur_height = (vc->vc_font.height < 10) ? 1 : 2;
            break;
        case CUR_LOWER_THIRD:
            cur_height = vc->vc_font.height/3;
            break;
        case CUR_LOWER_HALF:
            cur_height = vc->vc_font.height >> 1;
            break;
        case CUR_TWO_THIRDS:
            cur_height = (vc->vc_font.height << 1)/3;
            break;
        case CUR_BLOCK:
        default:
            cur_height = vc->vc_font.height;
            break;
        }

        size = (vc->vc_font.height - cur_height) * width;
        while (size--)
            tmp[i++] = 0;
        size = cur_height * width;
        while (size--)
            tmp[i++] = 0xff;
        memset(mask, 0, w * vc->vc_font.width);
        rotate_cw(tmp, mask, vc->vc_font.width, vc->vc_font.height);
        kfree(tmp);
    }

    switch (mode) {
    case CM_ERASE:
        ops->cursor_state.enable = 0;
        break;
    case CM_DRAW:
    case CM_MOVE:
    default:
        ops->cursor_state.enable = (use_sw) ? 0 : 1;
        break;
    }

    cursor.image.data = src;
    cursor.image.fg_color = ops->cursor_state.image.fg_color;
    cursor.image.bg_color = ops->cursor_state.image.bg_color;
    cursor.image.dx = ops->cursor_state.image.dx;
    cursor.image.dy = ops->cursor_state.image.dy;
    cursor.image.height = ops->cursor_state.image.height;
    cursor.image.width = ops->cursor_state.image.width;
    cursor.hot.x = ops->cursor_state.hot.x;
    cursor.hot.y = ops->cursor_state.hot.y;
    cursor.mask = ops->cursor_state.mask;
    cursor.enable = ops->cursor_state.enable;
    cursor.image.depth = 1;
    cursor.rop = ROP_XOR;

    if (info->fbops->fb_cursor)
        err = info->fbops->fb_cursor(info, &cursor);

    if (err)
        soft_cursor(info, &cursor);

    ops->cursor_reset = 0;
}

/* pan the display, translating the x offset into rotated coordinates */
static int cw_update_start(struct fb_info *info)
{
    struct fbcon_ops *ops = info->fbcon_par;
    u32 vxres = GETVXRES(ops->p->scrollmode, info);
    u32 xoffset;
    int err;

    xoffset = vxres - (info->var.xres + ops->var.yoffset);
    ops->var.yoffset = ops->var.xoffset;
    ops->var.xoffset = xoffset;
    err = fb_pan_display(info, &ops->var);
    ops->var.xoffset = info->var.xoffset;
    ops->var.yoffset = info->var.yoffset;
    ops->var.vmode = info->var.vmode;
    return err;
}

/* install the 90-degree-rotation drawing operations */
void fbcon_rotate_cw(struct fbcon_ops *ops)
{
    ops->bmove = cw_bmove;
    ops->clear = cw_clear;
    ops->putcs = cw_putcs;
    ops->clear_margins = cw_clear_margins;
    ops->cursor = cw_cursor;
    ops->update_start = cw_update_start;
}
EXPORT_SYMBOL(fbcon_rotate_cw);

MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Console Rotation (90 degrees) Support");
MODULE_LICENSE("GPL");
gpl-2.0
project-voodoo-vibrant/linux_sgh-t759
drivers/mfd/ucb1400_core.c
805
3607
/*
 * Core functions for:
 *  Philips UCB1400 multifunction chip
 *
 * Based on ucb1400_ts.c:
 *  Author:    Nicolas Pitre
 *  Created:   September 25, 2006
 *  Copyright: MontaVista Software, Inc.
 *
 * Splitting done by: Marek Vasut <marek.vasut@gmail.com>
 * If something doesn't work and it worked before splitting, e-mail me,
 * don't bother Nicolas please ;-)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This code is heavily based on ucb1x00-*.c copyrighted by Russell King
 * covering the UCB1100, UCB1200 and UCB1300..  Support for the UCB1400 has
 * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ucb1400.h>

/*
 * Read one sample from the UCB1400 ADC over the AC97 link.
 * Starts a conversion on the requested channel (optionally
 * synchronized via UCB_ADC_SYNC_ENA) and sleeps in 1-jiffy steps
 * until the data-valid bit is set.  May only be called from process
 * context (it sleeps).
 */
unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
                              int adcsync)
{
    unsigned int val;

    if (adcsync)
        adc_channel |= UCB_ADC_SYNC_ENA;

    ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel);
    ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel |
                      UCB_ADC_START);

    while (!((val = ucb1400_reg_read(ac97, UCB_ADC_DATA))
             & UCB_ADC_DAT_VALID))
        schedule_timeout_uninterruptible(1);

    return val & UCB_ADC_DAT_MASK;
}
EXPORT_SYMBOL_GPL(ucb1400_adc_read);

/*
 * Probe: verify the chip ID over AC97, then register the two child
 * platform devices (GPIO and touchscreen) that carry the actual
 * function drivers.  Resources are unwound with the goto chain below.
 */
static int ucb1400_core_probe(struct device *dev)
{
    int err;
    struct ucb1400 *ucb;
    struct ucb1400_ts ucb_ts;
    struct ucb1400_gpio ucb_gpio;
    struct snd_ac97 *ac97;
    struct ucb1400_pdata *pdata = dev->platform_data;

    memset(&ucb_ts, 0, sizeof(ucb_ts));
    memset(&ucb_gpio, 0, sizeof(ucb_gpio));

    ucb = kzalloc(sizeof(struct ucb1400), GFP_KERNEL);
    if (!ucb) {
        err = -ENOMEM;
        goto err;
    }

    dev_set_drvdata(dev, ucb);

    ac97 = to_ac97_t(dev);

    ucb_ts.id = ucb1400_reg_read(ac97, UCB_ID);
    if (ucb_ts.id != UCB_ID_1400) {
        err = -ENODEV;
        goto err0;
    }

    /* GPIO */
    ucb_gpio.ac97 = ac97;
    ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1);
    if (!ucb->ucb1400_gpio) {
        err = -ENOMEM;
        goto err0;
    }
    err = platform_device_add_data(ucb->ucb1400_gpio, &ucb_gpio,
                                   sizeof(ucb_gpio));
    if (err)
        goto err1;
    err = platform_device_add(ucb->ucb1400_gpio);
    if (err)
        goto err1;

    /* TOUCHSCREEN */
    ucb_ts.ac97 = ac97;

    if (pdata != NULL && pdata->irq >= 0)
        ucb_ts.irq = pdata->irq;
    else
        ucb_ts.irq = -1;

    ucb->ucb1400_ts = platform_device_alloc("ucb1400_ts", -1);
    if (!ucb->ucb1400_ts) {
        err = -ENOMEM;
        goto err2;
    }
    err = platform_device_add_data(ucb->ucb1400_ts, &ucb_ts,
                                   sizeof(ucb_ts));
    if (err)
        goto err3;
    err = platform_device_add(ucb->ucb1400_ts);
    if (err)
        goto err3;

    return 0;

/*
 * NOTE(review): falling from err2 into err1 calls
 * platform_device_unregister() and then platform_device_put() on the
 * same gpio device; unregister already drops the reference, so this
 * looks like a double put on the ts-allocation failure paths — verify
 * against platform_device refcounting rules.
 */
err3:
    platform_device_put(ucb->ucb1400_ts);
err2:
    platform_device_unregister(ucb->ucb1400_gpio);
err1:
    platform_device_put(ucb->ucb1400_gpio);
err0:
    kfree(ucb);
err:
    return err;
}

/* Remove: unregister both child devices and free the wrapper. */
static int ucb1400_core_remove(struct device *dev)
{
    struct ucb1400 *ucb = dev_get_drvdata(dev);

    platform_device_unregister(ucb->ucb1400_ts);
    platform_device_unregister(ucb->ucb1400_gpio);

    kfree(ucb);
    return 0;
}

static struct device_driver ucb1400_core_driver = {
    .name = "ucb1400_core",
    .bus = &ac97_bus_type,
    .probe = ucb1400_core_probe,
    .remove = ucb1400_core_remove,
};

static int __init ucb1400_core_init(void)
{
    return driver_register(&ucb1400_core_driver);
}

static void __exit ucb1400_core_exit(void)
{
    driver_unregister(&ucb1400_core_driver);
}

module_init(ucb1400_core_init);
module_exit(ucb1400_core_exit);

MODULE_DESCRIPTION("Philips UCB1400 driver");
MODULE_LICENSE("GPL");
gpl-2.0
Larspolo/linux
tools/testing/selftests/x86/sigreturn.c
805
19282
/* * sigreturn.c - tests for x86 sigreturn(2) and exit-to-userspace * Copyright (c) 2014-2015 Andrew Lutomirski * * This program is free software; you can redistribute it and/or modify * it under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * This is a series of tests that exercises the sigreturn(2) syscall and * the IRET / SYSRET paths in the kernel. * * For now, this focuses on the effects of unusual CS and SS values, * and it has a bunch of tests to make sure that ESP/RSP is restored * properly. * * The basic idea behind these tests is to raise(SIGUSR1) to create a * sigcontext frame, plug in the values to be tested, and then return, * which implicitly invokes sigreturn(2) and programs the user context * as desired. * * For tests for which we expect sigreturn and the subsequent return to * user mode to succeed, we return to a short trampoline that generates * SIGTRAP so that the meat of the tests can be ordinary C code in a * SIGTRAP handler. * * The inner workings of each test is documented below. * * Do not run on outdated, unpatched kernels at risk of nasty crashes. */ #define _GNU_SOURCE #include <sys/time.h> #include <time.h> #include <stdlib.h> #include <sys/syscall.h> #include <unistd.h> #include <stdio.h> #include <string.h> #include <inttypes.h> #include <sys/mman.h> #include <sys/signal.h> #include <sys/ucontext.h> #include <asm/ldt.h> #include <err.h> #include <setjmp.h> #include <stddef.h> #include <stdbool.h> #include <sys/ptrace.h> #include <sys/user.h> /* * In principle, this test can run on Linux emulation layers (e.g. * Illumos "LX branded zones"). 
Solaris-based kernels reserve LDT * entries 0-5 for their own internal purposes, so start our LDT * allocations above that reservation. (The tests don't pass on LX * branded zones, but at least this lets them run.) */ #define LDT_OFFSET 6 /* An aligned stack accessible through some of our segments. */ static unsigned char stack16[65536] __attribute__((aligned(4096))); /* * An aligned int3 instruction used as a trampoline. Some of the tests * want to fish out their ss values, so this trampoline copies ss to eax * before the int3. */ asm (".pushsection .text\n\t" ".type int3, @function\n\t" ".align 4096\n\t" "int3:\n\t" "mov %ss,%eax\n\t" "int3\n\t" ".size int3, . - int3\n\t" ".align 4096, 0xcc\n\t" ".popsection"); extern char int3[4096]; /* * At startup, we prepapre: * * - ldt_nonexistent_sel: An LDT entry that doesn't exist (all-zero * descriptor or out of bounds). * - code16_sel: A 16-bit LDT code segment pointing to int3. * - data16_sel: A 16-bit LDT data segment pointing to stack16. * - npcode32_sel: A 32-bit not-present LDT code segment pointing to int3. * - npdata32_sel: A 32-bit not-present LDT data segment pointing to stack16. * - gdt_data16_idx: A 16-bit GDT data segment pointing to stack16. * - gdt_npdata32_idx: A 32-bit not-present GDT data segment pointing to * stack16. * * For no particularly good reason, xyz_sel is a selector value with the * RPL and LDT bits filled in, whereas xyz_idx is just an index into the * descriptor table. These variables will be zero if their respective * segments could not be allocated. */ static unsigned short ldt_nonexistent_sel; static unsigned short code16_sel, data16_sel, npcode32_sel, npdata32_sel; static unsigned short gdt_data16_idx, gdt_npdata32_idx; static unsigned short GDT3(int idx) { return (idx << 3) | 3; } static unsigned short LDT3(int idx) { return (idx << 3) | 7; } /* Our sigaltstack scratch space. 
*/ static char altstack_data[SIGSTKSZ]; static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_sigaction = handler; sa.sa_flags = SA_SIGINFO | flags; sigemptyset(&sa.sa_mask); if (sigaction(sig, &sa, 0)) err(1, "sigaction"); } static void clearhandler(int sig) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_DFL; sigemptyset(&sa.sa_mask); if (sigaction(sig, &sa, 0)) err(1, "sigaction"); } static void add_ldt(const struct user_desc *desc, unsigned short *var, const char *name) { if (syscall(SYS_modify_ldt, 1, desc, sizeof(*desc)) == 0) { *var = LDT3(desc->entry_number); } else { printf("[NOTE]\tFailed to create %s segment\n", name); *var = 0; } } static void setup_ldt(void) { if ((unsigned long)stack16 > (1ULL << 32) - sizeof(stack16)) errx(1, "stack16 is too high\n"); if ((unsigned long)int3 > (1ULL << 32) - sizeof(int3)) errx(1, "int3 is too high\n"); ldt_nonexistent_sel = LDT3(LDT_OFFSET + 2); const struct user_desc code16_desc = { .entry_number = LDT_OFFSET + 0, .base_addr = (unsigned long)int3, .limit = 4095, .seg_32bit = 0, .contents = 2, /* Code, not conforming */ .read_exec_only = 0, .limit_in_pages = 0, .seg_not_present = 0, .useable = 0 }; add_ldt(&code16_desc, &code16_sel, "code16"); const struct user_desc data16_desc = { .entry_number = LDT_OFFSET + 1, .base_addr = (unsigned long)stack16, .limit = 0xffff, .seg_32bit = 0, .contents = 0, /* Data, grow-up */ .read_exec_only = 0, .limit_in_pages = 0, .seg_not_present = 0, .useable = 0 }; add_ldt(&data16_desc, &data16_sel, "data16"); const struct user_desc npcode32_desc = { .entry_number = LDT_OFFSET + 3, .base_addr = (unsigned long)int3, .limit = 4095, .seg_32bit = 1, .contents = 2, /* Code, not conforming */ .read_exec_only = 0, .limit_in_pages = 0, .seg_not_present = 1, .useable = 0 }; add_ldt(&npcode32_desc, &npcode32_sel, "npcode32"); const struct user_desc npdata32_desc = { .entry_number = 
LDT_OFFSET + 4, .base_addr = (unsigned long)stack16, .limit = 0xffff, .seg_32bit = 1, .contents = 0, /* Data, grow-up */ .read_exec_only = 0, .limit_in_pages = 0, .seg_not_present = 1, .useable = 0 }; add_ldt(&npdata32_desc, &npdata32_sel, "npdata32"); struct user_desc gdt_data16_desc = { .entry_number = -1, .base_addr = (unsigned long)stack16, .limit = 0xffff, .seg_32bit = 0, .contents = 0, /* Data, grow-up */ .read_exec_only = 0, .limit_in_pages = 0, .seg_not_present = 0, .useable = 0 }; if (syscall(SYS_set_thread_area, &gdt_data16_desc) == 0) { /* * This probably indicates vulnerability to CVE-2014-8133. * Merely getting here isn't definitive, though, and we'll * diagnose the problem for real later on. */ printf("[WARN]\tset_thread_area allocated data16 at index %d\n", gdt_data16_desc.entry_number); gdt_data16_idx = gdt_data16_desc.entry_number; } else { printf("[OK]\tset_thread_area refused 16-bit data\n"); } struct user_desc gdt_npdata32_desc = { .entry_number = -1, .base_addr = (unsigned long)stack16, .limit = 0xffff, .seg_32bit = 1, .contents = 0, /* Data, grow-up */ .read_exec_only = 0, .limit_in_pages = 0, .seg_not_present = 1, .useable = 0 }; if (syscall(SYS_set_thread_area, &gdt_npdata32_desc) == 0) { /* * As a hardening measure, newer kernels don't allow this. */ printf("[WARN]\tset_thread_area allocated npdata32 at index %d\n", gdt_npdata32_desc.entry_number); gdt_npdata32_idx = gdt_npdata32_desc.entry_number; } else { printf("[OK]\tset_thread_area refused 16-bit data\n"); } } /* State used by our signal handlers. */ static gregset_t initial_regs, requested_regs, resulting_regs; /* Instructions for the SIGUSR1 handler. */ static volatile unsigned short sig_cs, sig_ss; static volatile sig_atomic_t sig_trapped, sig_err, sig_trapno; /* Abstractions for some 32-bit vs 64-bit differences. 
*/ #ifdef __x86_64__ # define REG_IP REG_RIP # define REG_SP REG_RSP # define REG_AX REG_RAX struct selectors { unsigned short cs, gs, fs, ss; }; static unsigned short *ssptr(ucontext_t *ctx) { struct selectors *sels = (void *)&ctx->uc_mcontext.gregs[REG_CSGSFS]; return &sels->ss; } static unsigned short *csptr(ucontext_t *ctx) { struct selectors *sels = (void *)&ctx->uc_mcontext.gregs[REG_CSGSFS]; return &sels->cs; } #else # define REG_IP REG_EIP # define REG_SP REG_ESP # define REG_AX REG_EAX static greg_t *ssptr(ucontext_t *ctx) { return &ctx->uc_mcontext.gregs[REG_SS]; } static greg_t *csptr(ucontext_t *ctx) { return &ctx->uc_mcontext.gregs[REG_CS]; } #endif /* Number of errors in the current test case. */ static volatile sig_atomic_t nerrs; /* * SIGUSR1 handler. Sets CS and SS as requested and points IP to the * int3 trampoline. Sets SP to a large known value so that we can see * whether the value round-trips back to user mode correctly. */ static void sigusr1(int sig, siginfo_t *info, void *ctx_void) { ucontext_t *ctx = (ucontext_t*)ctx_void; memcpy(&initial_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t)); *csptr(ctx) = sig_cs; *ssptr(ctx) = sig_ss; ctx->uc_mcontext.gregs[REG_IP] = sig_cs == code16_sel ? 0 : (unsigned long)&int3; ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL; ctx->uc_mcontext.gregs[REG_AX] = 0; memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t)); requested_regs[REG_AX] = *ssptr(ctx); /* The asm code does this. */ return; } /* * Called after a successful sigreturn. Restores our state so that * the original raise(SIGUSR1) returns. */ static void sigtrap(int sig, siginfo_t *info, void *ctx_void) { ucontext_t *ctx = (ucontext_t*)ctx_void; sig_err = ctx->uc_mcontext.gregs[REG_ERR]; sig_trapno = ctx->uc_mcontext.gregs[REG_TRAPNO]; unsigned short ss; asm ("mov %%ss,%0" : "=r" (ss)); greg_t asm_ss = ctx->uc_mcontext.gregs[REG_AX]; if (asm_ss != sig_ss && sig == SIGTRAP) { /* Sanity check failure. 
*/ printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n", ss, *ssptr(ctx), (unsigned long long)asm_ss); nerrs++; } memcpy(&resulting_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t)); memcpy(&ctx->uc_mcontext.gregs, &initial_regs, sizeof(gregset_t)); sig_trapped = sig; } /* * Checks a given selector for its code bitness or returns -1 if it's not * a usable code segment selector. */ int cs_bitness(unsigned short cs) { uint32_t valid = 0, ar; asm ("lar %[cs], %[ar]\n\t" "jnz 1f\n\t" "mov $1, %[valid]\n\t" "1:" : [ar] "=r" (ar), [valid] "+rm" (valid) : [cs] "r" (cs)); if (!valid) return -1; bool db = (ar & (1 << 22)); bool l = (ar & (1 << 21)); if (!(ar & (1<<11))) return -1; /* Not code. */ if (l && !db) return 64; else if (!l && db) return 32; else if (!l && !db) return 16; else return -1; /* Unknown bitness. */ } /* Finds a usable code segment of the requested bitness. */ int find_cs(int bitness) { unsigned short my_cs; asm ("mov %%cs,%0" : "=r" (my_cs)); if (cs_bitness(my_cs) == bitness) return my_cs; if (cs_bitness(my_cs + (2 << 3)) == bitness) return my_cs + (2 << 3); if (my_cs > (2<<3) && cs_bitness(my_cs - (2 << 3)) == bitness) return my_cs - (2 << 3); if (cs_bitness(code16_sel) == bitness) return code16_sel; printf("[WARN]\tCould not find %d-bit CS\n", bitness); return -1; } static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss) { int cs = find_cs(cs_bits); if (cs == -1) { printf("[SKIP]\tCode segment unavailable for %d-bit CS, %d-bit SS\n", cs_bits, use_16bit_ss ? 16 : 32); return 0; } if (force_ss != -1) { sig_ss = force_ss; } else { if (use_16bit_ss) { if (!data16_sel) { printf("[SKIP]\tData segment unavailable for %d-bit CS, 16-bit SS\n", cs_bits); return 0; } sig_ss = data16_sel; } else { asm volatile ("mov %%ss,%0" : "=r" (sig_ss)); } } sig_cs = cs; printf("[RUN]\tValid sigreturn: %d-bit CS (%hx), %d-bit SS (%hx%s)\n", cs_bits, sig_cs, use_16bit_ss ? 16 : 32, sig_ss, (sig_ss & 4) ? 
"" : ", GDT"); raise(SIGUSR1); nerrs = 0; /* * Check that each register had an acceptable value when the * int3 trampoline was invoked. */ for (int i = 0; i < NGREG; i++) { greg_t req = requested_regs[i], res = resulting_regs[i]; if (i == REG_TRAPNO || i == REG_IP) continue; /* don't care */ if (i == REG_SP) { printf("\tSP: %llx -> %llx\n", (unsigned long long)req, (unsigned long long)res); /* * In many circumstances, the high 32 bits of rsp * are zeroed. For example, we could be a real * 32-bit program, or we could hit any of a number * of poorly-documented IRET or segmented ESP * oddities. If this happens, it's okay. */ if (res == (req & 0xFFFFFFFF)) continue; /* OK; not expected to work */ } bool ignore_reg = false; #if __i386__ if (i == REG_UESP) ignore_reg = true; #else if (i == REG_CSGSFS) { struct selectors *req_sels = (void *)&requested_regs[REG_CSGSFS]; struct selectors *res_sels = (void *)&resulting_regs[REG_CSGSFS]; if (req_sels->cs != res_sels->cs) { printf("[FAIL]\tCS mismatch: requested 0x%hx; got 0x%hx\n", req_sels->cs, res_sels->cs); nerrs++; } if (req_sels->ss != res_sels->ss) { printf("[FAIL]\tSS mismatch: requested 0x%hx; got 0x%hx\n", req_sels->ss, res_sels->ss); nerrs++; } continue; } #endif /* Sanity check on the kernel */ if (i == REG_AX && requested_regs[i] != resulting_regs[i]) { printf("[FAIL]\tAX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n", (unsigned long long)requested_regs[i], (unsigned long long)resulting_regs[i]); nerrs++; continue; } if (requested_regs[i] != resulting_regs[i] && !ignore_reg) { /* * SP is particularly interesting here. The * usual cause of failures is that we hit the * nasty IRET case of returning to a 16-bit SS, * in which case bits 16:31 of the *kernel* * stack pointer persist in ESP. 
*/ printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n", i, (unsigned long long)requested_regs[i], (unsigned long long)resulting_regs[i]); nerrs++; } } if (nerrs == 0) printf("[OK]\tall registers okay\n"); return nerrs; } static int test_bad_iret(int cs_bits, unsigned short ss, int force_cs) { int cs = force_cs == -1 ? find_cs(cs_bits) : force_cs; if (cs == -1) return 0; sig_cs = cs; sig_ss = ss; printf("[RUN]\t%d-bit CS (%hx), bogus SS (%hx)\n", cs_bits, sig_cs, sig_ss); sig_trapped = 0; raise(SIGUSR1); if (sig_trapped) { char errdesc[32] = ""; if (sig_err) { const char *src = (sig_err & 1) ? " EXT" : ""; const char *table; if ((sig_err & 0x6) == 0x0) table = "GDT"; else if ((sig_err & 0x6) == 0x4) table = "LDT"; else if ((sig_err & 0x6) == 0x2) table = "IDT"; else table = "???"; sprintf(errdesc, "%s%s index %d, ", table, src, sig_err >> 3); } char trapname[32]; if (sig_trapno == 13) strcpy(trapname, "GP"); else if (sig_trapno == 11) strcpy(trapname, "NP"); else if (sig_trapno == 12) strcpy(trapname, "SS"); else if (sig_trapno == 32) strcpy(trapname, "IRET"); /* X86_TRAP_IRET */ else sprintf(trapname, "%d", sig_trapno); printf("[OK]\tGot #%s(0x%lx) (i.e. %s%s)\n", trapname, (unsigned long)sig_err, errdesc, strsignal(sig_trapped)); return 0; } else { printf("[FAIL]\tDid not get SIGSEGV\n"); return 1; } } int main() { int total_nerrs = 0; unsigned short my_cs, my_ss; asm volatile ("mov %%cs,%0" : "=r" (my_cs)); asm volatile ("mov %%ss,%0" : "=r" (my_ss)); setup_ldt(); stack_t stack = { .ss_sp = altstack_data, .ss_size = SIGSTKSZ, }; if (sigaltstack(&stack, NULL) != 0) err(1, "sigaltstack"); sethandler(SIGUSR1, sigusr1, 0); sethandler(SIGTRAP, sigtrap, SA_ONSTACK); /* Easy cases: return to a 32-bit SS in each possible CS bitness. 
*/ total_nerrs += test_valid_sigreturn(64, false, -1); total_nerrs += test_valid_sigreturn(32, false, -1); total_nerrs += test_valid_sigreturn(16, false, -1); /* * Test easy espfix cases: return to a 16-bit LDT SS in each possible * CS bitness. NB: with a long mode CS, the SS bitness is irrelevant. * * This catches the original missing-espfix-on-64-bit-kernels issue * as well as CVE-2014-8134. */ total_nerrs += test_valid_sigreturn(64, true, -1); total_nerrs += test_valid_sigreturn(32, true, -1); total_nerrs += test_valid_sigreturn(16, true, -1); if (gdt_data16_idx) { /* * For performance reasons, Linux skips espfix if SS points * to the GDT. If we were able to allocate a 16-bit SS in * the GDT, see if it leaks parts of the kernel stack pointer. * * This tests for CVE-2014-8133. */ total_nerrs += test_valid_sigreturn(64, true, GDT3(gdt_data16_idx)); total_nerrs += test_valid_sigreturn(32, true, GDT3(gdt_data16_idx)); total_nerrs += test_valid_sigreturn(16, true, GDT3(gdt_data16_idx)); } /* * We're done testing valid sigreturn cases. Now we test states * for which sigreturn itself will succeed but the subsequent * entry to user mode will fail. * * Depending on the failure mode and the kernel bitness, these * entry failures can generate SIGSEGV, SIGBUS, or SIGILL. */ clearhandler(SIGTRAP); sethandler(SIGSEGV, sigtrap, SA_ONSTACK); sethandler(SIGBUS, sigtrap, SA_ONSTACK); sethandler(SIGILL, sigtrap, SA_ONSTACK); /* 32-bit kernels do this */ /* Easy failures: invalid SS, resulting in #GP(0) */ test_bad_iret(64, ldt_nonexistent_sel, -1); test_bad_iret(32, ldt_nonexistent_sel, -1); test_bad_iret(16, ldt_nonexistent_sel, -1); /* These fail because SS isn't a data segment, resulting in #GP(SS) */ test_bad_iret(64, my_cs, -1); test_bad_iret(32, my_cs, -1); test_bad_iret(16, my_cs, -1); /* Try to return to a not-present code segment, triggering #NP(SS). */ test_bad_iret(32, my_ss, npcode32_sel); /* * Try to return to a not-present but otherwise valid data segment. 
* This will cause IRET to fail with #SS on the espfix stack. This * exercises CVE-2014-9322. * * Note that, if espfix is enabled, 64-bit Linux will lose track * of the actual cause of failure and report #GP(0) instead. * This would be very difficult for Linux to avoid, because * espfix64 causes IRET failures to be promoted to #DF, so the * original exception frame is never pushed onto the stack. */ test_bad_iret(32, npdata32_sel, -1); /* * Try to return to a not-present but otherwise valid data * segment without invoking espfix. Newer kernels don't allow * this to happen in the first place. On older kernels, though, * this can trigger CVE-2014-9322. */ if (gdt_npdata32_idx) test_bad_iret(32, GDT3(gdt_npdata32_idx), -1); return total_nerrs ? 1 : 0; }
gpl-2.0
gsstudios/Dorimanx-SG2-I9100-Kernel
drivers/ata/pdc_adma.c
805
16691
/* * pdc_adma.c - Pacific Digital Corporation ADMA * * Maintained by: Tejun Heo <tj@kernel.org> * * Copyright 2005 Mark Lord * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * * Supports ATA disks in single-packet ADMA mode. * Uses PIO for everything else. * * TODO: Use ADMA transfers for ATAPI devices, when possible. * This requires careful attention to a number of quirks of the chip. 
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pdc_adma" #define DRV_VERSION "1.0" /* macro to calculate base address for ATA regs */ #define ADMA_ATA_REGS(base, port_no) ((base) + ((port_no) * 0x40)) /* macro to calculate base address for ADMA regs */ #define ADMA_REGS(base, port_no) ((base) + 0x80 + ((port_no) * 0x20)) /* macro to obtain addresses from ata_port */ #define ADMA_PORT_REGS(ap) \ ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no) enum { ADMA_MMIO_BAR = 4, ADMA_PORTS = 2, ADMA_CPB_BYTES = 40, ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16, ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES, ADMA_DMA_BOUNDARY = 0xffffffff, /* global register offsets */ ADMA_MODE_LOCK = 0x00c7, /* per-channel register offsets */ ADMA_CONTROL = 0x0000, /* ADMA control */ ADMA_STATUS = 0x0002, /* ADMA status */ ADMA_CPB_COUNT = 0x0004, /* CPB count */ ADMA_CPB_CURRENT = 0x000c, /* current CPB address */ ADMA_CPB_NEXT = 0x000c, /* next CPB address */ ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */ ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */ ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */ /* ADMA_CONTROL register bits */ aNIEN = (1 << 8), /* irq mask: 1==masked */ aGO = (1 << 7), /* packet trigger ("Go!") */ aRSTADM = (1 << 5), /* ADMA logic reset */ aPIOMD4 = 0x0003, /* PIO mode 4 */ /* ADMA_STATUS register bits */ aPSD = (1 << 6), aUIRQ = (1 << 4), aPERR = (1 << 0), /* CPB bits */ cDONE = (1 << 0), cATERR = (1 << 3), cVLD = (1 << 0), cDAT = (1 << 2), cIEN = (1 << 3), /* PRD bits */ pORD = (1 << 4), pDIRO = (1 << 5), pEND = (1 << 7), /* ATA register flags */ rIGN = (1 << 5), rEND = (1 << 7), /* ATA register addresses */ ADMA_REGS_CONTROL = 0x0e, ADMA_REGS_SECTOR_COUNT = 0x12, ADMA_REGS_LBA_LOW = 0x13, ADMA_REGS_LBA_MID = 0x14, 
ADMA_REGS_LBA_HIGH = 0x15, ADMA_REGS_DEVICE = 0x16, ADMA_REGS_COMMAND = 0x17, /* PCI device IDs */ board_1841_idx = 0, /* ADMA 2-port controller */ }; typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t; struct adma_port_priv { u8 *pkt; dma_addr_t pkt_dma; adma_state_t state; }; static int adma_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int adma_port_start(struct ata_port *ap); static void adma_port_stop(struct ata_port *ap); static void adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); static int adma_check_atapi_dma(struct ata_queued_cmd *qc); static void adma_freeze(struct ata_port *ap); static void adma_thaw(struct ata_port *ap); static int adma_prereset(struct ata_link *link, unsigned long deadline); static struct scsi_host_template adma_ata_sht = { ATA_BASE_SHT(DRV_NAME), .sg_tablesize = LIBATA_MAX_PRD, .dma_boundary = ADMA_DMA_BOUNDARY, }; static struct ata_port_operations adma_ata_ops = { .inherits = &ata_sff_port_ops, .lost_interrupt = ATA_OP_NULL, .check_atapi_dma = adma_check_atapi_dma, .qc_prep = adma_qc_prep, .qc_issue = adma_qc_issue, .freeze = adma_freeze, .thaw = adma_thaw, .prereset = adma_prereset, .port_start = adma_port_start, .port_stop = adma_port_stop, }; static struct ata_port_info adma_port_info[] = { /* board_1841_idx */ { .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING, .pio_mask = ATA_PIO4_ONLY, .udma_mask = ATA_UDMA4, .port_ops = &adma_ata_ops, }, }; static const struct pci_device_id adma_ata_pci_tbl[] = { { PCI_VDEVICE(PDC, 0x1841), board_1841_idx }, { } /* terminate list */ }; static struct pci_driver adma_ata_pci_driver = { .name = DRV_NAME, .id_table = adma_ata_pci_tbl, .probe = adma_ata_init_one, .remove = ata_pci_remove_one, }; static int adma_check_atapi_dma(struct ata_queued_cmd *qc) { return 1; /* ATAPI DMA not yet supported */ } static void adma_reset_engine(struct ata_port *ap) { void __iomem *chan = 
ADMA_PORT_REGS(ap); /* reset ADMA to idle state */ writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); udelay(2); writew(aPIOMD4, chan + ADMA_CONTROL); udelay(2); } static void adma_reinit_engine(struct ata_port *ap) { struct adma_port_priv *pp = ap->private_data; void __iomem *chan = ADMA_PORT_REGS(ap); /* mask/clear ATA interrupts */ writeb(ATA_NIEN, ap->ioaddr.ctl_addr); ata_sff_check_status(ap); /* reset the ADMA engine */ adma_reset_engine(ap); /* set in-FIFO threshold to 0x100 */ writew(0x100, chan + ADMA_FIFO_IN); /* set CPB pointer */ writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT); /* set out-FIFO threshold to 0x100 */ writew(0x100, chan + ADMA_FIFO_OUT); /* set CPB count */ writew(1, chan + ADMA_CPB_COUNT); /* read/discard ADMA status */ readb(chan + ADMA_STATUS); } static inline void adma_enter_reg_mode(struct ata_port *ap) { void __iomem *chan = ADMA_PORT_REGS(ap); writew(aPIOMD4, chan + ADMA_CONTROL); readb(chan + ADMA_STATUS); /* flush */ } static void adma_freeze(struct ata_port *ap) { void __iomem *chan = ADMA_PORT_REGS(ap); /* mask/clear ATA interrupts */ writeb(ATA_NIEN, ap->ioaddr.ctl_addr); ata_sff_check_status(ap); /* reset ADMA to idle state */ writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); udelay(2); writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL); udelay(2); } static void adma_thaw(struct ata_port *ap) { adma_reinit_engine(ap); } static int adma_prereset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct adma_port_priv *pp = ap->private_data; if (pp->state != adma_state_idle) /* healthy paranoia */ pp->state = adma_state_mmio; adma_reinit_engine(ap); return ata_sff_prereset(link, deadline); } static int adma_fill_sg(struct ata_queued_cmd *qc) { struct scatterlist *sg; struct ata_port *ap = qc->ap; struct adma_port_priv *pp = ap->private_data; u8 *buf = pp->pkt, *last_buf = NULL; int i = (2 + buf[3]) * 8; u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? 
pDIRO : 0); unsigned int si; for_each_sg(qc->sg, sg, qc->n_elem, si) { u32 addr; u32 len; addr = (u32)sg_dma_address(sg); *(__le32 *)(buf + i) = cpu_to_le32(addr); i += 4; len = sg_dma_len(sg) >> 3; *(__le32 *)(buf + i) = cpu_to_le32(len); i += 4; last_buf = &buf[i]; buf[i++] = pFLAGS; buf[i++] = qc->dev->dma_mode & 0xf; buf[i++] = 0; /* pPKLW */ buf[i++] = 0; /* reserved */ *(__le32 *)(buf + i) = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4); i += 4; VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4, (unsigned long)addr, len); } if (likely(last_buf)) *last_buf |= pEND; return i; } static void adma_qc_prep(struct ata_queued_cmd *qc) { struct adma_port_priv *pp = qc->ap->private_data; u8 *buf = pp->pkt; u32 pkt_dma = (u32)pp->pkt_dma; int i = 0; VPRINTK("ENTER\n"); adma_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) return; buf[i++] = 0; /* Response flags */ buf[i++] = 0; /* reserved */ buf[i++] = cVLD | cDAT | cIEN; i++; /* cLEN, gets filled in below */ *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */ i += 4; /* cNCPB */ i += 4; /* cPRD, gets filled in below */ buf[i++] = 0; /* reserved */ buf[i++] = 0; /* reserved */ buf[i++] = 0; /* reserved */ buf[i++] = 0; /* reserved */ /* ATA registers; must be a multiple of 4 */ buf[i++] = qc->tf.device; buf[i++] = ADMA_REGS_DEVICE; if ((qc->tf.flags & ATA_TFLAG_LBA48)) { buf[i++] = qc->tf.hob_nsect; buf[i++] = ADMA_REGS_SECTOR_COUNT; buf[i++] = qc->tf.hob_lbal; buf[i++] = ADMA_REGS_LBA_LOW; buf[i++] = qc->tf.hob_lbam; buf[i++] = ADMA_REGS_LBA_MID; buf[i++] = qc->tf.hob_lbah; buf[i++] = ADMA_REGS_LBA_HIGH; } buf[i++] = qc->tf.nsect; buf[i++] = ADMA_REGS_SECTOR_COUNT; buf[i++] = qc->tf.lbal; buf[i++] = ADMA_REGS_LBA_LOW; buf[i++] = qc->tf.lbam; buf[i++] = ADMA_REGS_LBA_MID; buf[i++] = qc->tf.lbah; buf[i++] = ADMA_REGS_LBA_HIGH; buf[i++] = 0; buf[i++] = ADMA_REGS_CONTROL; buf[i++] = rIGN; buf[i++] = 0; buf[i++] = qc->tf.command; buf[i++] = ADMA_REGS_COMMAND | rEND; buf[3] = (i >> 3) - 2; /* cLEN */ *(__le32 
*)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */ i = adma_fill_sg(qc); wmb(); /* flush PRDs and pkt to memory */ #if 0 /* dump out CPB + PRDs for debug */ { int j, len = 0; static char obuf[2048]; for (j = 0; j < i; ++j) { len += sprintf(obuf+len, "%02x ", buf[j]); if ((j & 7) == 7) { printk("%s\n", obuf); len = 0; } } if (len) printk("%s\n", obuf); } #endif } static inline void adma_packet_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; void __iomem *chan = ADMA_PORT_REGS(ap); VPRINTK("ENTER, ap %p\n", ap); /* fire up the ADMA engine */ writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); } static unsigned int adma_qc_issue(struct ata_queued_cmd *qc) { struct adma_port_priv *pp = qc->ap->private_data; switch (qc->tf.protocol) { case ATA_PROT_DMA: pp->state = adma_state_pkt; adma_packet_start(qc); return 0; case ATAPI_PROT_DMA: BUG(); break; default: break; } pp->state = adma_state_mmio; return ata_sff_qc_issue(qc); } static inline unsigned int adma_intr_pkt(struct ata_host *host) { unsigned int handled = 0, port_no; for (port_no = 0; port_no < host->n_ports; ++port_no) { struct ata_port *ap = host->ports[port_no]; struct adma_port_priv *pp; struct ata_queued_cmd *qc; void __iomem *chan = ADMA_PORT_REGS(ap); u8 status = readb(chan + ADMA_STATUS); if (status == 0) continue; handled = 1; adma_enter_reg_mode(ap); pp = ap->private_data; if (!pp || pp->state != adma_state_pkt) continue; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { if (status & aPERR) qc->err_mask |= AC_ERR_HOST_BUS; else if ((status & (aPSD | aUIRQ))) qc->err_mask |= AC_ERR_OTHER; if (pp->pkt[0] & cATERR) qc->err_mask |= AC_ERR_DEV; else if (pp->pkt[0] != cDONE) qc->err_mask |= AC_ERR_OTHER; if (!qc->err_mask) ata_qc_complete(qc); else { struct ata_eh_info *ehi = &ap->link.eh_info; ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "ADMA-status 0x%02X", status); ata_ehi_push_desc(ehi, "pkt[0] 0x%02X", pp->pkt[0]); if (qc->err_mask == AC_ERR_DEV) 
ata_port_abort(ap); else ata_port_freeze(ap); } } } return handled; } static inline unsigned int adma_intr_mmio(struct ata_host *host) { unsigned int handled = 0, port_no; for (port_no = 0; port_no < host->n_ports; ++port_no) { struct ata_port *ap = host->ports[port_no]; struct adma_port_priv *pp = ap->private_data; struct ata_queued_cmd *qc; if (!pp || pp->state != adma_state_mmio) continue; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { /* check main status, clearing INTRQ */ u8 status = ata_sff_check_status(ap); if ((status & ATA_BUSY)) continue; DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", ap->print_id, qc->tf.protocol, status); /* complete taskfile transaction */ pp->state = adma_state_idle; qc->err_mask |= ac_err_mask(status); if (!qc->err_mask) ata_qc_complete(qc); else { struct ata_eh_info *ehi = &ap->link.eh_info; ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "status 0x%02X", status); if (qc->err_mask == AC_ERR_DEV) ata_port_abort(ap); else ata_port_freeze(ap); } handled = 1; } } return handled; } static irqreturn_t adma_intr(int irq, void *dev_instance) { struct ata_host *host = dev_instance; unsigned int handled = 0; VPRINTK("ENTER\n"); spin_lock(&host->lock); handled = adma_intr_pkt(host) | adma_intr_mmio(host); spin_unlock(&host->lock); VPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = port->data_addr = base + 0x000; port->error_addr = port->feature_addr = base + 0x004; port->nsect_addr = base + 0x008; port->lbal_addr = base + 0x00c; port->lbam_addr = base + 0x010; port->lbah_addr = base + 0x014; port->device_addr = base + 0x018; port->status_addr = port->command_addr = base + 0x01c; port->altstatus_addr = port->ctl_addr = base + 0x038; } static int adma_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct adma_port_priv *pp; adma_enter_reg_mode(ap); pp = devm_kzalloc(dev, 
sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, GFP_KERNEL); if (!pp->pkt) return -ENOMEM; /* paranoia? */ if ((pp->pkt_dma & 7) != 0) { printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n", (u32)pp->pkt_dma); return -ENOMEM; } memset(pp->pkt, 0, ADMA_PKT_BYTES); ap->private_data = pp; adma_reinit_engine(ap); return 0; } static void adma_port_stop(struct ata_port *ap) { adma_reset_engine(ap); } static void adma_host_init(struct ata_host *host, unsigned int chip_id) { unsigned int port_no; /* enable/lock aGO operation */ writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK); /* reset the ADMA logic */ for (port_no = 0; port_no < ADMA_PORTS; ++port_no) adma_reset_engine(host->ports[port_no]); } static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) { int rc; rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit DMA enable failed\n"); return rc; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } return 0; } static int adma_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int board_idx = (unsigned int) ent->driver_data; const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL }; struct ata_host *host; void __iomem *mmio_base; int rc, port_no; ata_print_version_once(&pdev->dev, DRV_VERSION); /* alloc host */ host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS); if (!host) return -ENOMEM; /* acquire resources and fill host */ rc = pcim_enable_device(pdev); if (rc) return rc; if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) return -ENODEV; rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); mmio_base = host->iomap[ADMA_MMIO_BAR]; rc = adma_set_dma_masks(pdev, mmio_base); if (rc) return rc; for (port_no = 0; port_no < ADMA_PORTS; 
++port_no) { struct ata_port *ap = host->ports[port_no]; void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no); unsigned int offset = port_base - mmio_base; adma_ata_setup_port(&ap->ioaddr, port_base); ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio"); ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port"); } /* initialize adapter */ adma_host_init(host, board_idx); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED, &adma_ata_sht); } module_pci_driver(adma_ata_pci_driver); MODULE_AUTHOR("Mark Lord"); MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
buglabs/bug20-2.6.35-linaro
net/ipv6/anycast.c
805
11811
/* * Anycast support for IPv6 * Linux INET6 implementation * * Authors: * David L Stevens (dlstevens@us.ibm.com) * * based heavily on net/ipv6/mcast.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/capability.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/random.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/route.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/if_inet6.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <net/checksum.h> static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr); /* Big ac list lock for all the sockets */ static DEFINE_RWLOCK(ipv6_sk_ac_lock); /* * socket join an anycast group */ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; struct inet6_dev *idev; struct ipv6_ac_socklist *pac; struct net *net = sock_net(sk); int ishost = !net->ipv6.devconf_all->forwarding; int err = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (ipv6_addr_is_multicast(addr)) return -EINVAL; if (ipv6_chk_addr(net, addr, NULL, 0)) return -EINVAL; pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL); if (pac == NULL) return -ENOMEM; pac->acl_next = NULL; ipv6_addr_copy(&pac->acl_addr, addr); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); if (rt) { dev = 
rt->rt6i_dev; dev_hold(dev); dst_release(&rt->u.dst); } else if (ishost) { err = -EADDRNOTAVAIL; goto out_free_pac; } else { /* router, no matching interface: just pick one */ dev = dev_get_by_flags(net, IFF_UP, IFF_UP|IFF_LOOPBACK); } } else dev = dev_get_by_index(net, ifindex); if (dev == NULL) { err = -ENODEV; goto out_free_pac; } idev = in6_dev_get(dev); if (!idev) { if (ifindex) err = -ENODEV; else err = -EADDRNOTAVAIL; goto out_dev_put; } /* reset ishost, now that we have a specific device */ ishost = !idev->cnf.forwarding; in6_dev_put(idev); pac->acl_ifindex = dev->ifindex; /* XXX * For hosts, allow link-local or matching prefix anycasts. * This obviates the need for propagating anycast routes while * still allowing some non-router anycast participation. */ if (!ipv6_chk_prefix(addr, dev)) { if (ishost) err = -EADDRNOTAVAIL; if (err) goto out_dev_put; } err = ipv6_dev_ac_inc(dev, addr); if (err) goto out_dev_put; write_lock_bh(&ipv6_sk_ac_lock); pac->acl_next = np->ipv6_ac_list; np->ipv6_ac_list = pac; write_unlock_bh(&ipv6_sk_ac_lock); dev_put(dev); return 0; out_dev_put: dev_put(dev); out_free_pac: sock_kfree_s(sk, pac, sizeof(*pac)); return err; } /* * socket leave an anycast group */ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev; struct ipv6_ac_socklist *pac, *prev_pac; struct net *net = sock_net(sk); write_lock_bh(&ipv6_sk_ac_lock); prev_pac = NULL; for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) { if ((ifindex == 0 || pac->acl_ifindex == ifindex) && ipv6_addr_equal(&pac->acl_addr, addr)) break; prev_pac = pac; } if (!pac) { write_unlock_bh(&ipv6_sk_ac_lock); return -ENOENT; } if (prev_pac) prev_pac->acl_next = pac->acl_next; else np->ipv6_ac_list = pac->acl_next; write_unlock_bh(&ipv6_sk_ac_lock); dev = dev_get_by_index(net, pac->acl_ifindex); if (dev) { ipv6_dev_ac_dec(dev, &pac->acl_addr); dev_put(dev); } sock_kfree_s(sk, pac, sizeof(*pac)); return 0; } 
void ipv6_sock_ac_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; struct ipv6_ac_socklist *pac; struct net *net = sock_net(sk); int prev_index; write_lock_bh(&ipv6_sk_ac_lock); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; write_unlock_bh(&ipv6_sk_ac_lock); prev_index = 0; while (pac) { struct ipv6_ac_socklist *next = pac->acl_next; if (pac->acl_ifindex != prev_index) { if (dev) dev_put(dev); dev = dev_get_by_index(net, pac->acl_ifindex); prev_index = pac->acl_ifindex; } if (dev) ipv6_dev_ac_dec(dev, &pac->acl_addr); sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } if (dev) dev_put(dev); } #if 0 /* The function is not used, which is funny. Apparently, author * supposed to use it to filter out datagrams inside udp/raw but forgot. * * It is OK, anycasts are not special comparing to delivery to unicasts. */ int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex) { struct ipv6_ac_socklist *pac; struct ipv6_pinfo *np = inet6_sk(sk); int found; found = 0; read_lock(&ipv6_sk_ac_lock); for (pac=np->ipv6_ac_list; pac; pac=pac->acl_next) { if (ifindex && pac->acl_ifindex != ifindex) continue; found = ipv6_addr_equal(&pac->acl_addr, addr); if (found) break; } read_unlock(&ipv6_sk_ac_lock); return found; } #endif static void aca_put(struct ifacaddr6 *ac) { if (atomic_dec_and_test(&ac->aca_refcnt)) { in6_dev_put(ac->aca_idev); dst_release(&ac->aca_rt->u.dst); kfree(ac); } } /* * device anycast group inc (add if not found) */ int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr) { struct ifacaddr6 *aca; struct inet6_dev *idev; struct rt6_info *rt; int err; idev = in6_dev_get(dev); if (idev == NULL) return -EINVAL; write_lock_bh(&idev->lock); if (idev->dead) { err = -ENODEV; goto out; } for (aca = idev->ac_list; aca; aca = aca->aca_next) { if (ipv6_addr_equal(&aca->aca_addr, addr)) { aca->aca_users++; err = 0; goto out; } } /* * not found: create a new one. 
*/ aca = kzalloc(sizeof(struct ifacaddr6), GFP_ATOMIC); if (aca == NULL) { err = -ENOMEM; goto out; } rt = addrconf_dst_alloc(idev, addr, 1); if (IS_ERR(rt)) { kfree(aca); err = PTR_ERR(rt); goto out; } ipv6_addr_copy(&aca->aca_addr, addr); aca->aca_idev = idev; aca->aca_rt = rt; aca->aca_users = 1; /* aca_tstamp should be updated upon changes */ aca->aca_cstamp = aca->aca_tstamp = jiffies; atomic_set(&aca->aca_refcnt, 2); spin_lock_init(&aca->aca_lock); aca->aca_next = idev->ac_list; idev->ac_list = aca; write_unlock_bh(&idev->lock); ip6_ins_rt(rt); addrconf_join_solict(dev, &aca->aca_addr); aca_put(aca); return 0; out: write_unlock_bh(&idev->lock); in6_dev_put(idev); return err; } /* * device anycast group decrement */ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr) { struct ifacaddr6 *aca, *prev_aca; write_lock_bh(&idev->lock); prev_aca = NULL; for (aca = idev->ac_list; aca; aca = aca->aca_next) { if (ipv6_addr_equal(&aca->aca_addr, addr)) break; prev_aca = aca; } if (!aca) { write_unlock_bh(&idev->lock); return -ENOENT; } if (--aca->aca_users > 0) { write_unlock_bh(&idev->lock); return 0; } if (prev_aca) prev_aca->aca_next = aca->aca_next; else idev->ac_list = aca->aca_next; write_unlock_bh(&idev->lock); addrconf_leave_solict(idev, &aca->aca_addr); dst_hold(&aca->aca_rt->u.dst); ip6_del_rt(aca->aca_rt); aca_put(aca); return 0; } static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr) { int ret; struct inet6_dev *idev = in6_dev_get(dev); if (idev == NULL) return -ENODEV; ret = __ipv6_dev_ac_dec(idev, addr); in6_dev_put(idev); return ret; } /* * check if the interface has this anycast address */ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) { struct inet6_dev *idev; struct ifacaddr6 *aca; idev = in6_dev_get(dev); if (idev) { read_lock_bh(&idev->lock); for (aca = idev->ac_list; aca; aca = aca->aca_next) if (ipv6_addr_equal(&aca->aca_addr, addr)) break; read_unlock_bh(&idev->lock); 
in6_dev_put(idev); return aca != NULL; } return 0; } /* * check if given interface (or any, if dev==0) has this anycast address */ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, struct in6_addr *addr) { int found = 0; if (dev) return ipv6_chk_acast_dev(dev, addr); rcu_read_lock(); for_each_netdev_rcu(net, dev) if (ipv6_chk_acast_dev(dev, addr)) { found = 1; break; } rcu_read_unlock(); return found; } #ifdef CONFIG_PROC_FS struct ac6_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; }; #define ac6_seq_private(seq) ((struct ac6_iter_state *)(seq)->private) static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq) { struct ifacaddr6 *im = NULL; struct ac6_iter_state *state = ac6_seq_private(seq); struct net *net = seq_file_net(seq); state->idev = NULL; for_each_netdev_rcu(net, state->dev) { struct inet6_dev *idev; idev = __in6_dev_get(state->dev); if (!idev) continue; read_lock_bh(&idev->lock); im = idev->ac_list; if (im) { state->idev = idev; break; } read_unlock_bh(&idev->lock); } return im; } static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im) { struct ac6_iter_state *state = ac6_seq_private(seq); im = im->aca_next; while (!im) { if (likely(state->idev != NULL)) read_unlock_bh(&state->idev->lock); state->dev = next_net_device_rcu(state->dev); if (!state->dev) { state->idev = NULL; break; } state->idev = __in6_dev_get(state->dev); if (!state->idev) continue; read_lock_bh(&state->idev->lock); im = state->idev->ac_list; } return im; } static struct ifacaddr6 *ac6_get_idx(struct seq_file *seq, loff_t pos) { struct ifacaddr6 *im = ac6_get_first(seq); if (im) while (pos && (im = ac6_get_next(seq, im)) != NULL) --pos; return pos ? 
NULL : im; } static void *ac6_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return ac6_get_idx(seq, *pos); } static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ifacaddr6 *im = ac6_get_next(seq, v); ++*pos; return im; } static void ac6_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { struct ac6_iter_state *state = ac6_seq_private(seq); if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); state->idev = NULL; } rcu_read_unlock(); } static int ac6_seq_show(struct seq_file *seq, void *v) { struct ifacaddr6 *im = (struct ifacaddr6 *)v; struct ac6_iter_state *state = ac6_seq_private(seq); seq_printf(seq, "%-4d %-15s %pi6 %5d\n", state->dev->ifindex, state->dev->name, &im->aca_addr, im->aca_users); return 0; } static const struct seq_operations ac6_seq_ops = { .start = ac6_seq_start, .next = ac6_seq_next, .stop = ac6_seq_stop, .show = ac6_seq_show, }; static int ac6_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &ac6_seq_ops, sizeof(struct ac6_iter_state)); } static const struct file_operations ac6_seq_fops = { .owner = THIS_MODULE, .open = ac6_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; int __net_init ac6_proc_init(struct net *net) { if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops)) return -ENOMEM; return 0; } void ac6_proc_exit(struct net *net) { proc_net_remove(net, "anycast6"); } #endif
gpl-2.0
rbauduin/mptcp
drivers/clk/mxs/clk-imx28.c
1061
10354
/* * Copyright 2012 Freescale Semiconductor, Inc. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/clk.h> #include <linux/clk/mxs.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include "clk.h" static void __iomem *clkctrl; #define CLKCTRL clkctrl #define PLL0CTRL0 (CLKCTRL + 0x0000) #define PLL1CTRL0 (CLKCTRL + 0x0020) #define PLL2CTRL0 (CLKCTRL + 0x0040) #define CPU (CLKCTRL + 0x0050) #define HBUS (CLKCTRL + 0x0060) #define XBUS (CLKCTRL + 0x0070) #define XTAL (CLKCTRL + 0x0080) #define SSP0 (CLKCTRL + 0x0090) #define SSP1 (CLKCTRL + 0x00a0) #define SSP2 (CLKCTRL + 0x00b0) #define SSP3 (CLKCTRL + 0x00c0) #define GPMI (CLKCTRL + 0x00d0) #define SPDIF (CLKCTRL + 0x00e0) #define EMI (CLKCTRL + 0x00f0) #define SAIF0 (CLKCTRL + 0x0100) #define SAIF1 (CLKCTRL + 0x0110) #define LCDIF (CLKCTRL + 0x0120) #define ETM (CLKCTRL + 0x0130) #define ENET (CLKCTRL + 0x0140) #define FLEXCAN (CLKCTRL + 0x0160) #define FRAC0 (CLKCTRL + 0x01b0) #define FRAC1 (CLKCTRL + 0x01c0) #define CLKSEQ (CLKCTRL + 0x01d0) #define BP_CPU_INTERRUPT_WAIT 12 #define BP_SAIF_DIV_FRAC_EN 16 #define BP_ENET_DIV_TIME 21 #define BP_ENET_SLEEP 31 #define BP_CLKSEQ_BYPASS_SAIF0 0 #define BP_CLKSEQ_BYPASS_SSP0 3 #define BP_FRAC0_IO1FRAC 16 #define BP_FRAC0_IO0FRAC 24 static void __iomem *digctrl; #define DIGCTRL digctrl #define BP_SAIF_CLKMUX 10 /* * HW_SAIF_CLKMUX_SEL: * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1 * clock pins selected for SAIF1 input clocks. * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and * SAIF0 clock inputs selected for SAIF1 input clocks. 
* EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input * clocks. * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input * clocks. */ int mxs_saif_clkmux_select(unsigned int clkmux) { if (clkmux > 0x3) return -EINVAL; writel_relaxed(0x3 << BP_SAIF_CLKMUX, DIGCTRL + CLR); writel_relaxed(clkmux << BP_SAIF_CLKMUX, DIGCTRL + SET); return 0; } static void __init clk_misc_init(void) { u32 val; /* Gate off cpu clock in WFI for power saving */ writel_relaxed(1 << BP_CPU_INTERRUPT_WAIT, CPU + SET); /* 0 is a bad default value for a divider */ writel_relaxed(1 << BP_ENET_DIV_TIME, ENET + SET); /* Clear BYPASS for SAIF */ writel_relaxed(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ + CLR); /* SAIF has to use frac div for functional operation */ val = readl_relaxed(SAIF0); val |= 1 << BP_SAIF_DIV_FRAC_EN; writel_relaxed(val, SAIF0); val = readl_relaxed(SAIF1); val |= 1 << BP_SAIF_DIV_FRAC_EN; writel_relaxed(val, SAIF1); /* Extra fec clock setting */ val = readl_relaxed(ENET); val &= ~(1 << BP_ENET_SLEEP); writel_relaxed(val, ENET); /* * Source ssp clock from ref_io than ref_xtal, * as ref_xtal only provides 24 MHz as maximum. */ writel_relaxed(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ + CLR); /* * 480 MHz seems too high to be ssp clock source directly, * so set frac0 to get a 288 MHz ref_io0 and ref_io1. 
*/ val = readl_relaxed(FRAC0); val &= ~((0x3f << BP_FRAC0_IO0FRAC) | (0x3f << BP_FRAC0_IO1FRAC)); val |= (30 << BP_FRAC0_IO0FRAC) | (30 << BP_FRAC0_IO1FRAC); writel_relaxed(val, FRAC0); } static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", }; static const char *sel_io0[] __initconst = { "ref_io0", "ref_xtal", }; static const char *sel_io1[] __initconst = { "ref_io1", "ref_xtal", }; static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", }; static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", }; static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", }; static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", }; static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", }; static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", }; enum imx28_clk { ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1, ref_pix, ref_hsadc, ref_gpmi, saif0_sel, saif1_sel, gpmi_sel, ssp0_sel, ssp1_sel, ssp2_sel, ssp3_sel, emi_sel, etm_sel, lcdif_sel, cpu, ptp_sel, cpu_pll, cpu_xtal, hbus, xbus, ssp0_div, ssp1_div, ssp2_div, ssp3_div, gpmi_div, emi_pll, emi_xtal, lcdif_div, etm_div, ptp, saif0_div, saif1_div, clk32k_div, rtc, lradc, spdif_div, clk32k, pwm, uart, ssp0, ssp1, ssp2, ssp3, gpmi, spdif, emi, saif0, saif1, lcdif, etm, fec, can0, can1, usb0, usb1, usb0_phy, usb1_phy, enet_out, clk_max }; static struct clk *clks[clk_max]; static struct clk_onecell_data clk_data; static enum imx28_clk clks_init_on[] __initdata = { cpu, hbus, xbus, emi, uart, }; static void __init mx28_clocks_init(struct device_node *np) { struct device_node *dcnp; u32 i; dcnp = of_find_compatible_node(NULL, NULL, "fsl,imx28-digctl"); digctrl = of_iomap(dcnp, 0); WARN_ON(!digctrl); of_node_put(dcnp); clkctrl = of_iomap(np, 0); WARN_ON(!clkctrl); clk_misc_init(); clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000); clks[pll0] = mxs_clk_pll("pll0", "ref_xtal", PLL0CTRL0, 17, 480000000); clks[pll1] = mxs_clk_pll("pll1", 
"ref_xtal", PLL1CTRL0, 17, 480000000); clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000); clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0); clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1); clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2); clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3); clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0); clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1); clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2); clks[saif0_sel] = mxs_clk_mux("saif0_sel", CLKSEQ, 0, 1, sel_pll0, ARRAY_SIZE(sel_pll0)); clks[saif1_sel] = mxs_clk_mux("saif1_sel", CLKSEQ, 1, 1, sel_pll0, ARRAY_SIZE(sel_pll0)); clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 2, 1, sel_gpmi, ARRAY_SIZE(sel_gpmi)); clks[ssp0_sel] = mxs_clk_mux("ssp0_sel", CLKSEQ, 3, 1, sel_io0, ARRAY_SIZE(sel_io0)); clks[ssp1_sel] = mxs_clk_mux("ssp1_sel", CLKSEQ, 4, 1, sel_io0, ARRAY_SIZE(sel_io0)); clks[ssp2_sel] = mxs_clk_mux("ssp2_sel", CLKSEQ, 5, 1, sel_io1, ARRAY_SIZE(sel_io1)); clks[ssp3_sel] = mxs_clk_mux("ssp3_sel", CLKSEQ, 6, 1, sel_io1, ARRAY_SIZE(sel_io1)); clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 7, 1, emi_sels, ARRAY_SIZE(emi_sels)); clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu)); clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 14, 1, sel_pix, ARRAY_SIZE(sel_pix)); clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels)); clks[ptp_sel] = mxs_clk_mux("ptp_sel", ENET, 19, 1, ptp_sels, ARRAY_SIZE(ptp_sels)); clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28); clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29); clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 31); clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31); clks[ssp0_div] = mxs_clk_div("ssp0_div", "ssp0_sel", SSP0, 0, 9, 29); clks[ssp1_div] = mxs_clk_div("ssp1_div", "ssp1_sel", SSP1, 0, 9, 29); clks[ssp2_div] = mxs_clk_div("ssp2_div", 
"ssp2_sel", SSP2, 0, 9, 29); clks[ssp3_div] = mxs_clk_div("ssp3_div", "ssp3_sel", SSP3, 0, 9, 29); clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29); clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28); clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29); clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", LCDIF, 0, 13, 29); clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 7, 29); clks[ptp] = mxs_clk_div("ptp", "ptp_sel", ENET, 21, 6, 27); clks[saif0_div] = mxs_clk_frac("saif0_div", "saif0_sel", SAIF0, 0, 16, 29); clks[saif1_div] = mxs_clk_frac("saif1_div", "saif1_sel", SAIF1, 0, 16, 29); clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750); clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768); clks[lradc] = mxs_clk_fixed_factor("lradc", "clk32k", 1, 16); clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll0", 1, 4); clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26); clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29); clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31); clks[ssp0] = mxs_clk_gate("ssp0", "ssp0_div", SSP0, 31); clks[ssp1] = mxs_clk_gate("ssp1", "ssp1_div", SSP1, 31); clks[ssp2] = mxs_clk_gate("ssp2", "ssp2_div", SSP2, 31); clks[ssp3] = mxs_clk_gate("ssp3", "ssp3_div", SSP3, 31); clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31); clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31); clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31); clks[saif0] = mxs_clk_gate("saif0", "saif0_div", SAIF0, 31); clks[saif1] = mxs_clk_gate("saif1", "saif1_div", SAIF1, 31); clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", LCDIF, 31); clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31); clks[fec] = mxs_clk_gate("fec", "hbus", ENET, 30); clks[can0] = mxs_clk_gate("can0", "ref_xtal", FLEXCAN, 30); clks[can1] = mxs_clk_gate("can1", "ref_xtal", FLEXCAN, 28); clks[usb0] = mxs_clk_gate("usb0", "usb0_phy", DIGCTRL, 2); clks[usb1] = mxs_clk_gate("usb1", 
"usb1_phy", DIGCTRL, 16); clks[usb0_phy] = clk_register_gate(NULL, "usb0_phy", "pll0", 0, PLL0CTRL0, 18, 0, &mxs_lock); clks[usb1_phy] = clk_register_gate(NULL, "usb1_phy", "pll1", 0, PLL1CTRL0, 18, 0, &mxs_lock); clks[enet_out] = clk_register_gate(NULL, "enet_out", "pll2", 0, ENET, 18, 0, &mxs_lock); for (i = 0; i < ARRAY_SIZE(clks); i++) if (IS_ERR(clks[i])) { pr_err("i.MX28 clk %d: register failed with %ld\n", i, PTR_ERR(clks[i])); return; } clk_data.clks = clks; clk_data.clk_num = ARRAY_SIZE(clks); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); clk_register_clkdev(clks[enet_out], NULL, "enet_out"); for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) clk_prepare_enable(clks[clks_init_on[i]]); } CLK_OF_DECLARE(imx28_clkctrl, "fsl,imx28-clkctrl", mx28_clocks_init);
gpl-2.0
redfuture/linux-kernel
drivers/clk/mxs/clk-imx28.c
1061
10354
/* * Copyright 2012 Freescale Semiconductor, Inc. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/clk.h> #include <linux/clk/mxs.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include "clk.h" static void __iomem *clkctrl; #define CLKCTRL clkctrl #define PLL0CTRL0 (CLKCTRL + 0x0000) #define PLL1CTRL0 (CLKCTRL + 0x0020) #define PLL2CTRL0 (CLKCTRL + 0x0040) #define CPU (CLKCTRL + 0x0050) #define HBUS (CLKCTRL + 0x0060) #define XBUS (CLKCTRL + 0x0070) #define XTAL (CLKCTRL + 0x0080) #define SSP0 (CLKCTRL + 0x0090) #define SSP1 (CLKCTRL + 0x00a0) #define SSP2 (CLKCTRL + 0x00b0) #define SSP3 (CLKCTRL + 0x00c0) #define GPMI (CLKCTRL + 0x00d0) #define SPDIF (CLKCTRL + 0x00e0) #define EMI (CLKCTRL + 0x00f0) #define SAIF0 (CLKCTRL + 0x0100) #define SAIF1 (CLKCTRL + 0x0110) #define LCDIF (CLKCTRL + 0x0120) #define ETM (CLKCTRL + 0x0130) #define ENET (CLKCTRL + 0x0140) #define FLEXCAN (CLKCTRL + 0x0160) #define FRAC0 (CLKCTRL + 0x01b0) #define FRAC1 (CLKCTRL + 0x01c0) #define CLKSEQ (CLKCTRL + 0x01d0) #define BP_CPU_INTERRUPT_WAIT 12 #define BP_SAIF_DIV_FRAC_EN 16 #define BP_ENET_DIV_TIME 21 #define BP_ENET_SLEEP 31 #define BP_CLKSEQ_BYPASS_SAIF0 0 #define BP_CLKSEQ_BYPASS_SSP0 3 #define BP_FRAC0_IO1FRAC 16 #define BP_FRAC0_IO0FRAC 24 static void __iomem *digctrl; #define DIGCTRL digctrl #define BP_SAIF_CLKMUX 10 /* * HW_SAIF_CLKMUX_SEL: * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1 * clock pins selected for SAIF1 input clocks. * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and * SAIF0 clock inputs selected for SAIF1 input clocks. 
* EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input * clocks. * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input * clocks. */ int mxs_saif_clkmux_select(unsigned int clkmux) { if (clkmux > 0x3) return -EINVAL; writel_relaxed(0x3 << BP_SAIF_CLKMUX, DIGCTRL + CLR); writel_relaxed(clkmux << BP_SAIF_CLKMUX, DIGCTRL + SET); return 0; } static void __init clk_misc_init(void) { u32 val; /* Gate off cpu clock in WFI for power saving */ writel_relaxed(1 << BP_CPU_INTERRUPT_WAIT, CPU + SET); /* 0 is a bad default value for a divider */ writel_relaxed(1 << BP_ENET_DIV_TIME, ENET + SET); /* Clear BYPASS for SAIF */ writel_relaxed(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ + CLR); /* SAIF has to use frac div for functional operation */ val = readl_relaxed(SAIF0); val |= 1 << BP_SAIF_DIV_FRAC_EN; writel_relaxed(val, SAIF0); val = readl_relaxed(SAIF1); val |= 1 << BP_SAIF_DIV_FRAC_EN; writel_relaxed(val, SAIF1); /* Extra fec clock setting */ val = readl_relaxed(ENET); val &= ~(1 << BP_ENET_SLEEP); writel_relaxed(val, ENET); /* * Source ssp clock from ref_io than ref_xtal, * as ref_xtal only provides 24 MHz as maximum. */ writel_relaxed(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ + CLR); /* * 480 MHz seems too high to be ssp clock source directly, * so set frac0 to get a 288 MHz ref_io0 and ref_io1. 
*/ val = readl_relaxed(FRAC0); val &= ~((0x3f << BP_FRAC0_IO0FRAC) | (0x3f << BP_FRAC0_IO1FRAC)); val |= (30 << BP_FRAC0_IO0FRAC) | (30 << BP_FRAC0_IO1FRAC); writel_relaxed(val, FRAC0); } static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", }; static const char *sel_io0[] __initconst = { "ref_io0", "ref_xtal", }; static const char *sel_io1[] __initconst = { "ref_io1", "ref_xtal", }; static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", }; static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", }; static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", }; static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", }; static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", }; static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", }; enum imx28_clk { ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1, ref_pix, ref_hsadc, ref_gpmi, saif0_sel, saif1_sel, gpmi_sel, ssp0_sel, ssp1_sel, ssp2_sel, ssp3_sel, emi_sel, etm_sel, lcdif_sel, cpu, ptp_sel, cpu_pll, cpu_xtal, hbus, xbus, ssp0_div, ssp1_div, ssp2_div, ssp3_div, gpmi_div, emi_pll, emi_xtal, lcdif_div, etm_div, ptp, saif0_div, saif1_div, clk32k_div, rtc, lradc, spdif_div, clk32k, pwm, uart, ssp0, ssp1, ssp2, ssp3, gpmi, spdif, emi, saif0, saif1, lcdif, etm, fec, can0, can1, usb0, usb1, usb0_phy, usb1_phy, enet_out, clk_max }; static struct clk *clks[clk_max]; static struct clk_onecell_data clk_data; static enum imx28_clk clks_init_on[] __initdata = { cpu, hbus, xbus, emi, uart, }; static void __init mx28_clocks_init(struct device_node *np) { struct device_node *dcnp; u32 i; dcnp = of_find_compatible_node(NULL, NULL, "fsl,imx28-digctl"); digctrl = of_iomap(dcnp, 0); WARN_ON(!digctrl); of_node_put(dcnp); clkctrl = of_iomap(np, 0); WARN_ON(!clkctrl); clk_misc_init(); clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000); clks[pll0] = mxs_clk_pll("pll0", "ref_xtal", PLL0CTRL0, 17, 480000000); clks[pll1] = mxs_clk_pll("pll1", 
"ref_xtal", PLL1CTRL0, 17, 480000000); clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000); clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0); clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1); clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2); clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3); clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0); clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1); clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2); clks[saif0_sel] = mxs_clk_mux("saif0_sel", CLKSEQ, 0, 1, sel_pll0, ARRAY_SIZE(sel_pll0)); clks[saif1_sel] = mxs_clk_mux("saif1_sel", CLKSEQ, 1, 1, sel_pll0, ARRAY_SIZE(sel_pll0)); clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 2, 1, sel_gpmi, ARRAY_SIZE(sel_gpmi)); clks[ssp0_sel] = mxs_clk_mux("ssp0_sel", CLKSEQ, 3, 1, sel_io0, ARRAY_SIZE(sel_io0)); clks[ssp1_sel] = mxs_clk_mux("ssp1_sel", CLKSEQ, 4, 1, sel_io0, ARRAY_SIZE(sel_io0)); clks[ssp2_sel] = mxs_clk_mux("ssp2_sel", CLKSEQ, 5, 1, sel_io1, ARRAY_SIZE(sel_io1)); clks[ssp3_sel] = mxs_clk_mux("ssp3_sel", CLKSEQ, 6, 1, sel_io1, ARRAY_SIZE(sel_io1)); clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 7, 1, emi_sels, ARRAY_SIZE(emi_sels)); clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu)); clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 14, 1, sel_pix, ARRAY_SIZE(sel_pix)); clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels)); clks[ptp_sel] = mxs_clk_mux("ptp_sel", ENET, 19, 1, ptp_sels, ARRAY_SIZE(ptp_sels)); clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28); clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29); clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 31); clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31); clks[ssp0_div] = mxs_clk_div("ssp0_div", "ssp0_sel", SSP0, 0, 9, 29); clks[ssp1_div] = mxs_clk_div("ssp1_div", "ssp1_sel", SSP1, 0, 9, 29); clks[ssp2_div] = mxs_clk_div("ssp2_div", 
"ssp2_sel", SSP2, 0, 9, 29); clks[ssp3_div] = mxs_clk_div("ssp3_div", "ssp3_sel", SSP3, 0, 9, 29); clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29); clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28); clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29); clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", LCDIF, 0, 13, 29); clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 7, 29); clks[ptp] = mxs_clk_div("ptp", "ptp_sel", ENET, 21, 6, 27); clks[saif0_div] = mxs_clk_frac("saif0_div", "saif0_sel", SAIF0, 0, 16, 29); clks[saif1_div] = mxs_clk_frac("saif1_div", "saif1_sel", SAIF1, 0, 16, 29); clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750); clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768); clks[lradc] = mxs_clk_fixed_factor("lradc", "clk32k", 1, 16); clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll0", 1, 4); clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26); clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29); clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31); clks[ssp0] = mxs_clk_gate("ssp0", "ssp0_div", SSP0, 31); clks[ssp1] = mxs_clk_gate("ssp1", "ssp1_div", SSP1, 31); clks[ssp2] = mxs_clk_gate("ssp2", "ssp2_div", SSP2, 31); clks[ssp3] = mxs_clk_gate("ssp3", "ssp3_div", SSP3, 31); clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31); clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31); clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31); clks[saif0] = mxs_clk_gate("saif0", "saif0_div", SAIF0, 31); clks[saif1] = mxs_clk_gate("saif1", "saif1_div", SAIF1, 31); clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", LCDIF, 31); clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31); clks[fec] = mxs_clk_gate("fec", "hbus", ENET, 30); clks[can0] = mxs_clk_gate("can0", "ref_xtal", FLEXCAN, 30); clks[can1] = mxs_clk_gate("can1", "ref_xtal", FLEXCAN, 28); clks[usb0] = mxs_clk_gate("usb0", "usb0_phy", DIGCTRL, 2); clks[usb1] = mxs_clk_gate("usb1", 
"usb1_phy", DIGCTRL, 16); clks[usb0_phy] = clk_register_gate(NULL, "usb0_phy", "pll0", 0, PLL0CTRL0, 18, 0, &mxs_lock); clks[usb1_phy] = clk_register_gate(NULL, "usb1_phy", "pll1", 0, PLL1CTRL0, 18, 0, &mxs_lock); clks[enet_out] = clk_register_gate(NULL, "enet_out", "pll2", 0, ENET, 18, 0, &mxs_lock); for (i = 0; i < ARRAY_SIZE(clks); i++) if (IS_ERR(clks[i])) { pr_err("i.MX28 clk %d: register failed with %ld\n", i, PTR_ERR(clks[i])); return; } clk_data.clks = clks; clk_data.clk_num = ARRAY_SIZE(clks); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); clk_register_clkdev(clks[enet_out], NULL, "enet_out"); for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) clk_prepare_enable(clks[clks_init_on[i]]); } CLK_OF_DECLARE(imx28_clkctrl, "fsl,imx28-clkctrl", mx28_clocks_init);
gpl-2.0
ninjablocks/VAR-SOM-AM33-SDK7-Kernel
fs/nilfs2/cpfile.c
2853
25075
/* * cpfile.c - NILFS checkpoint file. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Koji Sato <koji@osrg.net>. */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/errno.h> #include <linux/nilfs2_fs.h> #include "mdt.h" #include "cpfile.h" static inline unsigned long nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile) { return NILFS_MDT(cpfile)->mi_entries_per_block; } /* block number from the beginning of the file */ static unsigned long nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno) { __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); return (unsigned long)tcno; } /* offset in block */ static unsigned long nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno) { __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); } static unsigned long nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile, __u64 curr, __u64 max) { return min_t(__u64, nilfs_cpfile_checkpoints_per_block(cpfile) - nilfs_cpfile_get_offset(cpfile, curr), max - curr); } static inline int nilfs_cpfile_is_in_first(const 
struct inode *cpfile, __u64 cno) { return nilfs_cpfile_get_blkoff(cpfile, cno) == 0; } static unsigned int nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile, struct buffer_head *bh, void *kaddr, unsigned int n) { struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); unsigned int count; count = le32_to_cpu(cp->cp_checkpoints_count) + n; cp->cp_checkpoints_count = cpu_to_le32(count); return count; } static unsigned int nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile, struct buffer_head *bh, void *kaddr, unsigned int n) { struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); unsigned int count; WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n); count = le32_to_cpu(cp->cp_checkpoints_count) - n; cp->cp_checkpoints_count = cpu_to_le32(count); return count; } static inline struct nilfs_cpfile_header * nilfs_cpfile_block_get_header(const struct inode *cpfile, struct buffer_head *bh, void *kaddr) { return kaddr + bh_offset(bh); } static struct nilfs_checkpoint * nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno, struct buffer_head *bh, void *kaddr) { return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) * NILFS_MDT(cpfile)->mi_entry_size; } static void nilfs_cpfile_block_init(struct inode *cpfile, struct buffer_head *bh, void *kaddr) { struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; int n = nilfs_cpfile_checkpoints_per_block(cpfile); while (n-- > 0) { nilfs_checkpoint_set_invalid(cp); cp = (void *)cp + cpsz; } } static inline int nilfs_cpfile_get_header_block(struct inode *cpfile, struct buffer_head **bhp) { return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp); } static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile, __u64 cno, int create, struct buffer_head **bhp) { return nilfs_mdt_get_block(cpfile, nilfs_cpfile_get_blkoff(cpfile, cno), create, nilfs_cpfile_block_init, bhp); } static inline int 
nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile, __u64 cno) { return nilfs_mdt_delete_block(cpfile, nilfs_cpfile_get_blkoff(cpfile, cno)); } /** * nilfs_cpfile_get_checkpoint - get a checkpoint * @cpfile: inode of checkpoint file * @cno: checkpoint number * @create: create flag * @cpp: pointer to a checkpoint * @bhp: pointer to a buffer head * * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint * specified by @cno. A new checkpoint will be created if @cno is the current * checkpoint number and @create is nonzero. * * Return Value: On success, 0 is returned, and the checkpoint and the * buffer head of the buffer on which the checkpoint is located are stored in * the place pointed by @cpp and @bhp, respectively. On error, one of the * following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - No such checkpoint. * * %-EINVAL - invalid checkpoint. */ int nilfs_cpfile_get_checkpoint(struct inode *cpfile, __u64 cno, int create, struct nilfs_checkpoint **cpp, struct buffer_head **bhp) { struct buffer_head *header_bh, *cp_bh; struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; void *kaddr; int ret; if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) || (cno < nilfs_mdt_cno(cpfile) && create))) return -EINVAL; down_write(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); if (ret < 0) goto out_sem; ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh); if (ret < 0) goto out_header; kaddr = kmap(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); if (nilfs_checkpoint_invalid(cp)) { if (!create) { kunmap(cp_bh->b_page); brelse(cp_bh); ret = -ENOENT; goto out_header; } /* a newly-created checkpoint */ nilfs_checkpoint_clear_invalid(cp); if (!nilfs_cpfile_is_in_first(cpfile, cno)) nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh, kaddr, 1); mark_buffer_dirty(cp_bh); kaddr = 
kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_ncheckpoints, 1); kunmap_atomic(kaddr); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(cpfile); } if (cpp != NULL) *cpp = cp; *bhp = cp_bh; out_header: brelse(header_bh); out_sem: up_write(&NILFS_MDT(cpfile)->mi_sem); return ret; } /** * nilfs_cpfile_put_checkpoint - put a checkpoint * @cpfile: inode of checkpoint file * @cno: checkpoint number * @bh: buffer head * * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint * specified by @cno. @bh must be the buffer head which has been returned by * a previous call to nilfs_cpfile_get_checkpoint() with @cno. */ void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno, struct buffer_head *bh) { kunmap(bh->b_page); brelse(bh); } /** * nilfs_cpfile_delete_checkpoints - delete checkpoints * @cpfile: inode of checkpoint file * @start: start checkpoint number * @end: end checkpoint numer * * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in * the period from @start to @end, excluding @end itself. The checkpoints * which have been already deleted are ignored. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EINVAL - invalid checkpoints. 
*/ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, __u64 start, __u64 end) { struct buffer_head *header_bh, *cp_bh; struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; __u64 cno; void *kaddr; unsigned long tnicps; int ret, ncps, nicps, nss, count, i; if (unlikely(start == 0 || start > end)) { printk(KERN_ERR "%s: invalid range of checkpoint numbers: " "[%llu, %llu)\n", __func__, (unsigned long long)start, (unsigned long long)end); return -EINVAL; } down_write(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); if (ret < 0) goto out_sem; tnicps = 0; nss = 0; for (cno = start; cno < end; cno += ncps) { ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end); ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); if (ret < 0) { if (ret != -ENOENT) break; /* skip hole */ ret = 0; continue; } kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint( cpfile, cno, cp_bh, kaddr); nicps = 0; for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) { if (nilfs_checkpoint_snapshot(cp)) { nss++; } else if (!nilfs_checkpoint_invalid(cp)) { nilfs_checkpoint_set_invalid(cp); nicps++; } } if (nicps > 0) { tnicps += nicps; mark_buffer_dirty(cp_bh); nilfs_mdt_mark_dirty(cpfile); if (!nilfs_cpfile_is_in_first(cpfile, cno)) { count = nilfs_cpfile_block_sub_valid_checkpoints( cpfile, cp_bh, kaddr, nicps); if (count == 0) { /* make hole */ kunmap_atomic(kaddr); brelse(cp_bh); ret = nilfs_cpfile_delete_checkpoint_block( cpfile, cno); if (ret == 0) continue; printk(KERN_ERR "%s: cannot delete block\n", __func__); break; } } } kunmap_atomic(kaddr); brelse(cp_bh); } if (tnicps > 0) { kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(cpfile); kunmap_atomic(kaddr); } brelse(header_bh); if (nss > 0) ret = 
-EBUSY; out_sem: up_write(&NILFS_MDT(cpfile)->mi_sem); return ret; } static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile, struct nilfs_checkpoint *cp, struct nilfs_cpinfo *ci) { ci->ci_flags = le32_to_cpu(cp->cp_flags); ci->ci_cno = le64_to_cpu(cp->cp_cno); ci->ci_create = le64_to_cpu(cp->cp_create); ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc); ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count); ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count); ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); } static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, void *buf, unsigned cisz, size_t nci) { struct nilfs_checkpoint *cp; struct nilfs_cpinfo *ci = buf; struct buffer_head *bh; size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop; void *kaddr; int n, ret; int ncps, i; if (cno == 0) return -ENOENT; /* checkpoint number 0 is invalid */ down_read(&NILFS_MDT(cpfile)->mi_sem); for (n = 0; cno < cur_cno && n < nci; cno += ncps) { ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno); ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); if (ret < 0) { if (ret != -ENOENT) goto out; continue; /* skip hole */ } kaddr = kmap_atomic(bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { if (!nilfs_checkpoint_invalid(cp)) { nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci); ci = (void *)ci + cisz; n++; } } kunmap_atomic(kaddr); brelse(bh); } ret = n; if (n > 0) { ci = (void *)ci - cisz; *cnop = ci->ci_cno + 1; } out: up_read(&NILFS_MDT(cpfile)->mi_sem); return ret; } static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, void *buf, unsigned cisz, size_t nci) { struct buffer_head *bh; struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; struct nilfs_cpinfo *ci = buf; __u64 curr = *cnop, next; unsigned long curr_blkoff, next_blkoff; void *kaddr; int 
n = 0, ret; down_read(&NILFS_MDT(cpfile)->mi_sem); if (curr == 0) { ret = nilfs_cpfile_get_header_block(cpfile, &bh); if (ret < 0) goto out; kaddr = kmap_atomic(bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); curr = le64_to_cpu(header->ch_snapshot_list.ssl_next); kunmap_atomic(kaddr); brelse(bh); if (curr == 0) { ret = 0; goto out; } } else if (unlikely(curr == ~(__u64)0)) { ret = 0; goto out; } curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr); ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh); if (unlikely(ret < 0)) { if (ret == -ENOENT) ret = 0; /* No snapshots (started from a hole block) */ goto out; } kaddr = kmap_atomic(bh->b_page); while (n < nci) { cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr); curr = ~(__u64)0; /* Terminator */ if (unlikely(nilfs_checkpoint_invalid(cp) || !nilfs_checkpoint_snapshot(cp))) break; nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci); ci = (void *)ci + cisz; n++; next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); if (next == 0) break; /* reach end of the snapshot list */ next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next); if (curr_blkoff != next_blkoff) { kunmap_atomic(kaddr); brelse(bh); ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0, &bh); if (unlikely(ret < 0)) { WARN_ON(ret == -ENOENT); goto out; } kaddr = kmap_atomic(bh->b_page); } curr = next; curr_blkoff = next_blkoff; } kunmap_atomic(kaddr); brelse(bh); *cnop = curr; ret = n; out: up_read(&NILFS_MDT(cpfile)->mi_sem); return ret; } /** * nilfs_cpfile_get_cpinfo - * @cpfile: * @cno: * @ci: * @nci: */ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, void *buf, unsigned cisz, size_t nci) { switch (mode) { case NILFS_CHECKPOINT: return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci); case NILFS_SNAPSHOT: return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci); default: return -EINVAL; } } /** * nilfs_cpfile_delete_checkpoint - * @cpfile: * @cno: */ int 
nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	/* look the checkpoint up; a snapshot must not be deleted this way */
	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}

/*
 * Return the snapshot list embedded in checkpoint @cno, or the list head
 * kept in the cpfile header when @cno is 0 (the header acts as the list
 * anchor).  @kaddr must map the page of @bh.
 */
static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}

/*
 * Turn checkpoint @cno into a snapshot: link it into the doubly-linked
 * snapshot list (anchored in the cpfile header), set its snapshot flag and
 * bump the snapshot count.  Returns 0 if @cno already is a snapshot.
 */
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0; /* already a snapshot: nothing to do */
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	/*
	 * Find the insertion point: follow ssl_prev links from the header
	 * until reaching an entry whose number is not greater than @cno,
	 * remapping the kaddr only when we cross into a different block.
	 */
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		/* prev == 0 means the list anchor in the header */
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* splice @cno in between prev and curr */
	kaddr = kmap_atomic(curr_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(curr_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/*
 * Demote snapshot @cno back to a plain checkpoint: unlink it from the
 * snapshot list, clear its snapshot flag and decrement the snapshot count.
 * Returns 0 if @cno is not a snapshot.
 */
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0; /* not a snapshot: nothing to do */
		kunmap_atomic(kaddr);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		/* next == 0 means the list anchor in the header */
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* bridge prev and next around @cno, then clear its own links */
	kaddr = kmap_atomic(next_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(next_bh);
	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_is_snapshot - determine whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() tests whether the checkpoint
 * specified by @cno has its snapshot flag set.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * CP number is invalid if it's zero or larger than the
	 * largest existing one.
	 */
	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
		return -ENOENT;
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	if (nilfs_checkpoint_invalid(cp))
		ret = -ENOENT;
	else
		ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: new mode of the checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno.  The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
*/ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) { int ret; switch (mode) { case NILFS_CHECKPOINT: if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno)) /* * Current implementation does not have to protect * plain read-only mounts since they are exclusive * with a read/write mount and are protected from the * cleaner. */ ret = -EBUSY; else ret = nilfs_cpfile_clear_snapshot(cpfile, cno); return ret; case NILFS_SNAPSHOT: return nilfs_cpfile_set_snapshot(cpfile, cno); default: return -EINVAL; } } /** * nilfs_cpfile_get_stat - get checkpoint statistics * @cpfile: inode of checkpoint file * @stat: pointer to a structure of checkpoint statistics * * Description: nilfs_cpfile_get_stat() returns information about checkpoints. * * Return Value: On success, 0 is returned, and checkpoints information is * stored in the place pointed by @stat. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
*/ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) { struct buffer_head *bh; struct nilfs_cpfile_header *header; void *kaddr; int ret; down_read(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_header_block(cpfile, &bh); if (ret < 0) goto out_sem; kaddr = kmap_atomic(bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); cpstat->cs_cno = nilfs_mdt_cno(cpfile); cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints); cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots); kunmap_atomic(kaddr); brelse(bh); out_sem: up_read(&NILFS_MDT(cpfile)->mi_sem); return ret; } /** * nilfs_cpfile_read - read or get cpfile inode * @sb: super block instance * @cpsize: size of a checkpoint entry * @raw_inode: on-disk cpfile inode * @inodep: buffer to store the inode */ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, struct nilfs_inode *raw_inode, struct inode **inodep) { struct inode *cpfile; int err; cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO); if (unlikely(!cpfile)) return -ENOMEM; if (!(cpfile->i_state & I_NEW)) goto out; err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0); if (err) goto failed; nilfs_mdt_set_entry_size(cpfile, cpsize, sizeof(struct nilfs_cpfile_header)); err = nilfs_read_inode_common(cpfile, raw_inode); if (err) goto failed; unlock_new_inode(cpfile); out: *inodep = cpfile; return 0; failed: iget_failed(cpfile); return err; }
gpl-2.0
Openwide-Ingenierie/xvisor-next
tools/dtc/livetree.c
5669
13912
/*
 * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation.  2005.
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 *                                                                   USA
 */

#include "dtc.h"

/*
 * Tree building functions
 */

/* Attach @label to the label list, reviving it if a deleted copy exists. */
void add_label(struct label **labels, char *label)
{
	struct label *new;

	/* Make sure the label isn't already there */
	for_each_label_withdel(*labels, new)
		if (streq(new->label, label)) {
			new->deleted = 0;
			return;
		}

	new = xmalloc(sizeof(*new));
	memset(new, 0, sizeof(*new));
	new->label = label;
	new->next = *labels;
	*labels = new;
}

/* Lazily delete every label on the list (marked, not freed). */
void delete_labels(struct label **labels)
{
	struct label *label;

	for_each_label(*labels, label)
		label->deleted = 1;
}

/* Allocate a property taking ownership of @name and @val. */
struct property *build_property(char *name, struct data val)
{
	struct property *new = xmalloc(sizeof(*new));

	memset(new, 0, sizeof(*new));

	new->name = name;
	new->val = val;

	return new;
}

/* Allocate a deletion marker for property @name (used by /delete-property/). */
struct property *build_property_delete(char *name)
{
	struct property *new = xmalloc(sizeof(*new));

	memset(new, 0, sizeof(*new));

	new->name = name;
	new->deleted = 1;

	return new;
}

/* Prepend @first (which must be unlinked) to property list @list. */
struct property *chain_property(struct property *first, struct property *list)
{
	assert(first->next == NULL);

	first->next = list;
	return first;
}

/* Reverse a singly-linked property list in place; returns the new head. */
struct property *reverse_properties(struct property *first)
{
	struct property *p = first;
	struct property *head = NULL;
	struct property *next;

	while (p) {
		next = p->next;
		p->next = head;
		head = p;
		p = next;
	}
	return head;
}

/*
 * Allocate a node from @proplist (reversed into declaration order) and
 * @children, fixing up each child's parent pointer.
 */
struct node *build_node(struct property *proplist, struct node *children)
{
	struct node *new = xmalloc(sizeof(*new));
	struct node *child;

	memset(new, 0, sizeof(*new));

	new->proplist = reverse_properties(proplist);
	new->children = children;

	for_each_child(new, child) {
		child->parent = new;
	}

	return new;
}

/* Allocate a deletion marker node (used by /delete-node/). */
struct node *build_node_delete(void)
{
	struct node *new = xmalloc(sizeof(*new));

	memset(new, 0, sizeof(*new));

	new->deleted = 1;

	return new;
}

/* Give an as-yet unnamed node its name; takes ownership of @name. */
struct node *name_node(struct node *node, char *name)
{
	assert(node->name == NULL);

	node->name = name;

	return node;
}

/*
 * Merge the contents of @new_node into @old_node, consuming @new_node.
 * Colliding properties are overwritten, colliding children are merged
 * recursively, and deletion markers remove the corresponding entries.
 */
struct node *merge_nodes(struct node *old_node, struct node *new_node)
{
	struct property *new_prop, *old_prop;
	struct node *new_child, *old_child;
	struct label *l;

	old_node->deleted = 0;

	/* Add new node labels to old node */
	for_each_label_withdel(new_node->labels, l)
		add_label(&old_node->labels, l->label);

	/* Move properties from the new node to the old node.  If there
	 * is a collision, replace the old value with the new */
	while (new_node->proplist) {
		/* Pop the property off the list */
		new_prop = new_node->proplist;
		new_node->proplist = new_prop->next;
		new_prop->next = NULL;

		if (new_prop->deleted) {
			delete_property_by_name(old_node, new_prop->name);
			free(new_prop);
			continue;
		}

		/* Look for a collision, set new value if there is */
		for_each_property_withdel(old_node, old_prop) {
			if (streq(old_prop->name, new_prop->name)) {
				/* Add new labels to old property */
				for_each_label_withdel(new_prop->labels, l)
					add_label(&old_prop->labels, l->label);

				old_prop->val = new_prop->val;
				old_prop->deleted = 0;
				free(new_prop);
				new_prop = NULL;
				break;
			}
		}

		/* if no collision occurred, add property to the old node. */
		if (new_prop)
			add_property(old_node, new_prop);
	}

	/* Move the override child nodes into the primary node.  If
	 * there is a collision, then merge the nodes.
	 */
	while (new_node->children) {
		/* Pop the child node off the list */
		new_child = new_node->children;
		new_node->children = new_child->next_sibling;
		new_child->parent = NULL;
		new_child->next_sibling = NULL;

		if (new_child->deleted) {
			delete_node_by_name(old_node, new_child->name);
			free(new_child);
			continue;
		}

		/* Search for a collision.  Merge if there is */
		for_each_child_withdel(old_node, old_child) {
			if (streq(old_child->name, new_child->name)) {
				merge_nodes(old_child, new_child);
				new_child = NULL;
				break;
			}
		}

		/* if no collision occurred, add child to the old node. */
		if (new_child)
			add_child(old_node, new_child);
	}

	/* The new node contents are now merged into the old node.  Free
	 * the new node. */
	free(new_node);

	return old_node;
}

/* Prepend @first (which must be unlinked) to sibling list @list. */
struct node *chain_node(struct node *first, struct node *list)
{
	assert(first->next_sibling == NULL);

	first->next_sibling = list;
	return first;
}

/* Append @prop to the end of @node's property list. */
void add_property(struct node *node, struct property *prop)
{
	struct property **p;

	prop->next = NULL;

	p = &node->proplist;
	while (*p)
		p = &((*p)->next);

	*p = prop;
}

/* Mark the first property of @node called @name as deleted, if any. */
void delete_property_by_name(struct node *node, char *name)
{
	struct property *prop = node->proplist;

	while (prop) {
		/* use streq() like the rest of this file */
		if (streq(prop->name, name)) {
			delete_property(prop);
			return;
		}
		prop = prop->next;
	}
}

/* Lazily delete @prop together with its labels. */
void delete_property(struct property *prop)
{
	prop->deleted = 1;
	delete_labels(&prop->labels);
}

/* Append @child to the end of @parent's child list. */
void add_child(struct node *parent, struct node *child)
{
	struct node **p;

	child->next_sibling = NULL;
	child->parent = parent;

	p = &parent->children;
	while (*p)
		p = &((*p)->next_sibling);

	*p = child;
}

/* Mark the first child of @parent called @name as deleted, if any. */
void delete_node_by_name(struct node *parent, char *name)
{
	struct node *node = parent->children;

	while (node) {
		/* use streq() like the rest of this file */
		if (streq(node->name, name)) {
			delete_node(node);
			return;
		}
		node = node->next_sibling;
	}
}

/* Recursively mark @node, its subtree, properties and labels deleted. */
void delete_node(struct node *node)
{
	struct property *prop;
	struct node *child;

	node->deleted = 1;
	for_each_child(node, child)
		delete_node(child);
	for_each_property(node, prop)
		delete_property(prop);
	delete_labels(&node->labels);
}
/* Allocate a /memreserve/ entry covering [@address, @address + @size). */
struct reserve_info *build_reserve_entry(uint64_t address, uint64_t size)
{
	struct reserve_info *new = xmalloc(sizeof(*new));

	memset(new, 0, sizeof(*new));

	new->re.address = address;
	new->re.size = size;

	return new;
}

/* Prepend @first (which must be unlinked) to reserve list @list. */
struct reserve_info *chain_reserve_entry(struct reserve_info *first,
					 struct reserve_info *list)
{
	assert(first->next == NULL);

	first->next = list;
	return first;
}

/* Append @new to the end of reserve list @list; returns the list head. */
struct reserve_info *add_reserve_entry(struct reserve_info *list,
				       struct reserve_info *new)
{
	struct reserve_info *last;

	new->next = NULL;

	if (! list)
		return new;

	for (last = list; last->next; last = last->next)
		;

	last->next = new;

	return list;
}

/* Bundle the reserve list, device tree and boot CPU id into a boot_info. */
struct boot_info *build_boot_info(struct reserve_info *reservelist,
				  struct node *tree, uint32_t boot_cpuid_phys)
{
	struct boot_info *bi;

	bi = xmalloc(sizeof(*bi));
	bi->reservelist = reservelist;
	bi->dt = tree;
	bi->boot_cpuid_phys = boot_cpuid_phys;

	return bi;
}

/*
 * Tree accessor functions
 */

/* Return the unit-address part of @node's name ("" if there is none). */
const char *get_unitname(struct node *node)
{
	if (node->name[node->basenamelen] == '\0')
		return "";
	else
		return node->name + node->basenamelen + 1;
}

/* Look up property @propname on @node; NULL if not present. */
struct property *get_property(struct node *node, const char *propname)
{
	struct property *prop;

	for_each_property(node, prop)
		if (streq(prop->name, propname))
			return prop;

	return NULL;
}

/* Interpret a single-cell property value as a cell (asserts on length). */
cell_t propval_cell(struct property *prop)
{
	assert(prop->val.len == sizeof(cell_t));
	return fdt32_to_cpu(*((cell_t *)prop->val.val));
}

/*
 * Depth-first search for a property carrying @label anywhere under @tree.
 * On success *node is the owning node; on failure both results are NULL.
 */
struct property *get_property_by_label(struct node *tree, const char *label,
				       struct node **node)
{
	struct property *prop;
	struct node *c;

	*node = tree;

	for_each_property(tree, prop) {
		struct label *l;

		for_each_label(prop->labels, l)
			if (streq(l->label, label))
				return prop;
	}

	for_each_child(tree, c) {
		prop = get_property_by_label(c, label, node);
		if (prop)
			return prop;
	}

	*node = NULL;
	return NULL;
}

/*
 * Depth-first search for a LABEL marker named @label inside any property
 * value under @tree.  On success *node/*prop identify where it was found;
 * on failure both are set to NULL.
 */
struct marker *get_marker_label(struct node *tree, const char *label,
				struct node **node, struct property **prop)
{
	struct marker *m;
	struct property *p;
	struct node *c;

	*node = tree;

	for_each_property(tree, p) {
		*prop = p;
		m = p->val.markers;
		for_each_marker_of_type(m, LABEL)
			if (streq(m->ref, label))
				return m;
	}

	for_each_child(tree, c) {
		m = get_marker_label(c, label, node, prop);
		if (m)
			return m;
	}

	*prop = NULL;
	*node = NULL;
	return NULL;
}

/* Return the direct child of @node called @nodename, or NULL. */
struct node *get_subnode(struct node *node, const char *nodename)
{
	struct node *child;

	for_each_child(node, child)
		if (streq(child->name, nodename))
			return child;

	return NULL;
}

/*
 * Resolve a slash-separated @path relative to @tree; an empty or NULL path
 * names @tree itself.  Deleted nodes resolve to NULL.
 */
struct node *get_node_by_path(struct node *tree, const char *path)
{
	const char *p;
	struct node *child;

	if (!path || ! (*path)) {
		if (tree->deleted)
			return NULL;
		return tree;
	}

	while (path[0] == '/')
		path++;

	p = strchr(path, '/');

	for_each_child(tree, child) {
		if (p && strneq(path, child->name, p-path))
			return get_node_by_path(child, p+1);
		else if (!p && streq(path, child->name))
			return child;
	}

	return NULL;
}

/* Depth-first search for the node carrying @label; NULL if absent. */
struct node *get_node_by_label(struct node *tree, const char *label)
{
	struct node *child, *node;
	struct label *l;

	assert(label && (strlen(label) > 0));

	for_each_label(tree->labels, l)
		if (streq(l->label, label))
			return tree;

	for_each_child(tree, child) {
		node = get_node_by_label(child, label);
		if (node)
			return node;
	}

	return NULL;
}

/* Depth-first search for the node with @phandle; deleted nodes give NULL. */
struct node *get_node_by_phandle(struct node *tree, cell_t phandle)
{
	struct node *child, *node;

	assert((phandle != 0) && (phandle != -1));

	if (tree->phandle == phandle) {
		if (tree->deleted)
			return NULL;
		return tree;
	}

	for_each_child(tree, child) {
		node = get_node_by_phandle(child, phandle);
		if (node)
			return node;
	}

	return NULL;
}

/* Resolve @ref: an absolute path if it starts with '/', else a label. */
struct node *get_node_by_ref(struct node *tree, const char *ref)
{
	if (ref[0] == '/')
		return get_node_by_path(tree, ref);
	else
		return get_node_by_label(tree, ref);
}

/*
 * Return @node's phandle, allocating the next unused value and attaching
 * the phandle property/properties selected by phandle_format if the node
 * does not have one yet.
 */
cell_t get_node_phandle(struct node *root, struct node *node)
{
	static cell_t phandle = 1; /* FIXME: ick, static local */

	if ((node->phandle != 0) && (node->phandle != -1))
		return node->phandle;

	/* scan forward for the first phandle value not yet in use */
	while (get_node_by_phandle(root, phandle))
		phandle++;

	node->phandle = phandle;

	if (!get_property(node, "linux,phandle")
	    && (phandle_format & PHANDLE_LEGACY))
		add_property(node,
			     build_property("linux,phandle",
					    data_append_cell(empty_data, phandle)));

	if (!get_property(node, "phandle")
	    && (phandle_format & PHANDLE_EPAPR))
		add_property(node,
			     build_property("phandle",
					    data_append_cell(empty_data, phandle)));

	/* If the node *does* have a phandle property, we must
	 * be dealing with a self-referencing phandle, which will be
	 * fixed up momentarily in the caller */

	return node->phandle;
}

/*
 * Guess the boot CPU id from the "reg" of the first /cpus child;
 * returns 0 when the tree does not provide enough information.
 */
uint32_t guess_boot_cpuid(struct node *tree)
{
	struct node *cpus, *bootcpu;
	struct property *reg;

	cpus = get_node_by_path(tree, "/cpus");
	if (!cpus)
		return 0;


	bootcpu = cpus->children;
	if (!bootcpu)
		return 0;

	reg = get_property(bootcpu, "reg");
	if (!reg || (reg->val.len != sizeof(uint32_t)))
		return 0;

	/* FIXME: Sanity check node? */

	return propval_cell(reg);
}

/* qsort comparator: order reserve entries by address, then size. */
static int cmp_reserve_info(const void *ax, const void *bx)
{
	const struct reserve_info *a, *b;

	a = *((const struct reserve_info * const *)ax);
	b = *((const struct reserve_info * const *)bx);

	if (a->re.address < b->re.address)
		return -1;
	else if (a->re.address > b->re.address)
		return 1;
	else if (a->re.size < b->re.size)
		return -1;
	else if (a->re.size > b->re.size)
		return 1;
	else
		return 0;
}

/* Sort the reserve list: copy to a table, qsort, then re-link in order. */
static void sort_reserve_entries(struct boot_info *bi)
{
	struct reserve_info *ri, **tbl;
	int n = 0, i = 0;

	for (ri = bi->reservelist;
	     ri;
	     ri = ri->next)
		n++;

	if (n == 0)
		return;

	tbl = xmalloc(n * sizeof(*tbl));

	for (ri = bi->reservelist;
	     ri;
	     ri = ri->next)
		tbl[i++] = ri;

	qsort(tbl, n, sizeof(*tbl), cmp_reserve_info);

	bi->reservelist = tbl[0];
	for (i = 0; i < (n-1); i++)
		tbl[i]->next = tbl[i+1];
	tbl[n-1]->next = NULL;

	free(tbl);
}

/* qsort comparator: order properties by name. */
static int cmp_prop(const void *ax, const void *bx)
{
	const struct property *a, *b;

	a = *((const struct property * const *)ax);
	b = *((const struct property * const *)bx);

	return strcmp(a->name, b->name);
}

/* Sort @node's property list by name (deleted entries included). */
static void sort_properties(struct node *node)
{
	int n = 0, i = 0;
	struct property *prop, **tbl;

	for_each_property_withdel(node, prop)
		n++;

	if (n == 0)
		return;

	tbl = xmalloc(n * sizeof(*tbl));

	for_each_property_withdel(node, prop)
		tbl[i++] = prop;

	qsort(tbl, n, sizeof(*tbl), cmp_prop);

	node->proplist = tbl[0];
	for (i = 0; i < (n-1); i++)
		tbl[i]->next = tbl[i+1];
	tbl[n-1]->next = NULL;

	free(tbl);
}

/* qsort comparator: order subnodes by name. */
static int cmp_subnode(const void *ax, const void *bx)
{
	const struct node *a, *b;

	a = *((const struct node * const *)ax);
	b = *((const struct node * const *)bx);

	return strcmp(a->name, b->name);
}

/* Sort @node's child list by name (deleted entries included). */
static void sort_subnodes(struct node *node)
{
	int n = 0, i = 0;
	struct node *subnode, **tbl;

	for_each_child_withdel(node, subnode)
		n++;

	if (n == 0)
		return;

	tbl = xmalloc(n * sizeof(*tbl));

	for_each_child_withdel(node, subnode)
		tbl[i++] = subnode;

	qsort(tbl, n, sizeof(*tbl), cmp_subnode);

	node->children = tbl[0];
	for (i = 0; i < (n-1); i++)
		tbl[i]->next_sibling = tbl[i+1];
	tbl[n-1]->next_sibling = NULL;

	free(tbl);
}

/* Recursively sort @node's properties and children. */
static void sort_node(struct node *node)
{
	struct node *c;

	sort_properties(node);
	sort_subnodes(node);

	for_each_child_withdel(node, c)
		sort_node(c);
}

/* Sort the whole boot_info: reserve entries and the entire device tree. */
void sort_tree(struct boot_info *bi)
{
	sort_reserve_entries(bi);
	sort_node(bi->dt);
}
gpl-2.0
gokhanmoral/siyah-xt
security/keys/permission.c
7461
2968
/* Key permission checking * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/security.h> #include "internal.h" /** * key_task_permission - Check a key can be used * @key_ref: The key to check. * @cred: The credentials to use. * @perm: The permissions to check for. * * Check to see whether permission is granted to use a key in the desired way, * but permit the security modules to override. * * The caller must hold either a ref on cred or must hold the RCU readlock. * * Returns 0 if successful, -EACCES if access is denied based on the * permissions bits or the LSM check. */ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { struct key *key; key_perm_t kperm; int ret; key = key_ref_to_ptr(key_ref); if (key->user->user_ns != cred->user->user_ns) goto use_other_perms; /* use the second 8-bits of permissions for keys the caller owns */ if (key->uid == cred->fsuid) { kperm = key->perm >> 16; goto use_these_perms; } /* use the third 8-bits of permissions for keys the caller has a group * membership in common with */ if (key->gid != -1 && key->perm & KEY_GRP_ALL) { if (key->gid == cred->fsgid) { kperm = key->perm >> 8; goto use_these_perms; } ret = groups_search(cred->group_info, key->gid); if (ret) { kperm = key->perm >> 8; goto use_these_perms; } } use_other_perms: /* otherwise use the least-significant 8-bits */ kperm = key->perm; use_these_perms: /* use the top 8-bits of permissions for keys the caller possesses * - possessor permissions are additive with other permissions */ if (is_key_possessed(key_ref)) kperm |= key->perm >> 24; kperm = kperm & perm & KEY_ALL; if (kperm != perm) 
return -EACCES; /* let LSM be the final arbiter */ return security_key_permission(key_ref, cred, perm); } EXPORT_SYMBOL(key_task_permission); /** * key_validate - Validate a key. * @key: The key to be validated. * * Check that a key is valid, returning 0 if the key is okay, -EKEYREVOKED if * the key's type has been removed or if the key has been revoked or * -EKEYEXPIRED if the key has expired. */ int key_validate(struct key *key) { struct timespec now; int ret = 0; if (key) { /* check it's still accessible */ ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &key->flags) || test_bit(KEY_FLAG_DEAD, &key->flags)) goto error; /* check it hasn't expired */ ret = 0; if (key->expiry) { now = current_kernel_time(); if (now.tv_sec >= key->expiry) ret = -EKEYEXPIRED; } } error: return ret; } EXPORT_SYMBOL(key_validate);
gpl-2.0
darkknight1812/d851_kernel
security/keys/permission.c
7461
2968
/* Key permission checking * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/security.h> #include "internal.h" /** * key_task_permission - Check a key can be used * @key_ref: The key to check. * @cred: The credentials to use. * @perm: The permissions to check for. * * Check to see whether permission is granted to use a key in the desired way, * but permit the security modules to override. * * The caller must hold either a ref on cred or must hold the RCU readlock. * * Returns 0 if successful, -EACCES if access is denied based on the * permissions bits or the LSM check. */ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { struct key *key; key_perm_t kperm; int ret; key = key_ref_to_ptr(key_ref); if (key->user->user_ns != cred->user->user_ns) goto use_other_perms; /* use the second 8-bits of permissions for keys the caller owns */ if (key->uid == cred->fsuid) { kperm = key->perm >> 16; goto use_these_perms; } /* use the third 8-bits of permissions for keys the caller has a group * membership in common with */ if (key->gid != -1 && key->perm & KEY_GRP_ALL) { if (key->gid == cred->fsgid) { kperm = key->perm >> 8; goto use_these_perms; } ret = groups_search(cred->group_info, key->gid); if (ret) { kperm = key->perm >> 8; goto use_these_perms; } } use_other_perms: /* otherwise use the least-significant 8-bits */ kperm = key->perm; use_these_perms: /* use the top 8-bits of permissions for keys the caller possesses * - possessor permissions are additive with other permissions */ if (is_key_possessed(key_ref)) kperm |= key->perm >> 24; kperm = kperm & perm & KEY_ALL; if (kperm != perm) 
return -EACCES; /* let LSM be the final arbiter */ return security_key_permission(key_ref, cred, perm); } EXPORT_SYMBOL(key_task_permission); /** * key_validate - Validate a key. * @key: The key to be validated. * * Check that a key is valid, returning 0 if the key is okay, -EKEYREVOKED if * the key's type has been removed or if the key has been revoked or * -EKEYEXPIRED if the key has expired. */ int key_validate(struct key *key) { struct timespec now; int ret = 0; if (key) { /* check it's still accessible */ ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &key->flags) || test_bit(KEY_FLAG_DEAD, &key->flags)) goto error; /* check it hasn't expired */ ret = 0; if (key->expiry) { now = current_kernel_time(); if (now.tv_sec >= key->expiry) ret = -EKEYEXPIRED; } } error: return ret; } EXPORT_SYMBOL(key_validate);
gpl-2.0
faust93/Ak-xGenesis-geehrc
arch/c6x/platforms/cache.c
7973
10140
/* * Copyright (C) 2011 Texas Instruments Incorporated * Author: Mark Salter <msalter@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <asm/cache.h> #include <asm/soc.h> /* * Internal Memory Control Registers for caches */ #define IMCR_CCFG 0x0000 #define IMCR_L1PCFG 0x0020 #define IMCR_L1PCC 0x0024 #define IMCR_L1DCFG 0x0040 #define IMCR_L1DCC 0x0044 #define IMCR_L2ALLOC0 0x2000 #define IMCR_L2ALLOC1 0x2004 #define IMCR_L2ALLOC2 0x2008 #define IMCR_L2ALLOC3 0x200c #define IMCR_L2WBAR 0x4000 #define IMCR_L2WWC 0x4004 #define IMCR_L2WIBAR 0x4010 #define IMCR_L2WIWC 0x4014 #define IMCR_L2IBAR 0x4018 #define IMCR_L2IWC 0x401c #define IMCR_L1PIBAR 0x4020 #define IMCR_L1PIWC 0x4024 #define IMCR_L1DWIBAR 0x4030 #define IMCR_L1DWIWC 0x4034 #define IMCR_L1DWBAR 0x4040 #define IMCR_L1DWWC 0x4044 #define IMCR_L1DIBAR 0x4048 #define IMCR_L1DIWC 0x404c #define IMCR_L2WB 0x5000 #define IMCR_L2WBINV 0x5004 #define IMCR_L2INV 0x5008 #define IMCR_L1PINV 0x5028 #define IMCR_L1DWB 0x5040 #define IMCR_L1DWBINV 0x5044 #define IMCR_L1DINV 0x5048 #define IMCR_MAR_BASE 0x8000 #define IMCR_MAR96_111 0x8180 #define IMCR_MAR128_191 0x8200 #define IMCR_MAR224_239 0x8380 #define IMCR_L2MPFAR 0xa000 #define IMCR_L2MPFSR 0xa004 #define IMCR_L2MPFCR 0xa008 #define IMCR_L2MPLK0 0xa100 #define IMCR_L2MPLK1 0xa104 #define IMCR_L2MPLK2 0xa108 #define IMCR_L2MPLK3 0xa10c #define IMCR_L2MPLKCMD 0xa110 #define IMCR_L2MPLKSTAT 0xa114 #define IMCR_L2MPPA_BASE 0xa200 #define IMCR_L1PMPFAR 0xa400 #define IMCR_L1PMPFSR 0xa404 #define IMCR_L1PMPFCR 0xa408 #define IMCR_L1PMPLK0 0xa500 #define IMCR_L1PMPLK1 0xa504 #define IMCR_L1PMPLK2 0xa508 #define IMCR_L1PMPLK3 0xa50c #define IMCR_L1PMPLKCMD 0xa510 #define IMCR_L1PMPLKSTAT 0xa514 #define IMCR_L1PMPPA_BASE 0xa600 #define 
IMCR_L1DMPFAR 0xac00 #define IMCR_L1DMPFSR 0xac04 #define IMCR_L1DMPFCR 0xac08 #define IMCR_L1DMPLK0 0xad00 #define IMCR_L1DMPLK1 0xad04 #define IMCR_L1DMPLK2 0xad08 #define IMCR_L1DMPLK3 0xad0c #define IMCR_L1DMPLKCMD 0xad10 #define IMCR_L1DMPLKSTAT 0xad14 #define IMCR_L1DMPPA_BASE 0xae00 #define IMCR_L2PDWAKE0 0xc040 #define IMCR_L2PDWAKE1 0xc044 #define IMCR_L2PDSLEEP0 0xc050 #define IMCR_L2PDSLEEP1 0xc054 #define IMCR_L2PDSTAT0 0xc060 #define IMCR_L2PDSTAT1 0xc064 /* * CCFG register values and bits */ #define L2MODE_0K_CACHE 0x0 #define L2MODE_32K_CACHE 0x1 #define L2MODE_64K_CACHE 0x2 #define L2MODE_128K_CACHE 0x3 #define L2MODE_256K_CACHE 0x7 #define L2PRIO_URGENT 0x0 #define L2PRIO_HIGH 0x1 #define L2PRIO_MEDIUM 0x2 #define L2PRIO_LOW 0x3 #define CCFG_ID 0x100 /* Invalidate L1P bit */ #define CCFG_IP 0x200 /* Invalidate L1D bit */ static void __iomem *cache_base; /* * L1 & L2 caches generic functions */ #define imcr_get(reg) soc_readl(cache_base + (reg)) #define imcr_set(reg, value) \ do { \ soc_writel((value), cache_base + (reg)); \ soc_readl(cache_base + (reg)); \ } while (0) static void cache_block_operation_wait(unsigned int wc_reg) { /* Wait for completion */ while (imcr_get(wc_reg)) cpu_relax(); } static DEFINE_SPINLOCK(cache_lock); /* * Generic function to perform a block cache operation as * invalidate or writeback/invalidate */ static void cache_block_operation(unsigned int *start, unsigned int *end, unsigned int bar_reg, unsigned int wc_reg) { unsigned long flags; unsigned int wcnt = (L2_CACHE_ALIGN_CNT((unsigned int) end) - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2; unsigned int wc = 0; for (; wcnt; wcnt -= wc, start += wc) { loop: spin_lock_irqsave(&cache_lock, flags); /* * If another cache operation is occuring */ if (unlikely(imcr_get(wc_reg))) { spin_unlock_irqrestore(&cache_lock, flags); /* Wait for previous operation completion */ cache_block_operation_wait(wc_reg); /* Try again */ goto loop; } imcr_set(bar_reg, 
L2_CACHE_ALIGN_LOW((unsigned int) start)); if (wcnt > 0xffff) wc = 0xffff; else wc = wcnt; /* Set word count value in the WC register */ imcr_set(wc_reg, wc & 0xffff); spin_unlock_irqrestore(&cache_lock, flags); /* Wait for completion */ cache_block_operation_wait(wc_reg); } } static void cache_block_operation_nowait(unsigned int *start, unsigned int *end, unsigned int bar_reg, unsigned int wc_reg) { unsigned long flags; unsigned int wcnt = (L2_CACHE_ALIGN_CNT((unsigned int) end) - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2; unsigned int wc = 0; for (; wcnt; wcnt -= wc, start += wc) { spin_lock_irqsave(&cache_lock, flags); imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start)); if (wcnt > 0xffff) wc = 0xffff; else wc = wcnt; /* Set word count value in the WC register */ imcr_set(wc_reg, wc & 0xffff); spin_unlock_irqrestore(&cache_lock, flags); /* Don't wait for completion on last cache operation */ if (wcnt > 0xffff) cache_block_operation_wait(wc_reg); } } /* * L1 caches management */ /* * Disable L1 caches */ void L1_cache_off(void) { unsigned int dummy; imcr_set(IMCR_L1PCFG, 0); dummy = imcr_get(IMCR_L1PCFG); imcr_set(IMCR_L1DCFG, 0); dummy = imcr_get(IMCR_L1DCFG); } /* * Enable L1 caches */ void L1_cache_on(void) { unsigned int dummy; imcr_set(IMCR_L1PCFG, 7); dummy = imcr_get(IMCR_L1PCFG); imcr_set(IMCR_L1DCFG, 7); dummy = imcr_get(IMCR_L1DCFG); } /* * L1P global-invalidate all */ void L1P_cache_global_invalidate(void) { unsigned int set = 1; imcr_set(IMCR_L1PINV, set); while (imcr_get(IMCR_L1PINV) & 1) cpu_relax(); } /* * L1D global-invalidate all * * Warning: this operation causes all updated data in L1D to * be discarded rather than written back to the lower levels of * memory */ void L1D_cache_global_invalidate(void) { unsigned int set = 1; imcr_set(IMCR_L1DINV, set); while (imcr_get(IMCR_L1DINV) & 1) cpu_relax(); } void L1D_cache_global_writeback(void) { unsigned int set = 1; imcr_set(IMCR_L1DWB, set); while (imcr_get(IMCR_L1DWB) & 1) cpu_relax(); 
} void L1D_cache_global_writeback_invalidate(void) { unsigned int set = 1; imcr_set(IMCR_L1DWBINV, set); while (imcr_get(IMCR_L1DWBINV) & 1) cpu_relax(); } /* * L2 caches management */ /* * Set L2 operation mode */ void L2_cache_set_mode(unsigned int mode) { unsigned int ccfg = imcr_get(IMCR_CCFG); /* Clear and set the L2MODE bits in CCFG */ ccfg &= ~7; ccfg |= (mode & 7); imcr_set(IMCR_CCFG, ccfg); ccfg = imcr_get(IMCR_CCFG); } /* * L2 global-writeback and global-invalidate all */ void L2_cache_global_writeback_invalidate(void) { imcr_set(IMCR_L2WBINV, 1); while (imcr_get(IMCR_L2WBINV)) cpu_relax(); } /* * L2 global-writeback all */ void L2_cache_global_writeback(void) { imcr_set(IMCR_L2WB, 1); while (imcr_get(IMCR_L2WB)) cpu_relax(); } /* * Cacheability controls */ void enable_caching(unsigned long start, unsigned long end) { unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2); unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2); for (; mar <= mar_e; mar += 4) imcr_set(mar, imcr_get(mar) | 1); } void disable_caching(unsigned long start, unsigned long end) { unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2); unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2); for (; mar <= mar_e; mar += 4) imcr_set(mar, imcr_get(mar) & ~1); } /* * L1 block operations */ void L1P_cache_block_invalidate(unsigned int start, unsigned int end) { cache_block_operation((unsigned int *) start, (unsigned int *) end, IMCR_L1PIBAR, IMCR_L1PIWC); } void L1D_cache_block_invalidate(unsigned int start, unsigned int end) { cache_block_operation((unsigned int *) start, (unsigned int *) end, IMCR_L1DIBAR, IMCR_L1DIWC); } void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end) { cache_block_operation((unsigned int *) start, (unsigned int *) end, IMCR_L1DWIBAR, IMCR_L1DWIWC); } void L1D_cache_block_writeback(unsigned int start, unsigned int end) { cache_block_operation((unsigned int *) start, (unsigned int *) end, IMCR_L1DWBAR, IMCR_L1DWWC); } /* * L2 block 
operations */ void L2_cache_block_invalidate(unsigned int start, unsigned int end) { cache_block_operation((unsigned int *) start, (unsigned int *) end, IMCR_L2IBAR, IMCR_L2IWC); } void L2_cache_block_writeback(unsigned int start, unsigned int end) { cache_block_operation((unsigned int *) start, (unsigned int *) end, IMCR_L2WBAR, IMCR_L2WWC); } void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end) { cache_block_operation((unsigned int *) start, (unsigned int *) end, IMCR_L2WIBAR, IMCR_L2WIWC); } void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end) { cache_block_operation_nowait((unsigned int *) start, (unsigned int *) end, IMCR_L2IBAR, IMCR_L2IWC); } void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end) { cache_block_operation_nowait((unsigned int *) start, (unsigned int *) end, IMCR_L2WBAR, IMCR_L2WWC); } void L2_cache_block_writeback_invalidate_nowait(unsigned int start, unsigned int end) { cache_block_operation_nowait((unsigned int *) start, (unsigned int *) end, IMCR_L2WIBAR, IMCR_L2WIWC); } /* * L1 and L2 caches configuration */ void __init c6x_cache_init(void) { struct device_node *node; node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache"); if (!node) return; cache_base = of_iomap(node, 0); of_node_put(node); if (!cache_base) return; /* Set L2 caches on the the whole L2 SRAM memory */ L2_cache_set_mode(L2MODE_SIZE); /* Enable L1 */ L1_cache_on(); }
gpl-2.0
mohamaadhosein/VIPER-KERNEL-D802
arch/parisc/math-emu/fpudispatch.c
8741
39225
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/fp/fpudispatch.c $Revision: 1.1 $ * * Purpose: * <<please update with a synopsis of the functionality provided by this file>> * * External Interfaces: * <<the following list was autogenerated, please review>> * emfpudispatch(ir, dummy1, dummy2, fpregs) * fpudispatch(ir, excp_code, holder, fpregs) * * Internal Interfaces: * <<the following list was autogenerated, please review>> * static u_int decode_06(u_int, u_int *) * static u_int decode_0c(u_int, u_int, u_int, u_int *) * static u_int decode_0e(u_int, u_int, u_int, u_int *) * static u_int decode_26(u_int, u_int *) * static u_int decode_2e(u_int, u_int *) * static void update_status_cbit(u_int *, u_int, u_int, u_int) * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #define FPUDEBUG 0 #include "float.h" #include <linux/bug.h> #include <linux/kernel.h> #include <asm/processor.h> /* #include <sys/debug.h> */ /* #include <machine/sys/mdep_private.h> */ #define COPR_INST 0x30000000 /* * definition of extru macro. 
If pos and len are constants, the compiler * will generate an extru instruction when optimized */ #define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1)) /* definitions of bit field locations in the instruction */ #define fpmajorpos 5 #define fpr1pos 10 #define fpr2pos 15 #define fptpos 31 #define fpsubpos 18 #define fpclass1subpos 16 #define fpclasspos 22 #define fpfmtpos 20 #define fpdfpos 18 #define fpnulpos 26 /* * the following are the extra bits for the 0E major op */ #define fpxr1pos 24 #define fpxr2pos 19 #define fpxtpos 25 #define fpxpos 23 #define fp0efmtpos 20 /* * the following are for the multi-ops */ #define fprm1pos 10 #define fprm2pos 15 #define fptmpos 31 #define fprapos 25 #define fptapos 20 #define fpmultifmt 26 /* * the following are for the fused FP instructions */ /* fprm1pos 10 */ /* fprm2pos 15 */ #define fpraupos 18 #define fpxrm2pos 19 /* fpfmtpos 20 */ #define fpralpos 23 #define fpxrm1pos 24 /* fpxtpos 25 */ #define fpfusedsubop 26 /* fptpos 31 */ /* * offset to constant zero in the FP emulation registers */ #define fpzeroreg (32*sizeof(double)/sizeof(u_int)) /* * extract the major opcode from the instruction */ #define get_major(op) extru(op,fpmajorpos,6) /* * extract the two bit class field from the FP instruction. The class is at bit * positions 21-22 */ #define get_class(op) extru(op,fpclasspos,2) /* * extract the 3 bit subop field. For all but class 1 instructions, it is * located at bit positions 16-18 */ #define get_subop(op) extru(op,fpsubpos,3) /* * extract the 2 or 3 bit subop field from class 1 instructions. 
It is located * at bit positions 15-16 (PA1.1) or 14-16 (PA2.0) */ #define get_subop1_PA1_1(op) extru(op,fpclass1subpos,2) /* PA89 (1.1) fmt */ #define get_subop1_PA2_0(op) extru(op,fpclass1subpos,3) /* PA 2.0 fmt */ /* definitions of unimplemented exceptions */ #define MAJOR_0C_EXCP 0x09 #define MAJOR_0E_EXCP 0x0b #define MAJOR_06_EXCP 0x03 #define MAJOR_26_EXCP 0x23 #define MAJOR_2E_EXCP 0x2b #define PA83_UNIMP_EXCP 0x01 /* * Special Defines for TIMEX specific code */ #define FPU_TYPE_FLAG_POS (EM_FPU_TYPE_OFFSET>>2) #define TIMEX_ROLEX_FPU_MASK (TIMEX_EXTEN_FLAG|ROLEX_EXTEN_FLAG) /* * Static function definitions */ #define _PROTOTYPES #if defined(_PROTOTYPES) || defined(_lint) static u_int decode_0c(u_int, u_int, u_int, u_int *); static u_int decode_0e(u_int, u_int, u_int, u_int *); static u_int decode_06(u_int, u_int *); static u_int decode_26(u_int, u_int *); static u_int decode_2e(u_int, u_int *); static void update_status_cbit(u_int *, u_int, u_int, u_int); #else /* !_PROTOTYPES&&!_lint */ static u_int decode_0c(); static u_int decode_0e(); static u_int decode_06(); static u_int decode_26(); static u_int decode_2e(); static void update_status_cbit(); #endif /* _PROTOTYPES&&!_lint */ #define VASSERT(x) static void parisc_linux_get_fpu_type(u_int fpregs[]) { /* on pa-linux the fpu type is not filled in by the * caller; it is constructed here */ if (boot_cpu_data.cpu_type == pcxs) fpregs[FPU_TYPE_FLAG_POS] = TIMEX_EXTEN_FLAG; else if (boot_cpu_data.cpu_type == pcxt || boot_cpu_data.cpu_type == pcxt_) fpregs[FPU_TYPE_FLAG_POS] = ROLEX_EXTEN_FLAG; else if (boot_cpu_data.cpu_type >= pcxu) fpregs[FPU_TYPE_FLAG_POS] = PA2_0_FPU_FLAG; } /* * this routine will decode the excepting floating point instruction and * call the approiate emulation routine. 
* It is called by decode_fpu with the following parameters: * fpudispatch(current_ir, unimplemented_code, 0, &Fpu_register) * where current_ir is the instruction to be emulated, * unimplemented_code is the exception_code that the hardware generated * and &Fpu_register is the address of emulated FP reg 0. */ u_int fpudispatch(u_int ir, u_int excp_code, u_int holder, u_int fpregs[]) { u_int class, subop; u_int fpu_type_flags; /* All FP emulation code assumes that ints are 4-bytes in length */ VASSERT(sizeof(int) == 4); parisc_linux_get_fpu_type(fpregs); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ class = get_class(ir); if (class == 1) { if (fpu_type_flags & PA2_0_FPU_FLAG) subop = get_subop1_PA2_0(ir); else subop = get_subop1_PA1_1(ir); } else subop = get_subop(ir); if (FPUDEBUG) printk("class %d subop %d\n", class, subop); switch (excp_code) { case MAJOR_0C_EXCP: case PA83_UNIMP_EXCP: return(decode_0c(ir,class,subop,fpregs)); case MAJOR_0E_EXCP: return(decode_0e(ir,class,subop,fpregs)); case MAJOR_06_EXCP: return(decode_06(ir,fpregs)); case MAJOR_26_EXCP: return(decode_26(ir,fpregs)); case MAJOR_2E_EXCP: return(decode_2e(ir,fpregs)); default: /* "crashme Night Gallery painting nr 2. (asm_crash.s). * This was fixed for multi-user kernels, but * workstation kernels had a panic here. This allowed * any arbitrary user to panic the kernel by executing * setting the FP exception registers to strange values * and generating an emulation trap. The emulation and * exception code must never be able to panic the * kernel. 
*/ return(UNIMPLEMENTEDEXCEPTION); } } /* * this routine is called by $emulation_trap to emulate a coprocessor * instruction if one doesn't exist */ u_int emfpudispatch(u_int ir, u_int dummy1, u_int dummy2, u_int fpregs[]) { u_int class, subop, major; u_int fpu_type_flags; /* All FP emulation code assumes that ints are 4-bytes in length */ VASSERT(sizeof(int) == 4); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ major = get_major(ir); class = get_class(ir); if (class == 1) { if (fpu_type_flags & PA2_0_FPU_FLAG) subop = get_subop1_PA2_0(ir); else subop = get_subop1_PA1_1(ir); } else subop = get_subop(ir); switch (major) { case 0x0C: return(decode_0c(ir,class,subop,fpregs)); case 0x0E: return(decode_0e(ir,class,subop,fpregs)); case 0x06: return(decode_06(ir,fpregs)); case 0x26: return(decode_26(ir,fpregs)); case 0x2E: return(decode_2e(ir,fpregs)); default: return(PA83_UNIMP_EXCP); } } static u_int decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) { u_int r1,r2,t; /* operand register offsets */ u_int fmt; /* also sf for class 1 conversions */ u_int df; /* for class 1 conversions */ u_int *status; u_int retval, local_status; u_int fpu_type_flags; if (ir == COPR_INST) { fpregs[0] = EMULATION_VERSION << 11; return(NOEXCEPTION); } status = &fpregs[0]; /* fp status register */ local_status = fpregs[0]; /* and local copy */ r1 = extru(ir,fpr1pos,5) * sizeof(double)/sizeof(u_int); if (r1 == 0) /* map fr0 source to constant zero */ r1 = fpzeroreg; t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int); if (t == 0 && class != 2) /* don't allow fr0 as a dest */ return(MAJOR_0C_EXCP); fmt = extru(ir,fpfmtpos,2); /* get fmt completer */ switch (class) { case 0: switch (subop) { case 0: /* COPR 0,0 emulated above*/ case 1: return(MAJOR_0C_EXCP); case 2: /* FCPY */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: 
/* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1]; return(NOEXCEPTION); } case 3: /* FABS */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and clear sign bit */ fpregs[t] = fpregs[r1] & 0x7fffffff; return(NOEXCEPTION); } case 6: /* FNEG */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and invert sign bit */ fpregs[t] = fpregs[r1] ^ 0x80000000; return(NOEXCEPTION); } case 7: /* FNEGABS */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and set sign bit */ fpregs[t] = fpregs[r1] | 0x80000000; return(NOEXCEPTION); } case 4: /* FSQRT */ switch (fmt) { case 0: return(sgl_fsqrt(&fpregs[r1],0, &fpregs[t],status)); case 1: return(dbl_fsqrt(&fpregs[r1],0, &fpregs[t],status)); case 2: case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 5: /* FRND */ switch (fmt) { case 0: return(sgl_frnd(&fpregs[r1],0, &fpregs[t],status)); case 1: return(dbl_frnd(&fpregs[r1],0, &fpregs[t],status)); case 2: case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } /* end of switch (subop) */ case 1: /* class 1 */ df = extru(ir,fpdfpos,2); /* get dest format */ if ((df & 2) || (fmt & 2)) { /* * fmt's 2 and 3 are illegal of not implemented * quad conversions */ return(MAJOR_0C_EXCP); } /* * encode source and dest formats into 2 bits. * high bit is source, low bit is dest. 
* bit = 1 --> double precision */ fmt = (fmt << 1) | df; switch (subop) { case 0: /* FCNVFF */ switch(fmt) { case 0: /* sgl/sgl */ return(MAJOR_0C_EXCP); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(MAJOR_0C_EXCP); } case 1: /* FCNVXF */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); } case 2: /* FCNVFX */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); } case 3: /* FCNVFXT */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); } case 5: /* FCNVUF (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); } case 6: /* FCNVFU (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 1: 
/* sgl/dbl */ return(sgl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); } case 7: /* FCNVFUT (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); } case 4: /* undefined */ return(MAJOR_0C_EXCP); } /* end of switch subop */ case 2: /* class 2 */ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int); if (r2 == 0) r2 = fpzeroreg; if (fpu_type_flags & PA2_0_FPU_FLAG) { /* FTEST if nullify bit set, otherwise FCMP */ if (extru(ir, fpnulpos, 1)) { /* FTEST */ switch (fmt) { case 0: /* * arg0 is not used * second param is the t field used for * ftest,acc and ftest,rej * third param is the subop (y-field) */ BUG(); /* Unsupported * return(ftest(0L,extru(ir,fptpos,5), * &fpregs[0],subop)); */ case 1: case 2: case 3: return(MAJOR_0C_EXCP); } } else { /* FCMP */ switch (fmt) { case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } } /* end of if for PA2.0 */ else { /* PA1.0 & PA1.1 */ switch (subop) { case 2: case 3: case 4: case 5: case 6: case 7: return(MAJOR_0C_EXCP); case 0: /* FCMP */ switch (fmt) { case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); 
update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 1: /* FTEST */ switch (fmt) { case 0: /* * arg0 is not used * second param is the t field used for * ftest,acc and ftest,rej * third param is the subop (y-field) */ BUG(); /* unsupported * return(ftest(0L,extru(ir,fptpos,5), * &fpregs[0],subop)); */ case 1: case 2: case 3: return(MAJOR_0C_EXCP); } } /* end of switch subop */ } /* end of else for PA1.0 & PA1.1 */ case 3: /* class 3 */ r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int); if (r2 == 0) r2 = fpzeroreg; switch (subop) { case 5: case 6: case 7: return(MAJOR_0C_EXCP); case 0: /* FADD */ switch (fmt) { case 0: return(sgl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 1: /* FSUB */ switch (fmt) { case 0: return(sgl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 2: /* FMPY */ switch (fmt) { case 0: return(sgl_fmpy(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fmpy(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 3: /* FDIV */ switch (fmt) { case 0: return(sgl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 4: /* FREM */ switch (fmt) { case 0: return(sgl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); 
case 1: return(dbl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } /* end of class 3 switch */ } /* end of switch(class) */ /* If we get here, something is really wrong! */ return(MAJOR_0C_EXCP); } static u_int decode_0e(ir,class,subop,fpregs) u_int ir,class,subop; u_int fpregs[]; { u_int r1,r2,t; /* operand register offsets */ u_int fmt; /* also sf for class 1 conversions */ u_int df; /* dest format for class 1 conversions */ u_int *status; u_int retval, local_status; u_int fpu_type_flags; status = &fpregs[0]; local_status = fpregs[0]; r1 = ((extru(ir,fpr1pos,5)<<1)|(extru(ir,fpxr1pos,1))); if (r1 == 0) r1 = fpzeroreg; t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1))); if (t == 0 && class != 2) return(MAJOR_0E_EXCP); if (class < 2) /* class 0 or 1 has 2 bit fmt */ fmt = extru(ir,fpfmtpos,2); else /* class 2 and 3 have 1 bit fmt */ fmt = extru(ir,fp0efmtpos,1); /* * An undefined combination, double precision accessing the * right half of a FPR, can get us into trouble. * Let's just force proper alignment on it. 
*/ if (fmt == DBL) { r1 &= ~1; if (class != 1) t &= ~1; } switch (class) { case 0: switch (subop) { case 0: /* unimplemented */ case 1: return(MAJOR_0E_EXCP); case 2: /* FCPY */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1]; return(NOEXCEPTION); } case 3: /* FABS */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] & 0x7fffffff; return(NOEXCEPTION); } case 6: /* FNEG */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] ^ 0x80000000; return(NOEXCEPTION); } case 7: /* FNEGABS */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] | 0x80000000; return(NOEXCEPTION); } case 4: /* FSQRT */ switch (fmt) { case 0: return(sgl_fsqrt(&fpregs[r1],0, &fpregs[t], status)); case 1: return(dbl_fsqrt(&fpregs[r1],0, &fpregs[t], status)); case 2: case 3: return(MAJOR_0E_EXCP); } case 5: /* FRMD */ switch (fmt) { case 0: return(sgl_frnd(&fpregs[r1],0, &fpregs[t], status)); case 1: return(dbl_frnd(&fpregs[r1],0, &fpregs[t], status)); case 2: case 3: return(MAJOR_0E_EXCP); } } /* end of switch (subop */ case 1: /* class 1 */ df = extru(ir,fpdfpos,2); /* get dest format */ /* * Fix Crashme problem (writing to 31R in double precision) * here too. 
*/ if (df == DBL) { t &= ~1; } if ((df & 2) || (fmt & 2)) return(MAJOR_0E_EXCP); fmt = (fmt << 1) | df; switch (subop) { case 0: /* FCNVFF */ switch(fmt) { case 0: /* sgl/sgl */ return(MAJOR_0E_EXCP); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(MAJOR_0E_EXCP); } case 1: /* FCNVXF */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); } case 2: /* FCNVFX */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); } case 3: /* FCNVFXT */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); } case 5: /* FCNVUF (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); } case 6: /* FCNVFU (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ 
return(sgl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); } case 7: /* FCNVFUT (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); } case 4: /* undefined */ return(MAJOR_0C_EXCP); } /* end of switch subop */ case 2: /* class 2 */ /* * Be careful out there. * Crashme can generate cases where FR31R is specified * as the source or target of a double precision operation. * Since we just pass the address of the floating-point * register to the emulation routines, this can cause * corruption of fpzeroreg. 
*/ if (fmt == DBL) r2 = (extru(ir,fpr2pos,5)<<1); else r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1))); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; if (r2 == 0) r2 = fpzeroreg; if (fpu_type_flags & PA2_0_FPU_FLAG) { /* FTEST if nullify bit set, otherwise FCMP */ if (extru(ir, fpnulpos, 1)) { /* FTEST */ /* not legal */ return(MAJOR_0E_EXCP); } else { /* FCMP */ switch (fmt) { /* * fmt is only 1 bit long */ case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); } } } /* end of if for PA2.0 */ else { /* PA1.0 & PA1.1 */ switch (subop) { case 1: case 2: case 3: case 4: case 5: case 6: case 7: return(MAJOR_0E_EXCP); case 0: /* FCMP */ switch (fmt) { /* * fmt is only 1 bit long */ case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); } } /* end of switch subop */ } /* end of else for PA1.0 & PA1.1 */ case 3: /* class 3 */ /* * Be careful out there. * Crashme can generate cases where FR31R is specified * as the source or target of a double precision operation. * Since we just pass the address of the floating-point * register to the emulation routines, this can cause * corruption of fpzeroreg. 
*/ if (fmt == DBL) r2 = (extru(ir,fpr2pos,5)<<1); else r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1))); if (r2 == 0) r2 = fpzeroreg; switch (subop) { case 5: case 6: case 7: return(MAJOR_0E_EXCP); /* * Note that fmt is only 1 bit for class 3 */ case 0: /* FADD */ switch (fmt) { case 0: return(sgl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 1: /* FSUB */ switch (fmt) { case 0: return(sgl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 2: /* FMPY or XMPYU */ /* * check for integer multiply (x bit set) */ if (extru(ir,fpxpos,1)) { /* * emulate XMPYU */ switch (fmt) { case 0: /* * bad instruction if t specifies * the right half of a register */ if (t & 1) return(MAJOR_0E_EXCP); BUG(); /* unsupported * impyu(&fpregs[r1],&fpregs[r2], * &fpregs[t]); */ return(NOEXCEPTION); case 1: return(MAJOR_0E_EXCP); } } else { /* FMPY */ switch (fmt) { case 0: return(sgl_fmpy(&fpregs[r1], &fpregs[r2],&fpregs[t],status)); case 1: return(dbl_fmpy(&fpregs[r1], &fpregs[r2],&fpregs[t],status)); } } case 3: /* FDIV */ switch (fmt) { case 0: return(sgl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 4: /* FREM */ switch (fmt) { case 0: return(sgl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } } /* end of class 3 switch */ } /* end of switch(class) */ /* If we get here, something is really wrong! 
*/ return(MAJOR_0E_EXCP); } /* * routine to decode the 06 (FMPYADD and FMPYCFXT) instruction */ static u_int decode_06(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, tm, ra, ta; /* operands */ u_int fmt; u_int error = 0; u_int status; u_int fpu_type_flags; union { double dbl; float flt; struct { u_int i1; u_int i2; } ints; } mtmp, atmp; status = fpregs[0]; /* use a local copy of status reg */ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */ if (fmt == 0) { /* DBL */ rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int); if (tm == 0) return(MAJOR_06_EXCP); ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int); ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int); if (ta == 0) return(MAJOR_06_EXCP); if (fpu_type_flags & TIMEX_ROLEX_FPU_MASK) { if (ra == 0) { /* special case FMPYCFXT, see sgl case below */ if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2], &mtmp.ints.i1,&status)) error = 1; if (dbl_to_sgl_fcnvfxt(&fpregs[ta], &atmp.ints.i1,&atmp.ints.i1,&status)) error = 1; } else { if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } } else { if (ra == 0) ra = fpzeroreg; if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } if (error) return(MAJOR_06_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[tm+1] = mtmp.ints.i2; fpregs[ta] = atmp.ints.i1; fpregs[ta+1] = atmp.ints.i2; fpregs[0] = status; return(NOEXCEPTION); } } else { /* SGL */ /* * calculate offsets for single precision numbers * See table 6-14 in PA-89 architecture for mapping */ rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get 
offset */ rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */ rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */ rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */ tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */ tm |= extru(ir,fptmpos-4,1); /* add right word offset */ ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */ ra |= extru(ir,fprapos-4,1); /* add right word offset */ ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */ ta |= extru(ir,fptapos-4,1); /* add right word offset */ if (ra == 0x20 &&(fpu_type_flags & TIMEX_ROLEX_FPU_MASK)) { /* special case FMPYCFXT (really 0) * This instruction is only present on the Timex and * Rolex fpu's in so if it is the special case and * one of these fpu's we run the FMPYCFXT instruction */ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (sgl_to_sgl_fcnvfxt(&fpregs[ta],&atmp.ints.i1, &atmp.ints.i1,&status)) error = 1; } else { if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (sgl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } if (error) return(MAJOR_06_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[ta] = atmp.ints.i1; fpregs[0] = status; return(NOEXCEPTION); } } } /* * routine to decode the 26 (FMPYSUB) instruction */ static u_int decode_26(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, tm, ra, ta; /* operands */ u_int fmt; u_int error = 0; u_int status; union { double dbl; float flt; struct { u_int i1; u_int i2; } ints; } mtmp, atmp; status = fpregs[0]; fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */ if (fmt == 0) { /* DBL */ rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int); if (tm == 0) return(MAJOR_26_EXCP); ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int); if (ra 
== 0) return(MAJOR_26_EXCP); ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int); if (ta == 0) return(MAJOR_26_EXCP); if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status)) error = 1; if (dbl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status)) error = 1; if (error) return(MAJOR_26_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[tm+1] = mtmp.ints.i2; fpregs[ta] = atmp.ints.i1; fpregs[ta+1] = atmp.ints.i2; fpregs[0] = status; return(NOEXCEPTION); } } else { /* SGL */ /* * calculate offsets for single precision numbers * See table 6-14 in PA-89 architecture for mapping */ rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get offset */ rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */ rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */ rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */ tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */ tm |= extru(ir,fptmpos-4,1); /* add right word offset */ ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */ ra |= extru(ir,fprapos-4,1); /* add right word offset */ ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */ ta |= extru(ir,fptapos-4,1); /* add right word offset */ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status)) error = 1; if (sgl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status)) error = 1; if (error) return(MAJOR_26_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[ta] = atmp.ints.i1; fpregs[0] = status; return(NOEXCEPTION); } } } /* * routine to decode the 2E (FMPYFADD,FMPYNFADD) instructions */ static u_int decode_2e(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, ra, t; /* operands */ u_int fmt; fmt = extru(ir,fpfmtpos,1); /* get fmt completer */ if (fmt == DBL) { /* DBL */ rm1 = extru(ir,fprm1pos,5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir,fprm2pos,5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; ra = 
((extru(ir,fpraupos,3)<<2)|(extru(ir,fpralpos,3)>>1)) * sizeof(double)/sizeof(u_int); if (ra == 0) ra = fpzeroreg; t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int); if (t == 0) return(MAJOR_2E_EXCP); if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */ return(dbl_fmpynfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } else { return(dbl_fmpyfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } } /* end DBL */ else { /* SGL */ rm1 = (extru(ir,fprm1pos,5)<<1)|(extru(ir,fpxrm1pos,1)); if (rm1 == 0) rm1 = fpzeroreg; rm2 = (extru(ir,fprm2pos,5)<<1)|(extru(ir,fpxrm2pos,1)); if (rm2 == 0) rm2 = fpzeroreg; ra = (extru(ir,fpraupos,3)<<3)|extru(ir,fpralpos,3); if (ra == 0) ra = fpzeroreg; t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1))); if (t == 0) return(MAJOR_2E_EXCP); if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */ return(sgl_fmpynfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } else { return(sgl_fmpyfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } } /* end SGL */ } /* * update_status_cbit * * This routine returns the correct FP status register value in * *status, based on the C-bit & V-bit returned by the FCMP * emulation routine in new_status. The architecture type * (PA83, PA89 or PA2.0) is available in fpu_type. The y_field * and the architecture type are used to determine what flavor * of FCMP is being emulated. */ static void update_status_cbit(status, new_status, fpu_type, y_field) u_int *status, new_status; u_int fpu_type; u_int y_field; { /* * For PA89 FPU's which implement the Compare Queue and * for PA2.0 FPU's, update the Compare Queue if the y-field = 0, * otherwise update the specified bit in the Compare Array. * Note that the y-field will always be 0 for non-PA2.0 FPU's. 
*/ if ((fpu_type & TIMEX_EXTEN_FLAG) || (fpu_type & ROLEX_EXTEN_FLAG) || (fpu_type & PA2_0_FPU_FLAG)) { if (y_field == 0) { *status = ((*status & 0x04000000) >> 5) | /* old Cbit */ ((*status & 0x003ff000) >> 1) | /* old CQ */ (new_status & 0xffc007ff); /* all other bits*/ } else { *status = (*status & 0x04000000) | /* old Cbit */ ((new_status & 0x04000000) >> (y_field+4)) | (new_status & ~0x04000000 & /* other bits */ ~(0x04000000 >> (y_field+4))); } } /* if PA83, just update the C-bit */ else { *status = new_status; } }
gpl-2.0
carz2/cm-kernel
drivers/net/usb/hso.c
38
88008
/****************************************************************************** * * Driver for Option High Speed Mobile Devices. * * Copyright (C) 2008 Option International * Filip Aben <f.aben@option.com> * Denis Joseph Barrow <d.barow@option.com> * Jan Dumon <j.dumon@option.com> * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd) * <ajb@spheresystems.co.uk> * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (C) 2008 Novell, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA * * *****************************************************************************/ /****************************************************************************** * * Description of the device: * * Interface 0: Contains the IP network interface on the bulk end points. * The multiplexed serial ports are using the interrupt and * control endpoints. * Interrupt contains a bitmap telling which multiplexed * serialport needs servicing. * * Interface 1: Diagnostics port, uses bulk only, do not submit urbs until the * port is opened, as this have a huge impact on the network port * throughput. * * Interface 2: Standard modem interface - circuit switched interface, this * can be used to make a standard ppp connection however it * should not be used in conjunction with the IP network interface * enabled for USB performance reasons i.e. if using this set * ideally disable_net=1. 
* *****************************************************************************/ #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/ethtool.h> #include <linux/usb.h> #include <linux/timer.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/kmod.h> #include <linux/rfkill.h> #include <linux/ip.h> #include <linux/uaccess.h> #include <linux/usb/cdc.h> #include <net/arp.h> #include <asm/byteorder.h> #include <linux/serial_core.h> #include <linux/serial.h> #define DRIVER_VERSION "1.2" #define MOD_AUTHOR "Option Wireless" #define MOD_DESCRIPTION "USB High Speed Option driver" #define MOD_LICENSE "GPL" #define HSO_MAX_NET_DEVICES 10 #define HSO__MAX_MTU 2048 #define DEFAULT_MTU 1500 #define DEFAULT_MRU 1500 #define CTRL_URB_RX_SIZE 1024 #define CTRL_URB_TX_SIZE 64 #define BULK_URB_RX_SIZE 4096 #define BULK_URB_TX_SIZE 8192 #define MUX_BULK_RX_BUF_SIZE HSO__MAX_MTU #define MUX_BULK_TX_BUF_SIZE HSO__MAX_MTU #define MUX_BULK_RX_BUF_COUNT 4 #define USB_TYPE_OPTION_VENDOR 0x20 /* These definitions are used with the struct hso_net flags element */ /* - use *_bit operations on it. (bit indices not values.) */ #define HSO_NET_RUNNING 0 #define HSO_NET_TX_TIMEOUT (HZ*10) #define HSO_SERIAL_MAGIC 0x48534f31 /* Number of ttys to handle */ #define HSO_SERIAL_TTY_MINORS 256 #define MAX_RX_URBS 2 static inline struct hso_serial *get_serial_by_tty(struct tty_struct *tty) { if (tty) return tty->driver_data; return NULL; } /*****************************************************************************/ /* Debugging functions */ /*****************************************************************************/ #define D__(lvl_, fmt, arg...) \ do { \ printk(lvl_ "[%d:%s]: " fmt "\n", \ __LINE__, __func__, ## arg); \ } while (0) #define D_(lvl, args...) 
\ do { \ if (lvl & debug) \ D__(KERN_INFO, args); \ } while (0) #define D1(args...) D_(0x01, ##args) #define D2(args...) D_(0x02, ##args) #define D3(args...) D_(0x04, ##args) #define D4(args...) D_(0x08, ##args) #define D5(args...) D_(0x10, ##args) /*****************************************************************************/ /* Enumerators */ /*****************************************************************************/ enum pkt_parse_state { WAIT_IP, WAIT_DATA, WAIT_SYNC }; /*****************************************************************************/ /* Structs */ /*****************************************************************************/ struct hso_shared_int { struct usb_endpoint_descriptor *intr_endp; void *shared_intr_buf; struct urb *shared_intr_urb; struct usb_device *usb; int use_count; int ref_count; struct mutex shared_int_lock; }; struct hso_net { struct hso_device *parent; struct net_device *net; struct rfkill *rfkill; struct usb_endpoint_descriptor *in_endp; struct usb_endpoint_descriptor *out_endp; struct urb *mux_bulk_rx_urb_pool[MUX_BULK_RX_BUF_COUNT]; struct urb *mux_bulk_tx_urb; void *mux_bulk_rx_buf_pool[MUX_BULK_RX_BUF_COUNT]; void *mux_bulk_tx_buf; struct sk_buff *skb_rx_buf; struct sk_buff *skb_tx_buf; enum pkt_parse_state rx_parse_state; spinlock_t net_lock; unsigned short rx_buf_size; unsigned short rx_buf_missing; struct iphdr rx_ip_hdr; unsigned long flags; }; enum rx_ctrl_state{ RX_IDLE, RX_SENT, RX_PENDING }; #define BM_REQUEST_TYPE (0xa1) #define B_NOTIFICATION (0x20) #define W_VALUE (0x0) #define W_INDEX (0x2) #define W_LENGTH (0x2) #define B_OVERRUN (0x1<<6) #define B_PARITY (0x1<<5) #define B_FRAMING (0x1<<4) #define B_RING_SIGNAL (0x1<<3) #define B_BREAK (0x1<<2) #define B_TX_CARRIER (0x1<<1) #define B_RX_CARRIER (0x1<<0) struct hso_serial_state_notification { u8 bmRequestType; u8 bNotification; u16 wValue; u16 wIndex; u16 wLength; u16 UART_state_bitmap; } __attribute__((packed)); struct hso_tiocmget { struct mutex mutex; 
wait_queue_head_t waitq; int intr_completed; struct usb_endpoint_descriptor *endp; struct urb *urb; struct hso_serial_state_notification serial_state_notification; u16 prev_UART_state_bitmap; struct uart_icount icount; }; struct hso_serial { struct hso_device *parent; int magic; u8 minor; struct hso_shared_int *shared_int; /* rx/tx urb could be either a bulk urb or a control urb depending on which serial port it is used on. */ struct urb *rx_urb[MAX_RX_URBS]; u8 num_rx_urbs; u8 *rx_data[MAX_RX_URBS]; u16 rx_data_length; /* should contain allocated length */ struct urb *tx_urb; u8 *tx_data; u8 *tx_buffer; u16 tx_data_length; /* should contain allocated length */ u16 tx_data_count; u16 tx_buffer_count; struct usb_ctrlrequest ctrl_req_tx; struct usb_ctrlrequest ctrl_req_rx; struct usb_endpoint_descriptor *in_endp; struct usb_endpoint_descriptor *out_endp; enum rx_ctrl_state rx_state; u8 rts_state; u8 dtr_state; unsigned tx_urb_used:1; /* from usb_serial_port */ struct tty_struct *tty; int open_count; spinlock_t serial_lock; int (*write_data) (struct hso_serial *serial); struct hso_tiocmget *tiocmget; /* Hacks required to get flow control * working on the serial receive buffers * so as not to drop characters on the floor. 
*/ int curr_rx_urb_idx; u16 curr_rx_urb_offset; u8 rx_urb_filled[MAX_RX_URBS]; struct tasklet_struct unthrottle_tasklet; struct work_struct retry_unthrottle_workqueue; }; struct hso_device { union { struct hso_serial *dev_serial; struct hso_net *dev_net; } port_data; u32 port_spec; u8 is_active; u8 usb_gone; struct work_struct async_get_intf; struct work_struct async_put_intf; struct work_struct reset_device; struct usb_device *usb; struct usb_interface *interface; struct device *dev; struct kref ref; struct mutex mutex; }; /* Type of interface */ #define HSO_INTF_MASK 0xFF00 #define HSO_INTF_MUX 0x0100 #define HSO_INTF_BULK 0x0200 /* Type of port */ #define HSO_PORT_MASK 0xFF #define HSO_PORT_NO_PORT 0x0 #define HSO_PORT_CONTROL 0x1 #define HSO_PORT_APP 0x2 #define HSO_PORT_GPS 0x3 #define HSO_PORT_PCSC 0x4 #define HSO_PORT_APP2 0x5 #define HSO_PORT_GPS_CONTROL 0x6 #define HSO_PORT_MSD 0x7 #define HSO_PORT_VOICE 0x8 #define HSO_PORT_DIAG2 0x9 #define HSO_PORT_DIAG 0x10 #define HSO_PORT_MODEM 0x11 #define HSO_PORT_NETWORK 0x12 /* Additional device info */ #define HSO_INFO_MASK 0xFF000000 #define HSO_INFO_CRC_BUG 0x01000000 /*****************************************************************************/ /* Prototypes */ /*****************************************************************************/ /* Serial driver functions */ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); static void ctrl_callback(struct urb *urb); static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial); static void hso_kick_transmit(struct hso_serial *serial); /* Helper functions */ static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, struct usb_device *usb, gfp_t gfp); static void handle_usb_error(int status, const char *function, struct hso_device *hso_dev); static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, int type, int dir); static int hso_get_mux_ports(struct usb_interface 
*intf, unsigned char *ports); static void hso_free_interface(struct usb_interface *intf); static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags); static int hso_stop_serial_device(struct hso_device *hso_dev); static int hso_start_net_device(struct hso_device *hso_dev); static void hso_free_shared_int(struct hso_shared_int *shared_int); static int hso_stop_net_device(struct hso_device *hso_dev); static void hso_serial_ref_free(struct kref *ref); static void hso_std_serial_read_bulk_callback(struct urb *urb); static int hso_mux_serial_read(struct hso_serial *serial); static void async_get_intf(struct work_struct *data); static void async_put_intf(struct work_struct *data); static int hso_put_activity(struct hso_device *hso_dev); static int hso_get_activity(struct hso_device *hso_dev); static void tiocmget_intr_callback(struct urb *urb); static void reset_device(struct work_struct *data); /*****************************************************************************/ /* Helping functions */ /*****************************************************************************/ /* #define DEBUG */ static inline struct hso_net *dev2net(struct hso_device *hso_dev) { return hso_dev->port_data.dev_net; } static inline struct hso_serial *dev2ser(struct hso_device *hso_dev) { return hso_dev->port_data.dev_serial; } /* Debugging functions */ #ifdef DEBUG static void dbg_dump(int line_count, const char *func_name, unsigned char *buf, unsigned int len) { static char name[255]; sprintf(name, "hso[%d:%s]", line_count, func_name); print_hex_dump_bytes(name, DUMP_PREFIX_NONE, buf, len); } #define DUMP(buf_, len_) \ dbg_dump(__LINE__, __func__, (unsigned char *)buf_, len_) #define DUMP1(buf_, len_) \ do { \ if (0x01 & debug) \ DUMP(buf_, len_); \ } while (0) #else #define DUMP(buf_, len_) #define DUMP1(buf_, len_) #endif /* module parameters */ static int debug; static int tty_major; static int disable_net; /* driver info */ static const char driver_name[] = "hso"; 
static const char tty_filename[] = "ttyHS"; static const char *version = __FILE__ ": " DRIVER_VERSION " " MOD_AUTHOR; /* the usb driver itself (registered in hso_init) */ static struct usb_driver hso_driver; /* serial structures */ static struct tty_driver *tty_drv; static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS]; static struct hso_device *network_table[HSO_MAX_NET_DEVICES]; static spinlock_t serial_table_lock; static const s32 default_port_spec[] = { HSO_INTF_MUX | HSO_PORT_NETWORK, HSO_INTF_BULK | HSO_PORT_DIAG, HSO_INTF_BULK | HSO_PORT_MODEM, 0 }; static const s32 icon321_port_spec[] = { HSO_INTF_MUX | HSO_PORT_NETWORK, HSO_INTF_BULK | HSO_PORT_DIAG2, HSO_INTF_BULK | HSO_PORT_MODEM, HSO_INTF_BULK | HSO_PORT_DIAG, 0 }; #define default_port_device(vendor, product) \ USB_DEVICE(vendor, product), \ .driver_info = (kernel_ulong_t)default_port_spec #define icon321_port_device(vendor, product) \ USB_DEVICE(vendor, product), \ .driver_info = (kernel_ulong_t)icon321_port_spec /* list of devices we support */ static const struct usb_device_id hso_ids[] = { {default_port_device(0x0af0, 0x6711)}, {default_port_device(0x0af0, 0x6731)}, {default_port_device(0x0af0, 0x6751)}, {default_port_device(0x0af0, 0x6771)}, {default_port_device(0x0af0, 0x6791)}, {default_port_device(0x0af0, 0x6811)}, {default_port_device(0x0af0, 0x6911)}, {default_port_device(0x0af0, 0x6951)}, {default_port_device(0x0af0, 0x6971)}, {default_port_device(0x0af0, 0x7011)}, {default_port_device(0x0af0, 0x7031)}, {default_port_device(0x0af0, 0x7051)}, {default_port_device(0x0af0, 0x7071)}, {default_port_device(0x0af0, 0x7111)}, {default_port_device(0x0af0, 0x7211)}, {default_port_device(0x0af0, 0x7251)}, {default_port_device(0x0af0, 0x7271)}, {default_port_device(0x0af0, 0x7311)}, {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */ {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */ {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */ {icon321_port_device(0x0af0, 0xd033)}, /* Icon-322 
*/ {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7381)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */ {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */ {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7701)}, {USB_DEVICE(0x0af0, 0x7706)}, {USB_DEVICE(0x0af0, 0x7801)}, {USB_DEVICE(0x0af0, 0x7901)}, {USB_DEVICE(0x0af0, 0x7A01)}, {USB_DEVICE(0x0af0, 0x7A05)}, {USB_DEVICE(0x0af0, 0x8200)}, {USB_DEVICE(0x0af0, 0x8201)}, {USB_DEVICE(0x0af0, 0x8300)}, {USB_DEVICE(0x0af0, 0x8302)}, {USB_DEVICE(0x0af0, 0x8304)}, {USB_DEVICE(0x0af0, 0x8400)}, {USB_DEVICE(0x0af0, 0xd035)}, {USB_DEVICE(0x0af0, 0xd055)}, {USB_DEVICE(0x0af0, 0xd155)}, {USB_DEVICE(0x0af0, 0xd255)}, {USB_DEVICE(0x0af0, 0xd057)}, {USB_DEVICE(0x0af0, 0xd157)}, {USB_DEVICE(0x0af0, 0xd257)}, {USB_DEVICE(0x0af0, 0xd357)}, {USB_DEVICE(0x0af0, 0xd058)}, {USB_DEVICE(0x0af0, 0xc100)}, {} }; MODULE_DEVICE_TABLE(usb, hso_ids); /* Sysfs attribute */ static ssize_t hso_sysfs_show_porttype(struct device *dev, struct device_attribute *attr, char *buf) { struct hso_device *hso_dev = dev_get_drvdata(dev); char *port_name; if (!hso_dev) return 0; switch (hso_dev->port_spec & HSO_PORT_MASK) { case HSO_PORT_CONTROL: port_name = "Control"; break; case HSO_PORT_APP: port_name = "Application"; break; case HSO_PORT_APP2: port_name = "Application2"; break; case HSO_PORT_GPS: port_name = "GPS"; break; case HSO_PORT_GPS_CONTROL: port_name = "GPS Control"; break; case HSO_PORT_PCSC: port_name = "PCSC"; break; case HSO_PORT_DIAG: port_name = "Diagnostic"; break; case HSO_PORT_DIAG2: port_name = "Diagnostic2"; break; case HSO_PORT_MODEM: port_name = "Modem"; break; case HSO_PORT_NETWORK: port_name = "Network"; break; default: port_name = "Unknown"; break; } return sprintf(buf, "%s\n", port_name); } static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL); static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb) { int idx; for (idx = 0; idx 
< serial->num_rx_urbs; idx++) if (serial->rx_urb[idx] == urb) return idx; dev_err(serial->parent->dev, "hso_urb_to_index failed\n"); return -1; } /* converts mux value to a port spec value */ static u32 hso_mux_to_port(int mux) { u32 result; switch (mux) { case 0x1: result = HSO_PORT_CONTROL; break; case 0x2: result = HSO_PORT_APP; break; case 0x4: result = HSO_PORT_PCSC; break; case 0x8: result = HSO_PORT_GPS; break; case 0x10: result = HSO_PORT_APP2; break; default: result = HSO_PORT_NO_PORT; } return result; } /* converts port spec value to a mux value */ static u32 hso_port_to_mux(int port) { u32 result; switch (port & HSO_PORT_MASK) { case HSO_PORT_CONTROL: result = 0x0; break; case HSO_PORT_APP: result = 0x1; break; case HSO_PORT_PCSC: result = 0x2; break; case HSO_PORT_GPS: result = 0x3; break; case HSO_PORT_APP2: result = 0x4; break; default: result = 0x0; } return result; } static struct hso_serial *get_serial_by_shared_int_and_type( struct hso_shared_int *shared_int, int mux) { int i, port; port = hso_mux_to_port(mux); for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (dev2ser(serial_table[i])->shared_int == shared_int) && ((serial_table[i]->port_spec & HSO_PORT_MASK) == port)) { return dev2ser(serial_table[i]); } } return NULL; } static struct hso_serial *get_serial_by_index(unsigned index) { struct hso_serial *serial = NULL; unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); if (serial_table[index]) serial = dev2ser(serial_table[index]); spin_unlock_irqrestore(&serial_table_lock, flags); return serial; } static int get_free_serial_index(void) { int index; unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { if (serial_table[index] == NULL) { spin_unlock_irqrestore(&serial_table_lock, flags); return index; } } spin_unlock_irqrestore(&serial_table_lock, flags); printk(KERN_ERR "%s: no free serial devices in table\n", __func__); return -1; } static 
void set_serial_by_index(unsigned index, struct hso_serial *serial) { unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); if (serial) serial_table[index] = serial->parent; else serial_table[index] = NULL; spin_unlock_irqrestore(&serial_table_lock, flags); } static void handle_usb_error(int status, const char *function, struct hso_device *hso_dev) { char *explanation; switch (status) { case -ENODEV: explanation = "no device"; break; case -ENOENT: explanation = "endpoint not enabled"; break; case -EPIPE: explanation = "endpoint stalled"; break; case -ENOSPC: explanation = "not enough bandwidth"; break; case -ESHUTDOWN: explanation = "device disabled"; break; case -EHOSTUNREACH: explanation = "device suspended"; break; case -EINVAL: case -EAGAIN: case -EFBIG: case -EMSGSIZE: explanation = "internal error"; break; case -EILSEQ: case -EPROTO: case -ETIME: case -ETIMEDOUT: explanation = "protocol error"; if (hso_dev) schedule_work(&hso_dev->reset_device); break; default: explanation = "unknown status"; break; } /* log a meaningful explanation of an USB status */ D1("%s: received USB status - %s (%d)", function, explanation, status); } /* Network interface functions */ /* called when net interface is brought up by ifconfig */ static int hso_net_open(struct net_device *net) { struct hso_net *odev = netdev_priv(net); unsigned long flags = 0; if (!odev) { dev_err(&net->dev, "No net device !\n"); return -ENODEV; } odev->skb_tx_buf = NULL; /* setup environment */ spin_lock_irqsave(&odev->net_lock, flags); odev->rx_parse_state = WAIT_IP; odev->rx_buf_size = 0; odev->rx_buf_missing = sizeof(struct iphdr); spin_unlock_irqrestore(&odev->net_lock, flags); /* We are up and running. 
*/ set_bit(HSO_NET_RUNNING, &odev->flags); hso_start_net_device(odev->parent); /* Tell the kernel we are ready to start receiving from it */ netif_start_queue(net); return 0; } /* called when interface is brought down by ifconfig */ static int hso_net_close(struct net_device *net) { struct hso_net *odev = netdev_priv(net); /* we don't need the queue anymore */ netif_stop_queue(net); /* no longer running */ clear_bit(HSO_NET_RUNNING, &odev->flags); hso_stop_net_device(odev->parent); /* done */ return 0; } /* USB tells is xmit done, we should start the netqueue again */ static void write_bulk_callback(struct urb *urb) { struct hso_net *odev = urb->context; int status = urb->status; /* Sanity check */ if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) { dev_err(&urb->dev->dev, "%s: device not running\n", __func__); return; } /* Do we still have a valid kernel network device? */ if (!netif_device_present(odev->net)) { dev_err(&urb->dev->dev, "%s: net device not present\n", __func__); return; } /* log status, but don't act on it, we don't need to resubmit anything * anyhow */ if (status) handle_usb_error(status, __func__, odev->parent); hso_put_activity(odev->parent); /* Tell the network interface we are ready for another frame */ netif_wake_queue(odev->net); } /* called by kernel when we need to transmit a packet */ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb, struct net_device *net) { struct hso_net *odev = netdev_priv(net); int result; /* Tell the kernel, "No more frames 'til we are done with this one." */ netif_stop_queue(net); if (hso_get_activity(odev->parent) == -EAGAIN) { odev->skb_tx_buf = skb; return NETDEV_TX_OK; } /* log if asked */ DUMP1(skb->data, skb->len); /* Copy it from kernel memory to OUR memory */ memcpy(odev->mux_bulk_tx_buf, skb->data, skb->len); D1("len: %d/%d", skb->len, MUX_BULK_TX_BUF_SIZE); /* Fill in the URB for shipping it out. 
*/ usb_fill_bulk_urb(odev->mux_bulk_tx_urb, odev->parent->usb, usb_sndbulkpipe(odev->parent->usb, odev->out_endp-> bEndpointAddress & 0x7F), odev->mux_bulk_tx_buf, skb->len, write_bulk_callback, odev); /* Deal with the Zero Length packet problem, I hope */ odev->mux_bulk_tx_urb->transfer_flags |= URB_ZERO_PACKET; /* Send the URB on its merry way. */ result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC); if (result) { dev_warn(&odev->parent->interface->dev, "failed mux_bulk_tx_urb %d\n", result); net->stats.tx_errors++; netif_start_queue(net); } else { net->stats.tx_packets++; net->stats.tx_bytes += skb->len; /* And tell the kernel when the last transmit started. */ net->trans_start = jiffies; } dev_kfree_skb(skb); /* we're done */ return NETDEV_TX_OK; } static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { struct hso_net *odev = netdev_priv(net); strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info); } static const struct ethtool_ops ops = { .get_drvinfo = hso_get_drvinfo, .get_link = ethtool_op_get_link }; /* called when a packet did not ack after watchdogtimeout */ static void hso_net_tx_timeout(struct net_device *net) { struct hso_net *odev = netdev_priv(net); if (!odev) return; /* Tell syslog we are hosed. 
*/ dev_warn(&net->dev, "Tx timed out.\n"); /* Tear the waiting frame off the list */ if (odev->mux_bulk_tx_urb && (odev->mux_bulk_tx_urb->status == -EINPROGRESS)) usb_unlink_urb(odev->mux_bulk_tx_urb); /* Update statistics */ net->stats.tx_errors++; } /* make a real packet from the received USB buffer */ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt, unsigned int count, unsigned char is_eop) { unsigned short temp_bytes; unsigned short buffer_offset = 0; unsigned short frame_len; unsigned char *tmp_rx_buf; /* log if needed */ D1("Rx %d bytes", count); DUMP(ip_pkt, min(128, (int)count)); while (count) { switch (odev->rx_parse_state) { case WAIT_IP: /* waiting for IP header. */ /* wanted bytes - size of ip header */ temp_bytes = (count < odev->rx_buf_missing) ? count : odev-> rx_buf_missing; memcpy(((unsigned char *)(&odev->rx_ip_hdr)) + odev->rx_buf_size, ip_pkt + buffer_offset, temp_bytes); odev->rx_buf_size += temp_bytes; buffer_offset += temp_bytes; odev->rx_buf_missing -= temp_bytes; count -= temp_bytes; if (!odev->rx_buf_missing) { /* header is complete allocate an sk_buffer and * continue to WAIT_DATA */ frame_len = ntohs(odev->rx_ip_hdr.tot_len); if ((frame_len > DEFAULT_MRU) || (frame_len < sizeof(struct iphdr))) { dev_err(&odev->net->dev, "Invalid frame (%d) length\n", frame_len); odev->rx_parse_state = WAIT_SYNC; continue; } /* Allocate an sk_buff */ odev->skb_rx_buf = netdev_alloc_skb(odev->net, frame_len); if (!odev->skb_rx_buf) { /* We got no receive buffer. */ D1("could not allocate memory"); odev->rx_parse_state = WAIT_SYNC; return; } /* Copy what we got so far. make room for iphdr * after tail. 
*/ tmp_rx_buf = skb_put(odev->skb_rx_buf, sizeof(struct iphdr)); memcpy(tmp_rx_buf, (char *)&(odev->rx_ip_hdr), sizeof(struct iphdr)); /* ETH_HLEN */ odev->rx_buf_size = sizeof(struct iphdr); /* Filip actually use .tot_len */ odev->rx_buf_missing = frame_len - sizeof(struct iphdr); odev->rx_parse_state = WAIT_DATA; } break; case WAIT_DATA: temp_bytes = (count < odev->rx_buf_missing) ? count : odev->rx_buf_missing; /* Copy the rest of the bytes that are left in the * buffer into the waiting sk_buf. */ /* Make room for temp_bytes after tail. */ tmp_rx_buf = skb_put(odev->skb_rx_buf, temp_bytes); memcpy(tmp_rx_buf, ip_pkt + buffer_offset, temp_bytes); odev->rx_buf_missing -= temp_bytes; count -= temp_bytes; buffer_offset += temp_bytes; odev->rx_buf_size += temp_bytes; if (!odev->rx_buf_missing) { /* Packet is complete. Inject into stack. */ /* We have IP packet here */ odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP); /* don't check it */ odev->skb_rx_buf->ip_summed = CHECKSUM_UNNECESSARY; skb_reset_mac_header(odev->skb_rx_buf); /* Ship it off to the kernel */ netif_rx(odev->skb_rx_buf); /* No longer our buffer. */ odev->skb_rx_buf = NULL; /* update out statistics */ odev->net->stats.rx_packets++; odev->net->stats.rx_bytes += odev->rx_buf_size; odev->rx_buf_size = 0; odev->rx_buf_missing = sizeof(struct iphdr); odev->rx_parse_state = WAIT_IP; } break; case WAIT_SYNC: D1(" W_S"); count = 0; break; default: D1(" "); count--; break; } } /* Recovery mechanism for WAIT_SYNC state. */ if (is_eop) { if (odev->rx_parse_state == WAIT_SYNC) { odev->rx_parse_state = WAIT_IP; odev->rx_buf_size = 0; odev->rx_buf_missing = sizeof(struct iphdr); } } } /* Moving data from usb to kernel (in interrupt state) */ static void read_bulk_callback(struct urb *urb) { struct hso_net *odev = urb->context; struct net_device *net; int result; int status = urb->status; /* is al ok? (Filip: Who's Al ?) 
*/ if (status) { handle_usb_error(status, __func__, odev->parent); return; } /* Sanity check */ if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) { D1("BULK IN callback but driver is not active!"); return; } usb_mark_last_busy(urb->dev); net = odev->net; if (!netif_device_present(net)) { /* Somebody killed our network interface... */ return; } if (odev->parent->port_spec & HSO_INFO_CRC_BUG) { u32 rest; u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; rest = urb->actual_length % le16_to_cpu(odev->in_endp->wMaxPacketSize); if (((rest == 5) || (rest == 6)) && !memcmp(((u8 *) urb->transfer_buffer) + urb->actual_length - 4, crc_check, 4)) { urb->actual_length -= 4; } } /* do we even have a packet? */ if (urb->actual_length) { /* Handle the IP stream, add header and push it onto network * stack if the packet is complete. */ spin_lock(&odev->net_lock); packetizeRx(odev, urb->transfer_buffer, urb->actual_length, (urb->transfer_buffer_length > urb->actual_length) ? 1 : 0); spin_unlock(&odev->net_lock); } /* We are done with this URB, resubmit it. Prep the USB to wait for * another frame. Reuse same as received. */ usb_fill_bulk_urb(urb, odev->parent->usb, usb_rcvbulkpipe(odev->parent->usb, odev->in_endp-> bEndpointAddress & 0x7F), urb->transfer_buffer, MUX_BULK_RX_BUF_SIZE, read_bulk_callback, odev); /* Give this to the USB subsystem so it can tell us when more data * arrives. 
*/ result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_warn(&odev->parent->interface->dev, "%s failed submit mux_bulk_rx_urb %d\n", __func__, result); } /* Serial driver functions */ static void hso_init_termios(struct ktermios *termios) { /* * The default requirements for this device are: */ termios->c_iflag &= ~(IGNBRK /* disable ignore break */ | BRKINT /* disable break causes interrupt */ | PARMRK /* disable mark parity errors */ | ISTRIP /* disable clear high bit of input characters */ | INLCR /* disable translate NL to CR */ | IGNCR /* disable ignore CR */ | ICRNL /* disable translate CR to NL */ | IXON); /* disable enable XON/XOFF flow control */ /* disable postprocess output characters */ termios->c_oflag &= ~OPOST; termios->c_lflag &= ~(ECHO /* disable echo input characters */ | ECHONL /* disable echo new line */ | ICANON /* disable erase, kill, werase, and rprnt special characters */ | ISIG /* disable interrupt, quit, and suspend special characters */ | IEXTEN); /* disable non-POSIX special characters */ termios->c_cflag &= ~(CSIZE /* no size */ | PARENB /* disable parity bit */ | CBAUD /* clear current baud rate */ | CBAUDEX); /* clear current buad rate */ termios->c_cflag |= CS8; /* character size 8 bits */ /* baud rate 115200 */ tty_termios_encode_baud_rate(termios, 115200, 115200); } static void _hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) { struct hso_serial *serial = get_serial_by_tty(tty); struct ktermios *termios; if (!serial) { printk(KERN_ERR "%s: no tty structures", __func__); return; } D4("port %d", serial->minor); /* * Fix up unsupported bits */ termios = tty->termios; termios->c_iflag &= ~IXON; /* disable enable XON/XOFF flow control */ termios->c_cflag &= ~(CSIZE /* no size */ | PARENB /* disable parity bit */ | CBAUD /* clear current baud rate */ | CBAUDEX); /* clear current buad rate */ termios->c_cflag |= CS8; /* character size 8 bits */ /* baud rate 115200 */ tty_encode_baud_rate(tty, 115200, 115200); } 
/*
 * Refill and resubmit a bulk-IN URB on a standard (non-muxed) serial port
 * so the device can deliver the next chunk of RX data.
 */
static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial,
				     struct urb *urb)
{
	int result;
	/* We are done with this URB; prep it to wait for another frame. */
	usb_fill_bulk_urb(urb, serial->parent->usb,
			  usb_rcvbulkpipe(serial->parent->usb,
					  serial->in_endp->
					  bEndpointAddress & 0x7F),
			  urb->transfer_buffer, serial->rx_data_length,
			  hso_std_serial_read_bulk_callback, serial);
	/* Hand it back to the USB subsystem so it can tell us when more
	 * data arrives. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n",
			__func__, result);
	}
}

/*
 * Drain filled RX URBs into the tty in order, resubmitting each URB once
 * its buffer has been fully consumed.  Callers hold serial_lock.
 *
 * put_rxbuf_data() returns -1 when the tty is throttled (stop draining,
 * the unthrottle path resumes later), 0 when the URB's buffer was fully
 * pushed (advance and resubmit), and >0 while bytes remain.
 */
static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial)
{
	int count;
	struct urb *curr_urb;

	while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) {
		curr_urb = serial->rx_urb[serial->curr_rx_urb_idx];
		count = put_rxbuf_data(curr_urb, serial);
		if (count == -1)
			return;
		if (count == 0) {
			/* Advance through the ring of num_rx_urbs URBs. */
			serial->curr_rx_urb_idx++;
			if (serial->curr_rx_urb_idx >= serial->num_rx_urbs)
				serial->curr_rx_urb_idx = 0;
			hso_resubmit_rx_bulk_urb(serial, curr_urb);
		}
	}
}

/*
 * Push RX data from the single control URB of a muxed port to the tty
 * and, if more data is pending, issue the next mux read.  Callers hold
 * serial_lock.
 */
static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
{
	int count = 0;
	struct urb *urb;

	urb = serial->rx_urb[0];
	/* Only deliver data when somebody has the port open. */
	if (serial->open_count > 0) {
		count = put_rxbuf_data(urb, serial);
		if (count == -1)
			return;
	}
	/* Re issue a read as long as we receive data.
*/ if (count == 0 && ((urb->actual_length != 0) || (serial->rx_state == RX_PENDING))) { serial->rx_state = RX_SENT; hso_mux_serial_read(serial); } else serial->rx_state = RX_IDLE; } /* read callback for Diag and CS port */ static void hso_std_serial_read_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; /* sanity check */ if (!serial) { D1("serial == NULL"); return; } else if (status) { handle_usb_error(status, __func__, serial->parent); return; } D4("\n--- Got serial_read_bulk callback %02x ---", status); D1("Actual length = %d\n", urb->actual_length); DUMP1(urb->transfer_buffer, urb->actual_length); /* Anyone listening? */ if (serial->open_count == 0) return; if (status == 0) { if (serial->parent->port_spec & HSO_INFO_CRC_BUG) { u32 rest; u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; rest = urb->actual_length % le16_to_cpu(serial->in_endp->wMaxPacketSize); if (((rest == 5) || (rest == 6)) && !memcmp(((u8 *) urb->transfer_buffer) + urb->actual_length - 4, crc_check, 4)) { urb->actual_length -= 4; } } /* Valid data, handle RX data */ spin_lock(&serial->serial_lock); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; put_rxbuf_data_and_resubmit_bulk_urb(serial); spin_unlock(&serial->serial_lock); } else if (status == -ENOENT || status == -ECONNRESET) { /* Unlinked - check for throttled port. */ D2("Port %d, successfully unlinked urb", serial->minor); spin_lock(&serial->serial_lock); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; hso_resubmit_rx_bulk_urb(serial, urb); spin_unlock(&serial->serial_lock); } else { D2("Port %d, status = %d for read urb", serial->minor, status); return; } } /* * This needs to be a tasklet otherwise we will * end up recursively calling this function. 
*/ static void hso_unthrottle_tasklet(struct hso_serial *serial) { unsigned long flags; spin_lock_irqsave(&serial->serial_lock, flags); if ((serial->parent->port_spec & HSO_INTF_MUX)) put_rxbuf_data_and_resubmit_ctrl_urb(serial); else put_rxbuf_data_and_resubmit_bulk_urb(serial); spin_unlock_irqrestore(&serial->serial_lock, flags); } static void hso_unthrottle(struct tty_struct *tty) { struct hso_serial *serial = get_serial_by_tty(tty); tasklet_hi_schedule(&serial->unthrottle_tasklet); } static void hso_unthrottle_workfunc(struct work_struct *work) { struct hso_serial *serial = container_of(work, struct hso_serial, retry_unthrottle_workqueue); hso_unthrottle_tasklet(serial); } /* open the requested serial port */ static int hso_serial_open(struct tty_struct *tty, struct file *filp) { struct hso_serial *serial = get_serial_by_index(tty->index); int result; /* sanity check */ if (serial == NULL || serial->magic != HSO_SERIAL_MAGIC) { WARN_ON(1); tty->driver_data = NULL; D1("Failed to open port"); return -ENODEV; } mutex_lock(&serial->parent->mutex); result = usb_autopm_get_interface(serial->parent->interface); if (result < 0) goto err_out; D1("Opening %d", serial->minor); kref_get(&serial->parent->ref); /* setup */ spin_lock_irq(&serial->serial_lock); tty->driver_data = serial; tty_kref_put(serial->tty); serial->tty = tty_kref_get(tty); spin_unlock_irq(&serial->serial_lock); /* check for port already opened, if not set the termios */ serial->open_count++; if (serial->open_count == 1) { tty->low_latency = 1; serial->rx_state = RX_IDLE; /* Force default termio settings */ _hso_serial_set_termios(tty, NULL); tasklet_init(&serial->unthrottle_tasklet, (void (*)(unsigned long))hso_unthrottle_tasklet, (unsigned long)serial); INIT_WORK(&serial->retry_unthrottle_workqueue, hso_unthrottle_workfunc); result = hso_start_serial_device(serial->parent, GFP_KERNEL); if (result) { hso_stop_serial_device(serial->parent); serial->open_count--; kref_put(&serial->parent->ref, 
hso_serial_ref_free); } } else { D1("Port was already open"); } usb_autopm_put_interface(serial->parent->interface); /* done */ if (result) hso_serial_tiocmset(tty, NULL, TIOCM_RTS | TIOCM_DTR, 0); err_out: mutex_unlock(&serial->parent->mutex); return result; } /* close the requested serial port */ static void hso_serial_close(struct tty_struct *tty, struct file *filp) { struct hso_serial *serial = tty->driver_data; u8 usb_gone; D1("Closing serial port"); /* Open failed, no close cleanup required */ if (serial == NULL) return; mutex_lock(&serial->parent->mutex); usb_gone = serial->parent->usb_gone; if (!usb_gone) usb_autopm_get_interface(serial->parent->interface); /* reset the rts and dtr */ /* do the actual close */ serial->open_count--; if (serial->open_count <= 0) { serial->open_count = 0; spin_lock_irq(&serial->serial_lock); if (serial->tty == tty) { serial->tty->driver_data = NULL; serial->tty = NULL; tty_kref_put(tty); } spin_unlock_irq(&serial->serial_lock); if (!usb_gone) hso_stop_serial_device(serial->parent); tasklet_kill(&serial->unthrottle_tasklet); cancel_work_sync(&serial->retry_unthrottle_workqueue); } if (!usb_gone) usb_autopm_put_interface(serial->parent->interface); mutex_unlock(&serial->parent->mutex); kref_put(&serial->parent->ref, hso_serial_ref_free); } /* close the requested serial port */ static int hso_serial_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct hso_serial *serial = get_serial_by_tty(tty); int space, tx_bytes; unsigned long flags; /* sanity check */ if (serial == NULL) { printk(KERN_ERR "%s: serial is NULL\n", __func__); return -ENODEV; } spin_lock_irqsave(&serial->serial_lock, flags); space = serial->tx_data_length - serial->tx_buffer_count; tx_bytes = (count < space) ? 
count : space; if (!tx_bytes) goto out; memcpy(serial->tx_buffer + serial->tx_buffer_count, buf, tx_bytes); serial->tx_buffer_count += tx_bytes; out: spin_unlock_irqrestore(&serial->serial_lock, flags); hso_kick_transmit(serial); /* done */ return tx_bytes; } /* how much room is there for writing */ static int hso_serial_write_room(struct tty_struct *tty) { struct hso_serial *serial = get_serial_by_tty(tty); int room; unsigned long flags; spin_lock_irqsave(&serial->serial_lock, flags); room = serial->tx_data_length - serial->tx_buffer_count; spin_unlock_irqrestore(&serial->serial_lock, flags); /* return free room */ return room; } /* setup the term */ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) { struct hso_serial *serial = get_serial_by_tty(tty); unsigned long flags; if (old) D5("Termios called with: cflags new[%d] - old[%d]", tty->termios->c_cflag, old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); if (serial->open_count) _hso_serial_set_termios(tty, old); else tty->termios = old; spin_unlock_irqrestore(&serial->serial_lock, flags); /* done */ return; } /* how many characters in the buffer */ static int hso_serial_chars_in_buffer(struct tty_struct *tty) { struct hso_serial *serial = get_serial_by_tty(tty); int chars; unsigned long flags; /* sanity check */ if (serial == NULL) return 0; spin_lock_irqsave(&serial->serial_lock, flags); chars = serial->tx_buffer_count; spin_unlock_irqrestore(&serial->serial_lock, flags); return chars; } static int tiocmget_submit_urb(struct hso_serial *serial, struct hso_tiocmget *tiocmget, struct usb_device *usb) { int result; if (serial->parent->usb_gone) return -ENODEV; usb_fill_int_urb(tiocmget->urb, usb, usb_rcvintpipe(usb, tiocmget->endp-> bEndpointAddress & 0x7F), &tiocmget->serial_state_notification, sizeof(struct hso_serial_state_notification), tiocmget_intr_callback, serial, tiocmget->endp->bInterval); result = usb_submit_urb(tiocmget->urb, GFP_ATOMIC); 
if (result) { dev_warn(&usb->dev, "%s usb_submit_urb failed %d\n", __func__, result); } return result; } static void tiocmget_intr_callback(struct urb *urb) { struct hso_serial *serial = urb->context; struct hso_tiocmget *tiocmget; int status = urb->status; u16 UART_state_bitmap, prev_UART_state_bitmap; struct uart_icount *icount; struct hso_serial_state_notification *serial_state_notification; struct usb_device *usb; /* Sanity checks */ if (!serial) return; if (status) { handle_usb_error(status, __func__, serial->parent); return; } tiocmget = serial->tiocmget; if (!tiocmget) return; usb = serial->parent->usb; serial_state_notification = &tiocmget->serial_state_notification; if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || serial_state_notification->bNotification != B_NOTIFICATION || le16_to_cpu(serial_state_notification->wValue) != W_VALUE || le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { dev_warn(&usb->dev, "hso received invalid serial state notification\n"); DUMP(serial_state_notification, sizeof(struct hso_serial_state_notification)); } else { UART_state_bitmap = le16_to_cpu(serial_state_notification-> UART_state_bitmap); prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap; icount = &tiocmget->icount; spin_lock(&serial->serial_lock); if ((UART_state_bitmap & B_OVERRUN) != (prev_UART_state_bitmap & B_OVERRUN)) icount->parity++; if ((UART_state_bitmap & B_PARITY) != (prev_UART_state_bitmap & B_PARITY)) icount->parity++; if ((UART_state_bitmap & B_FRAMING) != (prev_UART_state_bitmap & B_FRAMING)) icount->frame++; if ((UART_state_bitmap & B_RING_SIGNAL) && !(prev_UART_state_bitmap & B_RING_SIGNAL)) icount->rng++; if ((UART_state_bitmap & B_BREAK) != (prev_UART_state_bitmap & B_BREAK)) icount->brk++; if ((UART_state_bitmap & B_TX_CARRIER) != (prev_UART_state_bitmap & B_TX_CARRIER)) icount->dsr++; if ((UART_state_bitmap & B_RX_CARRIER) != (prev_UART_state_bitmap & 
B_RX_CARRIER)) icount->dcd++; tiocmget->prev_UART_state_bitmap = UART_state_bitmap; spin_unlock(&serial->serial_lock); tiocmget->intr_completed = 1; wake_up_interruptible(&tiocmget->waitq); } memset(serial_state_notification, 0, sizeof(struct hso_serial_state_notification)); tiocmget_submit_urb(serial, tiocmget, serial->parent->usb); } /* * next few functions largely stolen from drivers/serial/serial_core.c */ /* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was */ static int hso_wait_modem_status(struct hso_serial *serial, unsigned long arg) { DECLARE_WAITQUEUE(wait, current); struct uart_icount cprev, cnow; struct hso_tiocmget *tiocmget; int ret; tiocmget = serial->tiocmget; if (!tiocmget) return -ENOENT; /* * note the counters on entry */ spin_lock_irq(&serial->serial_lock); memcpy(&cprev, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); add_wait_queue(&tiocmget->waitq, &wait); for (;;) { spin_lock_irq(&serial->serial_lock); memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); set_current_state(TASK_INTERRUPTIBLE); if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd))) { ret = 0; break; } schedule(); /* see if a signal did it */ if (signal_pending(current)) { ret = -ERESTARTSYS; break; } cprev = cnow; } current->state = TASK_RUNNING; remove_wait_queue(&tiocmget->waitq, &wait); return ret; } /* * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) * Return: write counters to the user passed counter struct * NB: both 1->0 and 0->1 transitions are counted except for * RI where only 0->1 is counted. 
*/ static int hso_get_count(struct hso_serial *serial, struct serial_icounter_struct __user *icnt) { struct serial_icounter_struct icount; struct uart_icount cnow; struct hso_tiocmget *tiocmget = serial->tiocmget; if (!tiocmget) return -ENOENT; spin_lock_irq(&serial->serial_lock); memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); icount.cts = cnow.cts; icount.dsr = cnow.dsr; icount.rng = cnow.rng; icount.dcd = cnow.dcd; icount.rx = cnow.rx; icount.tx = cnow.tx; icount.frame = cnow.frame; icount.overrun = cnow.overrun; icount.parity = cnow.parity; icount.brk = cnow.brk; icount.buf_overrun = cnow.buf_overrun; return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0; } static int hso_serial_tiocmget(struct tty_struct *tty, struct file *file) { int retval; struct hso_serial *serial = get_serial_by_tty(tty); struct hso_tiocmget *tiocmget; u16 UART_state_bitmap; /* sanity check */ if (!serial) { D1("no tty structures"); return -EINVAL; } spin_lock_irq(&serial->serial_lock); retval = ((serial->rts_state) ? TIOCM_RTS : 0) | ((serial->dtr_state) ? 
TIOCM_DTR : 0); tiocmget = serial->tiocmget; if (tiocmget) { UART_state_bitmap = le16_to_cpu( tiocmget->prev_UART_state_bitmap); if (UART_state_bitmap & B_RING_SIGNAL) retval |= TIOCM_RNG; if (UART_state_bitmap & B_RX_CARRIER) retval |= TIOCM_CD; if (UART_state_bitmap & B_TX_CARRIER) retval |= TIOCM_DSR; } spin_unlock_irq(&serial->serial_lock); return retval; } static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { int val = 0; unsigned long flags; int if_num; struct hso_serial *serial = get_serial_by_tty(tty); /* sanity check */ if (!serial) { D1("no tty structures"); return -EINVAL; } if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) return -EINVAL; if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; spin_lock_irqsave(&serial->serial_lock, flags); if (set & TIOCM_RTS) serial->rts_state = 1; if (set & TIOCM_DTR) serial->dtr_state = 1; if (clear & TIOCM_RTS) serial->rts_state = 0; if (clear & TIOCM_DTR) serial->dtr_state = 0; if (serial->dtr_state) val |= 0x01; if (serial->rts_state) val |= 0x02; spin_unlock_irqrestore(&serial->serial_lock, flags); return usb_control_msg(serial->parent->usb, usb_rcvctrlpipe(serial->parent->usb, 0), 0x22, 0x21, val, if_num, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int hso_serial_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct hso_serial *serial = get_serial_by_tty(tty); void __user *uarg = (void __user *)arg; int ret = 0; D4("IOCTL cmd: %d, arg: %ld", cmd, arg); if (!serial) return -ENODEV; switch (cmd) { case TIOCMIWAIT: ret = hso_wait_modem_status(serial, arg); break; case TIOCGICOUNT: ret = hso_get_count(serial, uarg); break; default: ret = -ENOIOCTLCMD; break; } return ret; } /* starts a transmit */ static void hso_kick_transmit(struct hso_serial *serial) { u8 *temp; unsigned long flags; int res; spin_lock_irqsave(&serial->serial_lock, flags); if (!serial->tx_buffer_count) goto out; if 
(serial->tx_urb_used) goto out; /* Wakeup USB interface if necessary */ if (hso_get_activity(serial->parent) == -EAGAIN) goto out; /* Switch pointers around to avoid memcpy */ temp = serial->tx_buffer; serial->tx_buffer = serial->tx_data; serial->tx_data = temp; serial->tx_data_count = serial->tx_buffer_count; serial->tx_buffer_count = 0; /* If temp is set, it means we switched buffers */ if (temp && serial->write_data) { res = serial->write_data(serial); if (res >= 0) serial->tx_urb_used = 1; } out: spin_unlock_irqrestore(&serial->serial_lock, flags); } /* make a request (for reading and writing data to muxed serial port) */ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port, struct urb *ctrl_urb, struct usb_ctrlrequest *ctrl_req, u8 *ctrl_urb_data, u32 size) { int result; int pipe; /* Sanity check */ if (!serial || !ctrl_urb || !ctrl_req) { printk(KERN_ERR "%s: Wrong arguments\n", __func__); return -EINVAL; } /* initialize */ ctrl_req->wValue = 0; ctrl_req->wIndex = cpu_to_le16(hso_port_to_mux(port)); ctrl_req->wLength = cpu_to_le16(size); if (type == USB_CDC_GET_ENCAPSULATED_RESPONSE) { /* Reading command */ ctrl_req->bRequestType = USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE; ctrl_req->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; pipe = usb_rcvctrlpipe(serial->parent->usb, 0); } else { /* Writing command */ ctrl_req->bRequestType = USB_DIR_OUT | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE; ctrl_req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; pipe = usb_sndctrlpipe(serial->parent->usb, 0); } /* syslog */ D2("%s command (%02x) len: %d, port: %d", type == USB_CDC_GET_ENCAPSULATED_RESPONSE ? 
"Read" : "Write", ctrl_req->bRequestType, ctrl_req->wLength, port); /* Load ctrl urb */ ctrl_urb->transfer_flags = 0; usb_fill_control_urb(ctrl_urb, serial->parent->usb, pipe, (u8 *) ctrl_req, ctrl_urb_data, size, ctrl_callback, serial); /* Send it on merry way */ result = usb_submit_urb(ctrl_urb, GFP_ATOMIC); if (result) { dev_err(&ctrl_urb->dev->dev, "%s failed submit ctrl_urb %d type %d\n", __func__, result, type); return result; } /* done */ return size; } /* called by intr_callback when read occurs */ static int hso_mux_serial_read(struct hso_serial *serial) { if (!serial) return -EINVAL; /* clean data */ memset(serial->rx_data[0], 0, CTRL_URB_RX_SIZE); /* make the request */ if (serial->num_rx_urbs != 1) { dev_err(&serial->parent->interface->dev, "ERROR: mux'd reads with multiple buffers " "not possible\n"); return 0; } return mux_device_request(serial, USB_CDC_GET_ENCAPSULATED_RESPONSE, serial->parent->port_spec & HSO_PORT_MASK, serial->rx_urb[0], &serial->ctrl_req_rx, serial->rx_data[0], serial->rx_data_length); } /* used for muxed serial port callback (muxed serial read) */ static void intr_callback(struct urb *urb) { struct hso_shared_int *shared_int = urb->context; struct hso_serial *serial; unsigned char *port_req; int status = urb->status; int i; usb_mark_last_busy(urb->dev); /* sanity check */ if (!shared_int) return; /* status check */ if (status) { handle_usb_error(status, __func__, NULL); return; } D4("\n--- Got intr callback 0x%02X ---", status); /* what request? 
*/ port_req = urb->transfer_buffer; D4(" port_req = 0x%.2X\n", *port_req); /* loop over all muxed ports to find the one sending this */ for (i = 0; i < 8; i++) { /* max 8 channels on MUX */ if (*port_req & (1 << i)) { serial = get_serial_by_shared_int_and_type(shared_int, (1 << i)); if (serial != NULL) { D1("Pending read interrupt on port %d\n", i); spin_lock(&serial->serial_lock); if (serial->rx_state == RX_IDLE && serial->open_count > 0) { /* Setup and send a ctrl req read on * port i */ if (!serial->rx_urb_filled[0]) { serial->rx_state = RX_SENT; hso_mux_serial_read(serial); } else serial->rx_state = RX_PENDING; } else { D1("Already a read pending on " "port %d or port not open\n", i); } spin_unlock(&serial->serial_lock); } } } /* Resubmit interrupt urb */ hso_mux_submit_intr_urb(shared_int, urb->dev, GFP_ATOMIC); } /* called for writing to muxed serial port */ static int hso_mux_serial_write_data(struct hso_serial *serial) { if (NULL == serial) return -EINVAL; return mux_device_request(serial, USB_CDC_SEND_ENCAPSULATED_COMMAND, serial->parent->port_spec & HSO_PORT_MASK, serial->tx_urb, &serial->ctrl_req_tx, serial->tx_data, serial->tx_data_count); } /* write callback for Diag and CS port */ static void hso_std_serial_write_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; struct tty_struct *tty; /* sanity check */ if (!serial) { D1("serial == NULL"); return; } spin_lock(&serial->serial_lock); serial->tx_urb_used = 0; tty = tty_kref_get(serial->tty); spin_unlock(&serial->serial_lock); if (status) { handle_usb_error(status, __func__, serial->parent); tty_kref_put(tty); return; } hso_put_activity(serial->parent); if (tty) { tty_wakeup(tty); tty_kref_put(tty); } hso_kick_transmit(serial); D1(" "); return; } /* called for writing diag or CS serial port */ static int hso_std_serial_write_data(struct hso_serial *serial) { int count = serial->tx_data_count; int result; usb_fill_bulk_urb(serial->tx_urb, 
serial->parent->usb, usb_sndbulkpipe(serial->parent->usb, serial->out_endp-> bEndpointAddress & 0x7F), serial->tx_data, serial->tx_data_count, hso_std_serial_write_bulk_callback, serial); result = usb_submit_urb(serial->tx_urb, GFP_ATOMIC); if (result) { dev_warn(&serial->parent->usb->dev, "Failed to submit urb - res %d\n", result); return result; } return count; } /* callback after read or write on muxed serial port */ static void ctrl_callback(struct urb *urb) { struct hso_serial *serial = urb->context; struct usb_ctrlrequest *req; int status = urb->status; struct tty_struct *tty; /* sanity check */ if (!serial) return; spin_lock(&serial->serial_lock); serial->tx_urb_used = 0; tty = tty_kref_get(serial->tty); spin_unlock(&serial->serial_lock); if (status) { handle_usb_error(status, __func__, serial->parent); tty_kref_put(tty); return; } /* what request? */ req = (struct usb_ctrlrequest *)(urb->setup_packet); D4("\n--- Got muxed ctrl callback 0x%02X ---", status); D4("Actual length of urb = %d\n", urb->actual_length); DUMP1(urb->transfer_buffer, urb->actual_length); if (req->bRequestType == (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { /* response to a read command */ serial->rx_urb_filled[0] = 1; spin_lock(&serial->serial_lock); put_rxbuf_data_and_resubmit_ctrl_urb(serial); spin_unlock(&serial->serial_lock); } else { hso_put_activity(serial->parent); if (tty) tty_wakeup(tty); /* response to a write command */ hso_kick_transmit(serial); } tty_kref_put(tty); } /* handle RX data for serial port */ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) { struct tty_struct *tty; int write_length_remaining = 0; int curr_write_len; /* Sanity check */ if (urb == NULL || serial == NULL) { D1("serial = NULL"); return -2; } /* All callers to put_rxbuf_data hold serial_lock */ tty = tty_kref_get(serial->tty); /* Push data to tty */ if (tty) { write_length_remaining = urb->actual_length - serial->curr_rx_urb_offset; D1("data to push to tty"); 
while (write_length_remaining) { if (test_bit(TTY_THROTTLED, &tty->flags)) { tty_kref_put(tty); return -1; } curr_write_len = tty_insert_flip_string (tty, urb->transfer_buffer + serial->curr_rx_urb_offset, write_length_remaining); serial->curr_rx_urb_offset += curr_write_len; write_length_remaining -= curr_write_len; tty_flip_buffer_push(tty); } } if (write_length_remaining == 0) { serial->curr_rx_urb_offset = 0; serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; } tty_kref_put(tty); return write_length_remaining; } /* Base driver functions */ static void hso_log_port(struct hso_device *hso_dev) { char *port_type; char port_dev[20]; switch (hso_dev->port_spec & HSO_PORT_MASK) { case HSO_PORT_CONTROL: port_type = "Control"; break; case HSO_PORT_APP: port_type = "Application"; break; case HSO_PORT_GPS: port_type = "GPS"; break; case HSO_PORT_GPS_CONTROL: port_type = "GPS control"; break; case HSO_PORT_APP2: port_type = "Application2"; break; case HSO_PORT_PCSC: port_type = "PCSC"; break; case HSO_PORT_DIAG: port_type = "Diagnostic"; break; case HSO_PORT_DIAG2: port_type = "Diagnostic2"; break; case HSO_PORT_MODEM: port_type = "Modem"; break; case HSO_PORT_NETWORK: port_type = "Network"; break; default: port_type = "Unknown"; break; } if ((hso_dev->port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { sprintf(port_dev, "%s", dev2net(hso_dev)->net->name); } else sprintf(port_dev, "/dev/%s%d", tty_filename, dev2ser(hso_dev)->minor); dev_dbg(&hso_dev->interface->dev, "HSO: Found %s port %s\n", port_type, port_dev); } static int hso_start_net_device(struct hso_device *hso_dev) { int i, result = 0; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return -ENODEV; /* send URBs for all read buffers */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { /* Prep a receive URB */ usb_fill_bulk_urb(hso_net->mux_bulk_rx_urb_pool[i], hso_dev->usb, usb_rcvbulkpipe(hso_dev->usb, hso_net->in_endp-> bEndpointAddress & 0x7F), hso_net->mux_bulk_rx_buf_pool[i], MUX_BULK_RX_BUF_SIZE, 
read_bulk_callback, hso_net); /* Put it out there so the device can send us stuff */ result = usb_submit_urb(hso_net->mux_bulk_rx_urb_pool[i], GFP_NOIO); if (result) dev_warn(&hso_dev->usb->dev, "%s failed mux_bulk_rx_urb[%d] %d\n", __func__, i, result); } return result; } static int hso_stop_net_device(struct hso_device *hso_dev) { int i; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return -ENODEV; for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { if (hso_net->mux_bulk_rx_urb_pool[i]) usb_kill_urb(hso_net->mux_bulk_rx_urb_pool[i]); } if (hso_net->mux_bulk_tx_urb) usb_kill_urb(hso_net->mux_bulk_tx_urb); return 0; } static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags) { int i, result = 0; struct hso_serial *serial = dev2ser(hso_dev); if (!serial) return -ENODEV; /* If it is not the MUX port fill in and submit a bulk urb (already * allocated in hso_serial_start) */ if (!(serial->parent->port_spec & HSO_INTF_MUX)) { for (i = 0; i < serial->num_rx_urbs; i++) { usb_fill_bulk_urb(serial->rx_urb[i], serial->parent->usb, usb_rcvbulkpipe(serial->parent->usb, serial->in_endp-> bEndpointAddress & 0x7F), serial->rx_data[i], serial->rx_data_length, hso_std_serial_read_bulk_callback, serial); result = usb_submit_urb(serial->rx_urb[i], flags); if (result) { dev_warn(&serial->parent->usb->dev, "Failed to submit urb - res %d\n", result); break; } } } else { mutex_lock(&serial->shared_int->shared_int_lock); if (!serial->shared_int->use_count) { result = hso_mux_submit_intr_urb(serial->shared_int, hso_dev->usb, flags); } serial->shared_int->use_count++; mutex_unlock(&serial->shared_int->shared_int_lock); } if (serial->tiocmget) tiocmget_submit_urb(serial, serial->tiocmget, serial->parent->usb); return result; } static int hso_stop_serial_device(struct hso_device *hso_dev) { int i; struct hso_serial *serial = dev2ser(hso_dev); struct hso_tiocmget *tiocmget; if (!serial) return -ENODEV; for (i = 0; i < serial->num_rx_urbs; i++) { if (serial->rx_urb[i]) { 
usb_kill_urb(serial->rx_urb[i]); serial->rx_urb_filled[i] = 0; } } serial->curr_rx_urb_idx = 0; serial->curr_rx_urb_offset = 0; if (serial->tx_urb) usb_kill_urb(serial->tx_urb); if (serial->shared_int) { mutex_lock(&serial->shared_int->shared_int_lock); if (serial->shared_int->use_count && (--serial->shared_int->use_count == 0)) { struct urb *urb; urb = serial->shared_int->shared_intr_urb; if (urb) usb_kill_urb(urb); } mutex_unlock(&serial->shared_int->shared_int_lock); } tiocmget = serial->tiocmget; if (tiocmget) { wake_up_interruptible(&tiocmget->waitq); usb_kill_urb(tiocmget->urb); } return 0; } static void hso_serial_common_free(struct hso_serial *serial) { int i; if (serial->parent->dev) device_remove_file(serial->parent->dev, &dev_attr_hsotype); tty_unregister_device(tty_drv, serial->minor); for (i = 0; i < serial->num_rx_urbs; i++) { /* unlink and free RX URB */ usb_free_urb(serial->rx_urb[i]); /* free the RX buffer */ kfree(serial->rx_data[i]); } /* unlink and free TX URB */ usb_free_urb(serial->tx_urb); kfree(serial->tx_data); } static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { struct device *dev; int minor; int i; minor = get_free_serial_index(); if (minor < 0) goto exit; /* register our minor number */ serial->parent->dev = tty_register_device(tty_drv, minor, &serial->parent->interface->dev); dev = serial->parent->dev; dev_set_drvdata(dev, serial->parent); i = device_create_file(dev, &dev_attr_hsotype); /* fill in specific data for later use */ serial->minor = minor; serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; /* RX, allocate urb and initialize */ /* prepare our RX buffer */ serial->rx_data_length = rx_size; for (i = 0; i < serial->num_rx_urbs; i++) { serial->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL); if (!serial->rx_urb[i]) { dev_err(dev, "Could not allocate urb?\n"); goto exit; } serial->rx_urb[i]->transfer_buffer = NULL; 
serial->rx_urb[i]->transfer_buffer_length = 0; serial->rx_data[i] = kzalloc(serial->rx_data_length, GFP_KERNEL); if (!serial->rx_data[i]) { dev_err(dev, "%s - Out of memory\n", __func__); goto exit; } } /* TX, allocate urb and initialize */ serial->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!serial->tx_urb) { dev_err(dev, "Could not allocate urb?\n"); goto exit; } serial->tx_urb->transfer_buffer = NULL; serial->tx_urb->transfer_buffer_length = 0; /* prepare our TX buffer */ serial->tx_data_count = 0; serial->tx_buffer_count = 0; serial->tx_data_length = tx_size; serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL); if (!serial->tx_data) { dev_err(dev, "%s - Out of memory\n", __func__); goto exit; } serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL); if (!serial->tx_buffer) { dev_err(dev, "%s - Out of memory\n", __func__); goto exit; } return 0; exit: hso_serial_common_free(serial); return -1; } /* Creates a general hso device */ static struct hso_device *hso_create_device(struct usb_interface *intf, int port_spec) { struct hso_device *hso_dev; hso_dev = kzalloc(sizeof(*hso_dev), GFP_ATOMIC); if (!hso_dev) return NULL; hso_dev->port_spec = port_spec; hso_dev->usb = interface_to_usbdev(intf); hso_dev->interface = intf; kref_init(&hso_dev->ref); mutex_init(&hso_dev->mutex); INIT_WORK(&hso_dev->async_get_intf, async_get_intf); INIT_WORK(&hso_dev->async_put_intf, async_put_intf); INIT_WORK(&hso_dev->reset_device, reset_device); return hso_dev; } /* Removes a network device in the network device table */ static int remove_net_device(struct hso_device *hso_dev) { int i; for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] == hso_dev) { network_table[i] = NULL; break; } } if (i == HSO_MAX_NET_DEVICES) return -1; return 0; } /* Frees our network device */ static void hso_free_net_device(struct hso_device *hso_dev) { int i; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return; remove_net_device(hso_net->parent); if (hso_net->net) 
{ unregister_netdev(hso_net->net); free_netdev(hso_net->net); } /* start freeing */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); kfree(hso_net->mux_bulk_rx_buf_pool[i]); hso_net->mux_bulk_rx_buf_pool[i] = NULL; } usb_free_urb(hso_net->mux_bulk_tx_urb); kfree(hso_net->mux_bulk_tx_buf); hso_net->mux_bulk_tx_buf = NULL; kfree(hso_dev); } static const struct net_device_ops hso_netdev_ops = { .ndo_open = hso_net_open, .ndo_stop = hso_net_close, .ndo_start_xmit = hso_net_start_xmit, .ndo_tx_timeout = hso_net_tx_timeout, }; /* initialize the network interface */ static void hso_net_init(struct net_device *net) { struct hso_net *hso_net = netdev_priv(net); D1("sizeof hso_net is %d", (int)sizeof(*hso_net)); /* fill in the other fields */ net->netdev_ops = &hso_netdev_ops; net->watchdog_timeo = HSO_NET_TX_TIMEOUT; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; net->type = ARPHRD_NONE; net->mtu = DEFAULT_MTU - 14; net->tx_queue_len = 10; SET_ETHTOOL_OPS(net, &ops); /* and initialize the semaphore */ spin_lock_init(&hso_net->net_lock); } /* Adds a network device in the network device table */ static int add_net_device(struct hso_device *hso_dev) { int i; for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] == NULL) { network_table[i] = hso_dev; break; } } if (i == HSO_MAX_NET_DEVICES) return -1; return 0; } static int hso_rfkill_set_block(void *data, bool blocked) { struct hso_device *hso_dev = data; int enabled = !blocked; int rv; mutex_lock(&hso_dev->mutex); if (hso_dev->usb_gone) rv = 0; else rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0), enabled ? 
0x82 : 0x81, 0x40, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); mutex_unlock(&hso_dev->mutex); return rv; } static const struct rfkill_ops hso_rfkill_ops = { .set_block = hso_rfkill_set_block, }; /* Creates and sets up everything for rfkill */ static void hso_create_rfkill(struct hso_device *hso_dev, struct usb_interface *interface) { struct hso_net *hso_net = dev2net(hso_dev); struct device *dev = &hso_net->net->dev; char *rfkn; rfkn = kzalloc(20, GFP_KERNEL); if (!rfkn) dev_err(dev, "%s - Out of memory\n", __func__); snprintf(rfkn, 20, "hso-%d", interface->altsetting->desc.bInterfaceNumber); hso_net->rfkill = rfkill_alloc(rfkn, &interface_to_usbdev(interface)->dev, RFKILL_TYPE_WWAN, &hso_rfkill_ops, hso_dev); if (!hso_net->rfkill) { dev_err(dev, "%s - Out of memory\n", __func__); kfree(rfkn); return; } if (rfkill_register(hso_net->rfkill) < 0) { rfkill_destroy(hso_net->rfkill); kfree(rfkn); hso_net->rfkill = NULL; dev_err(dev, "%s - Failed to register rfkill\n", __func__); return; } } static struct device_type hso_type = { .name = "wwan", }; /* Creates our network device */ static struct hso_device *hso_create_net_device(struct usb_interface *interface, int port_spec) { int result, i; struct net_device *net; struct hso_net *hso_net; struct hso_device *hso_dev; hso_dev = hso_create_device(interface, port_spec); if (!hso_dev) return NULL; /* allocate our network device, then we can put in our private data */ /* call hso_net_init to do the basic initialization */ net = alloc_netdev(sizeof(struct hso_net), "hso%d", hso_net_init); if (!net) { dev_err(&interface->dev, "Unable to create ethernet device\n"); goto exit; } hso_net = netdev_priv(net); hso_dev->port_data.dev_net = hso_net; hso_net->net = net; hso_net->parent = hso_dev; hso_net->in_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_IN); if (!hso_net->in_endp) { dev_err(&interface->dev, "Can't find BULK IN endpoint\n"); goto exit; } hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, 
USB_DIR_OUT); if (!hso_net->out_endp) { dev_err(&interface->dev, "Can't find BULK OUT endpoint\n"); goto exit; } SET_NETDEV_DEV(net, &interface->dev); SET_NETDEV_DEVTYPE(net, &hso_type); /* registering our net device */ result = register_netdev(net); if (result) { dev_err(&interface->dev, "Failed to register device\n"); goto exit; } /* start allocating */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_rx_urb_pool[i]) { dev_err(&interface->dev, "Could not allocate rx urb\n"); goto exit; } hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) { dev_err(&interface->dev, "Could not allocate rx buf\n"); goto exit; } } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_tx_urb) { dev_err(&interface->dev, "Could not allocate tx urb\n"); goto exit; } hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) { dev_err(&interface->dev, "Could not allocate tx buf\n"); goto exit; } add_net_device(hso_dev); hso_log_port(hso_dev); hso_create_rfkill(hso_dev, interface); return hso_dev; exit: hso_free_net_device(hso_dev); return NULL; } static void hso_free_tiomget(struct hso_serial *serial) { struct hso_tiocmget *tiocmget = serial->tiocmget; if (tiocmget) { if (tiocmget->urb) { usb_free_urb(tiocmget->urb); tiocmget->urb = NULL; } serial->tiocmget = NULL; kfree(tiocmget); } } /* Frees an AT channel ( goes for both mux and non-mux ) */ static void hso_free_serial_device(struct hso_device *hso_dev) { struct hso_serial *serial = dev2ser(hso_dev); if (!serial) return; set_serial_by_index(serial->minor, NULL); hso_serial_common_free(serial); if (serial->shared_int) { mutex_lock(&serial->shared_int->shared_int_lock); if (--serial->shared_int->ref_count == 0) hso_free_shared_int(serial->shared_int); else mutex_unlock(&serial->shared_int->shared_int_lock); } 
hso_free_tiomget(serial); kfree(serial); kfree(hso_dev); } /* Creates a bulk AT channel */ static struct hso_device *hso_create_bulk_serial_device( struct usb_interface *interface, int port) { struct hso_device *hso_dev; struct hso_serial *serial; int num_urbs; struct hso_tiocmget *tiocmget; hso_dev = hso_create_device(interface, port); if (!hso_dev) return NULL; serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) goto exit; serial->parent = hso_dev; hso_dev->port_data.dev_serial = serial; if ((port & HSO_PORT_MASK) == HSO_PORT_MODEM) { num_urbs = 2; serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget), GFP_KERNEL); /* it isn't going to break our heart if serial->tiocmget * allocation fails don't bother checking this. */ if (serial->tiocmget) { tiocmget = serial->tiocmget; tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL); if (tiocmget->urb) { mutex_init(&tiocmget->mutex); init_waitqueue_head(&tiocmget->waitq); tiocmget->endp = hso_get_ep( interface, USB_ENDPOINT_XFER_INT, USB_DIR_IN); } else hso_free_tiomget(serial); } } else num_urbs = 1; if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE, BULK_URB_TX_SIZE)) goto exit; serial->in_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_IN); if (!serial->in_endp) { dev_err(&interface->dev, "Failed to find BULK IN ep\n"); goto exit2; } if (! 
(serial->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) { dev_err(&interface->dev, "Failed to find BULK IN ep\n"); goto exit2; } serial->write_data = hso_std_serial_write_data; /* and record this serial */ set_serial_by_index(serial->minor, serial); /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); /* done, return it */ return hso_dev; exit2: hso_serial_common_free(serial); exit: hso_free_tiomget(serial); kfree(serial); kfree(hso_dev); return NULL; } /* Creates a multiplexed AT channel */ static struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, int port, struct hso_shared_int *mux) { struct hso_device *hso_dev; struct hso_serial *serial; int port_spec; port_spec = HSO_INTF_MUX; port_spec &= ~HSO_PORT_MASK; port_spec |= hso_mux_to_port(port); if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NO_PORT) return NULL; hso_dev = hso_create_device(interface, port_spec); if (!hso_dev) return NULL; serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) goto exit; hso_dev->port_data.dev_serial = serial; serial->parent = hso_dev; if (hso_serial_common_create (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE)) goto exit; serial->tx_data_length--; serial->write_data = hso_mux_serial_write_data; serial->shared_int = mux; mutex_lock(&serial->shared_int->shared_int_lock); serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock); /* and record this serial */ set_serial_by_index(serial->minor, serial); /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); /* done, return it */ return hso_dev; exit: if (serial) { tty_unregister_device(tty_drv, serial->minor); kfree(serial); } if (hso_dev) kfree(hso_dev); return NULL; } static void hso_free_shared_int(struct hso_shared_int *mux) { usb_free_urb(mux->shared_intr_urb); kfree(mux->shared_intr_buf); mutex_unlock(&mux->shared_int_lock); kfree(mux); } static struct hso_shared_int *hso_create_shared_int(struct usb_interface 
*interface) { struct hso_shared_int *mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) return NULL; mux->intr_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_INT, USB_DIR_IN); if (!mux->intr_endp) { dev_err(&interface->dev, "Can't find INT IN endpoint\n"); goto exit; } mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!mux->shared_intr_urb) { dev_err(&interface->dev, "Could not allocate intr urb?\n"); goto exit; } mux->shared_intr_buf = kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize), GFP_KERNEL); if (!mux->shared_intr_buf) { dev_err(&interface->dev, "Could not allocate intr buf?\n"); goto exit; } mutex_init(&mux->shared_int_lock); return mux; exit: kfree(mux->shared_intr_buf); usb_free_urb(mux->shared_intr_urb); kfree(mux); return NULL; } /* Gets the port spec for a certain interface */ static int hso_get_config_data(struct usb_interface *interface) { struct usb_device *usbdev = interface_to_usbdev(interface); u8 config_data[17]; u32 if_num = interface->altsetting->desc.bInterfaceNumber; s32 result; if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x86, 0xC0, 0, 0, config_data, 17, USB_CTRL_SET_TIMEOUT) != 0x11) { return -EIO; } switch (config_data[if_num]) { case 0x0: result = 0; break; case 0x1: result = HSO_PORT_DIAG; break; case 0x2: result = HSO_PORT_GPS; break; case 0x3: result = HSO_PORT_GPS_CONTROL; break; case 0x4: result = HSO_PORT_APP; break; case 0x5: result = HSO_PORT_APP2; break; case 0x6: result = HSO_PORT_CONTROL; break; case 0x7: result = HSO_PORT_NETWORK; break; case 0x8: result = HSO_PORT_MODEM; break; case 0x9: result = HSO_PORT_MSD; break; case 0xa: result = HSO_PORT_PCSC; break; case 0xb: result = HSO_PORT_VOICE; break; default: result = 0; } if (result) result |= HSO_INTF_BULK; if (config_data[16] & 0x1) result |= HSO_INFO_CRC_BUG; return result; } /* called once for each interface upon device insertion */ static int hso_probe(struct usb_interface *interface, const struct usb_device_id *id) { int mux, i, if_num, port_spec; 
unsigned char port_mask; struct hso_device *hso_dev = NULL; struct hso_shared_int *shared_int; struct hso_device *tmp_dev = NULL; if_num = interface->altsetting->desc.bInterfaceNumber; /* Get the interface/port specification from either driver_info or from * the device itself */ if (id->driver_info) port_spec = ((u32 *)(id->driver_info))[if_num]; else port_spec = hso_get_config_data(interface); if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { dev_err(&interface->dev, "Not our interface\n"); return -ENODEV; } /* Check if we need to switch to alt interfaces prior to port * configuration */ if (interface->num_altsetting > 1) usb_set_interface(interface_to_usbdev(interface), if_num, 1); interface->needs_remote_wakeup = 1; /* Allocate new hso device(s) */ switch (port_spec & HSO_INTF_MASK) { case HSO_INTF_MUX: if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { /* Create the network device */ if (!disable_net) { hso_dev = hso_create_net_device(interface, port_spec); if (!hso_dev) goto exit; tmp_dev = hso_dev; } } if (hso_get_mux_ports(interface, &port_mask)) /* TODO: de-allocate everything */ goto exit; shared_int = hso_create_shared_int(interface); if (!shared_int) goto exit; for (i = 1, mux = 0; i < 0x100; i = i << 1, mux++) { if (port_mask & i) { hso_dev = hso_create_mux_serial_device( interface, i, shared_int); if (!hso_dev) goto exit; } } if (tmp_dev) hso_dev = tmp_dev; break; case HSO_INTF_BULK: /* It's a regular bulk interface */ if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) && !disable_net) hso_dev = hso_create_net_device(interface, port_spec); else hso_dev = hso_create_bulk_serial_device(interface, port_spec); if (!hso_dev) goto exit; break; default: goto exit; } /* save our data pointer in this device */ usb_set_intfdata(interface, hso_dev); /* done */ return 0; exit: hso_free_interface(interface); return -ENODEV; } /* device removed, cleaning up */ static void hso_disconnect(struct usb_interface *interface) { 
hso_free_interface(interface); /* remove reference of our private data */ usb_set_intfdata(interface, NULL); } static void async_get_intf(struct work_struct *data) { struct hso_device *hso_dev = container_of(data, struct hso_device, async_get_intf); usb_autopm_get_interface(hso_dev->interface); } static void async_put_intf(struct work_struct *data) { struct hso_device *hso_dev = container_of(data, struct hso_device, async_put_intf); usb_autopm_put_interface(hso_dev->interface); } static int hso_get_activity(struct hso_device *hso_dev) { if (hso_dev->usb->state == USB_STATE_SUSPENDED) { if (!hso_dev->is_active) { hso_dev->is_active = 1; schedule_work(&hso_dev->async_get_intf); } } if (hso_dev->usb->state != USB_STATE_CONFIGURED) return -EAGAIN; usb_mark_last_busy(hso_dev->usb); return 0; } static int hso_put_activity(struct hso_device *hso_dev) { if (hso_dev->usb->state != USB_STATE_SUSPENDED) { if (hso_dev->is_active) { hso_dev->is_active = 0; schedule_work(&hso_dev->async_put_intf); return -EAGAIN; } } hso_dev->is_active = 0; return 0; } /* called by kernel when we need to suspend device */ static int hso_suspend(struct usb_interface *iface, pm_message_t message) { int i, result; /* Stop all serial ports */ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == iface)) { result = hso_stop_serial_device(serial_table[i]); if (result) goto out; } } /* Stop all network ports */ for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == iface)) { result = hso_stop_net_device(network_table[i]); if (result) goto out; } } out: return 0; } /* called by kernel when we need to resume device */ static int hso_resume(struct usb_interface *iface) { int i, result = 0; struct hso_net *hso_net; /* Start all serial ports */ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == iface)) { if (dev2ser(serial_table[i])->open_count) { result = 
hso_start_serial_device(serial_table[i], GFP_NOIO); hso_kick_transmit(dev2ser(serial_table[i])); if (result) goto out; } } } /* Start all network ports */ for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == iface)) { hso_net = dev2net(network_table[i]); if (hso_net->flags & IFF_UP) { /* First transmit any lingering data, then restart the device. */ if (hso_net->skb_tx_buf) { dev_dbg(&iface->dev, "Transmitting" " lingering data\n"); hso_net_start_xmit(hso_net->skb_tx_buf, hso_net->net); hso_net->skb_tx_buf = NULL; } result = hso_start_net_device(network_table[i]); if (result) goto out; } } } out: return result; } static void reset_device(struct work_struct *data) { struct hso_device *hso_dev = container_of(data, struct hso_device, reset_device); struct usb_device *usb = hso_dev->usb; int result; if (hso_dev->usb_gone) { D1("No reset during disconnect\n"); } else { result = usb_lock_device_for_reset(usb, hso_dev->interface); if (result < 0) D1("unable to lock device for reset: %d\n", result); else { usb_reset_device(usb); usb_unlock_device(usb); } } } static void hso_serial_ref_free(struct kref *ref) { struct hso_device *hso_dev = container_of(ref, struct hso_device, ref); hso_free_serial_device(hso_dev); } static void hso_free_interface(struct usb_interface *interface) { struct hso_serial *hso_dev; struct tty_struct *tty; int i; for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == interface)) { hso_dev = dev2ser(serial_table[i]); spin_lock_irq(&hso_dev->serial_lock); tty = tty_kref_get(hso_dev->tty); spin_unlock_irq(&hso_dev->serial_lock); if (tty) tty_hangup(tty); mutex_lock(&hso_dev->parent->mutex); tty_kref_put(tty); hso_dev->parent->usb_gone = 1; mutex_unlock(&hso_dev->parent->mutex); kref_put(&serial_table[i]->ref, hso_serial_ref_free); } } for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == interface)) { struct rfkill 
*rfk = dev2net(network_table[i])->rfkill; /* hso_stop_net_device doesn't stop the net queue since * traffic needs to start it again when suspended */ netif_stop_queue(dev2net(network_table[i])->net); hso_stop_net_device(network_table[i]); cancel_work_sync(&network_table[i]->async_put_intf); cancel_work_sync(&network_table[i]->async_get_intf); if (rfk) { rfkill_unregister(rfk); rfkill_destroy(rfk); } hso_free_net_device(network_table[i]); } } } /* Helper functions */ /* Get the endpoint ! */ static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, int type, int dir) { int i; struct usb_host_interface *iface = intf->cur_altsetting; struct usb_endpoint_descriptor *endp; for (i = 0; i < iface->desc.bNumEndpoints; i++) { endp = &iface->endpoint[i].desc; if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) && (usb_endpoint_type(endp) == type)) return endp; } return NULL; } /* Get the byte that describes which ports are enabled */ static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports) { int i; struct usb_host_interface *iface = intf->cur_altsetting; if (iface->extralen == 3) { *ports = iface->extra[2]; return 0; } for (i = 0; i < iface->desc.bNumEndpoints; i++) { if (iface->endpoint[i].extralen == 3) { *ports = iface->endpoint[i].extra[2]; return 0; } } return -1; } /* interrupt urb needs to be submitted, used for serial read of muxed port */ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int, struct usb_device *usb, gfp_t gfp) { int result; usb_fill_int_urb(shared_int->shared_intr_urb, usb, usb_rcvintpipe(usb, shared_int->intr_endp->bEndpointAddress & 0x7F), shared_int->shared_intr_buf, 1, intr_callback, shared_int, shared_int->intr_endp->bInterval); result = usb_submit_urb(shared_int->shared_intr_urb, gfp); if (result) dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__, result); return result; } /* operations setup of the serial interface */ static const struct tty_operations hso_serial_ops 
= { .open = hso_serial_open, .close = hso_serial_close, .write = hso_serial_write, .write_room = hso_serial_write_room, .ioctl = hso_serial_ioctl, .set_termios = hso_serial_set_termios, .chars_in_buffer = hso_serial_chars_in_buffer, .tiocmget = hso_serial_tiocmget, .tiocmset = hso_serial_tiocmset, .unthrottle = hso_unthrottle }; static struct usb_driver hso_driver = { .name = driver_name, .probe = hso_probe, .disconnect = hso_disconnect, .id_table = hso_ids, .suspend = hso_suspend, .resume = hso_resume, .reset_resume = hso_resume, .supports_autosuspend = 1, }; static int __init hso_init(void) { int i; int result; /* put it in the log */ printk(KERN_INFO "hso: %s\n", version); /* Initialise the serial table semaphore and table */ spin_lock_init(&serial_table_lock); for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) serial_table[i] = NULL; /* allocate our driver using the proper amount of supported minors */ tty_drv = alloc_tty_driver(HSO_SERIAL_TTY_MINORS); if (!tty_drv) return -ENOMEM; /* fill in all needed values */ tty_drv->magic = TTY_DRIVER_MAGIC; tty_drv->owner = THIS_MODULE; tty_drv->driver_name = driver_name; tty_drv->name = tty_filename; /* if major number is provided as parameter, use that one */ if (tty_major) tty_drv->major = tty_major; tty_drv->minor_start = 0; tty_drv->num = HSO_SERIAL_TTY_MINORS; tty_drv->type = TTY_DRIVER_TYPE_SERIAL; tty_drv->subtype = SERIAL_TYPE_NORMAL; tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; tty_drv->init_termios = tty_std_termios; hso_init_termios(&tty_drv->init_termios); tty_set_operations(tty_drv, &hso_serial_ops); /* register the tty driver */ result = tty_register_driver(tty_drv); if (result) { printk(KERN_ERR "%s - tty_register_driver failed(%d)\n", __func__, result); return result; } /* register this module as an usb driver */ result = usb_register(&hso_driver); if (result) { printk(KERN_ERR "Could not register hso driver? 
error: %d\n", result); /* cleanup serial interface */ tty_unregister_driver(tty_drv); return result; } /* done */ return 0; } static void __exit hso_exit(void) { printk(KERN_INFO "hso: unloaded\n"); tty_unregister_driver(tty_drv); /* deregister the usb driver */ usb_deregister(&hso_driver); } /* Module definitions */ module_init(hso_init); module_exit(hso_exit); MODULE_AUTHOR(MOD_AUTHOR); MODULE_DESCRIPTION(MOD_DESCRIPTION); MODULE_LICENSE(MOD_LICENSE); MODULE_INFO(Version, DRIVER_VERSION); /* change the debug level (eg: insmod hso.ko debug=0x04) */ MODULE_PARM_DESC(debug, "Level of debug [0x01 | 0x02 | 0x04 | 0x08 | 0x10]"); module_param(debug, int, S_IRUGO | S_IWUSR); /* set the major tty number (eg: insmod hso.ko tty_major=245) */ MODULE_PARM_DESC(tty_major, "Set the major tty number"); module_param(tty_major, int, S_IRUGO | S_IWUSR); /* disable network interface (eg: insmod hso.ko disable_net=1) */ MODULE_PARM_DESC(disable_net, "Disable the network interface"); module_param(disable_net, int, S_IRUGO | S_IWUSR);
gpl-2.0
BhallaLab/moose-full
dependencies/gsl-2.0/eigen/genherm.c
38
5934
/* eigen/genherm.c
 * 
 * Copyright (C) 2007 Patrick Alken
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or (at
 * your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <stdlib.h>
#include <config.h>
#include <gsl/gsl_eigen.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_complex.h>
#include <gsl/gsl_complex_math.h>

/*
 * This module computes the eigenvalues of a complex generalized
 * hermitian-definite eigensystem A x = \lambda B x, where A and
 * B are hermitian, and B is positive-definite.
 */

/*
gsl_eigen_genherm_alloc()
  Allocate a workspace for solving the generalized hermitian-definite
eigenvalue problem. The size of this workspace is O(3n).

Inputs: n - size of matrices

Return: pointer to workspace
*/

gsl_eigen_genherm_workspace *
gsl_eigen_genherm_alloc(const size_t n)
{
  gsl_eigen_genherm_workspace *w;

  if (n == 0)
    {
      GSL_ERROR_NULL ("matrix dimension must be positive integer",
                      GSL_EINVAL);
    }

  /* calloc zero-initializes, so all pointer members start out NULL and
     gsl_eigen_genherm_free() is safe on a partially built workspace */
  w = (gsl_eigen_genherm_workspace *) calloc (1, sizeof (gsl_eigen_genherm_workspace));

  if (w == 0)
    {
      GSL_ERROR_NULL ("failed to allocate space for workspace", GSL_ENOMEM);
    }

  w->size = n;

  /* the generalized problem is reduced to a standard hermitian problem,
     which is solved with this nested workspace */
  w->herm_workspace_p = gsl_eigen_herm_alloc(n);
  if (!w->herm_workspace_p)
    {
      gsl_eigen_genherm_free(w);
      GSL_ERROR_NULL("failed to allocate space for herm workspace", GSL_ENOMEM);
    }

  return (w);
} /* gsl_eigen_genherm_alloc() */

/*
gsl_eigen_genherm_free()
  Free workspace w (NULL is tolerated via RETURN_IF_NULL)
*/

void
gsl_eigen_genherm_free (gsl_eigen_genherm_workspace * w)
{
  RETURN_IF_NULL (w);

  if (w->herm_workspace_p)
    gsl_eigen_herm_free(w->herm_workspace_p);

  free(w);
} /* gsl_eigen_genherm_free() */

/*
gsl_eigen_genherm()
  Solve the generalized hermitian-definite eigenvalue problem

A x = \lambda B x

for the eigenvalues \lambda.
Inputs: A    - complex hermitian matrix
        B    - complex hermitian and positive definite matrix
        eval - where to store eigenvalues
        w    - workspace

Return: success or error

Note: on output A and B are overwritten (A by the standardized matrix,
B by its Cholesky factor).
*/

int
gsl_eigen_genherm (gsl_matrix_complex * A, gsl_matrix_complex * B,
                   gsl_vector * eval, gsl_eigen_genherm_workspace * w)
{
  const size_t N = A->size1;

  /* check matrix and vector sizes */

  if (N != A->size2)
    {
      GSL_ERROR ("matrix must be square to compute eigenvalues", GSL_ENOTSQR);
    }
  else if ((N != B->size1) || (N != B->size2))
    {
      GSL_ERROR ("B matrix dimensions must match A", GSL_EBADLEN);
    }
  else if (eval->size != N)
    {
      GSL_ERROR ("eigenvalue vector must match matrix size", GSL_EBADLEN);
    }
  else if (w->size != N)
    {
      GSL_ERROR ("matrix size does not match workspace", GSL_EBADLEN);
    }
  else
    {
      int s;

      /* compute Cholesky factorization of B */
      s = gsl_linalg_complex_cholesky_decomp(B);
      if (s != GSL_SUCCESS)
        return s; /* B is not positive definite */

      /* transform to standard hermitian eigenvalue problem */
      gsl_eigen_genherm_standardize(A, B);

      /* the eigenvalues of the standardized matrix equal those of the
         original generalized problem */
      s = gsl_eigen_herm(A, eval, w->herm_workspace_p);

      return s;
    }
} /* gsl_eigen_genherm() */

/*
gsl_eigen_genherm_standardize()
  Reduce the generalized hermitian-definite eigenproblem to
the standard hermitian eigenproblem by computing

C = L^{-1} A L^{-H}

where L L^H is the Cholesky decomposition of B

Inputs: A - (input/output) complex hermitian matrix
        B - complex hermitian, positive definite matrix in Cholesky form

Return: success

Notes: A is overwritten by L^{-1} A L^{-H}
*/

int
gsl_eigen_genherm_standardize(gsl_matrix_complex *A,
                              const gsl_matrix_complex *B)
{
  const size_t N = A->size1;
  size_t i;
  double a, b;
  gsl_complex y, z;

  GSL_SET_IMAG(&z, 0.0);

  /* Column-by-column reduction working on the lower triangle only
     (A is hermitian).  NOTE(review): this looks analogous to the
     unblocked LAPACK routine for ITYPE=1 — confirm against ZHEGS2. */
  for (i = 0; i < N; ++i)
    {
      /* update lower triangle of A(i:n, i:n) */

      /* diagonal: a_ii <- a_ii / |l_ii|^2 (both diagonals are real) */
      y = gsl_matrix_complex_get(A, i, i);
      a = GSL_REAL(y);

      y = gsl_matrix_complex_get(B, i, i);
      b = GSL_REAL(y);

      a /= b * b;

      GSL_SET_REAL(&z, a);

      gsl_matrix_complex_set(A, i, i, z);

      if (i < N - 1)
        {
          /* views of the trailing column/submatrix below the diagonal */
          gsl_vector_complex_view ai =
            gsl_matrix_complex_subcolumn(A, i, i + 1, N - i - 1);
          gsl_matrix_complex_view ma =
            gsl_matrix_complex_submatrix(A, i + 1, i + 1, N - i - 1, N - i - 1);
          gsl_vector_complex_const_view bi =
            gsl_matrix_complex_const_subcolumn(B, i, i + 1, N - i - 1);
          gsl_matrix_complex_const_view mb =
            gsl_matrix_complex_const_submatrix(B, i + 1, i + 1, N - i - 1, N - i - 1);

          /* a_i <- a_i / l_ii */
          gsl_blas_zdscal(1.0 / b, &ai.vector);

          GSL_SET_REAL(&z, -0.5 * a);

          /* a_i <- a_i - (a_ii/2) b_i; the symmetric second half of
             this shift is re-applied after the rank-2 update below,
             so the order of these three BLAS calls is significant */
          gsl_blas_zaxpy(z, &bi.vector, &ai.vector);

          /* A(i+1:, i+1:) <- A - a_i b_i^H - b_i a_i^H (hermitian rank-2) */
          gsl_blas_zher2(CblasLower, GSL_COMPLEX_NEGONE, &ai.vector,
                         &bi.vector, &ma.matrix);

          gsl_blas_zaxpy(z, &bi.vector, &ai.vector);

          /* a_i <- L(i+1:, i+1:)^{-1} a_i (triangular solve) */
          gsl_blas_ztrsv(CblasLower, CblasNoTrans, CblasNonUnit,
                         &mb.matrix, &ai.vector);
        }
    }

  return GSL_SUCCESS;
} /* gsl_eigen_genherm_standardize() */
gpl-2.0
grate-driver/linux-2.6
net/irda/irnet/irnet_ppp.c
38
32898
/* * IrNET protocol module : Synchronous PPP over an IrDA socket. * * Jean II - HPL `00 - <jt@hpl.hp.com> * * This file implement the PPP interface and /dev/irnet character device. * The PPP interface hook to the ppp_generic module, handle all our * relationship to the PPP code in the kernel (and by extension to pppd), * and exchange PPP frames with this module (send/receive). * The /dev/irnet device is used primarily for 2 functions : * 1) as a stub for pppd (the ppp daemon), so that we can appropriately * generate PPP sessions (we pretend we are a tty). * 2) as a control channel (write commands, read events) */ #include <linux/sched.h> #include <linux/smp_lock.h> #include "irnet_ppp.h" /* Private header */ /* Please put other headers in irnet.h - Thanks */ /* Generic PPP callbacks (to call us) */ static struct ppp_channel_ops irnet_ppp_ops = { .start_xmit = ppp_irnet_send, .ioctl = ppp_irnet_ioctl }; /************************* CONTROL CHANNEL *************************/ /* * When a pppd instance is not active on /dev/irnet, it acts as a control * channel. * Writing allow to set up the IrDA destination of the IrNET channel, * and any application may be read events happening in IrNET... */ /*------------------------------------------------------------------*/ /* * Write is used to send a command to configure a IrNET channel * before it is open by pppd. The syntax is : "command argument" * Currently there is only two defined commands : * o name : set the requested IrDA nickname of the IrNET peer. * o addr : set the requested IrDA address of the IrNET peer. * Note : the code is crude, but effective... */ static inline ssize_t irnet_ctrl_write(irnet_socket * ap, const char __user *buf, size_t count) { char command[IRNET_MAX_COMMAND]; char * start; /* Current command being processed */ char * next; /* Next command to process */ int length; /* Length of current command */ DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); /* Check for overflow... 
*/ DABORT(count >= IRNET_MAX_COMMAND, -ENOMEM, CTRL_ERROR, "Too much data !!!\n"); /* Get the data in the driver */ if(copy_from_user(command, buf, count)) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } /* Safe terminate the string */ command[count] = '\0'; DEBUG(CTRL_INFO, "Command line received is ``%s'' (%Zd).\n", command, count); /* Check every commands in the command line */ next = command; while(next != NULL) { /* Look at the next command */ start = next; /* Scrap whitespaces before the command */ start = skip_spaces(start); /* ',' is our command separator */ next = strchr(start, ','); if(next) { *next = '\0'; /* Terminate command */ length = next - start; /* Length */ next++; /* Skip the '\0' */ } else length = strlen(start); DEBUG(CTRL_INFO, "Found command ``%s'' (%d).\n", start, length); /* Check if we recognised one of the known command * We can't use "switch" with strings, so hack with "continue" */ /* First command : name -> Requested IrDA nickname */ if(!strncmp(start, "name", 4)) { /* Copy the name only if is included and not "any" */ if((length > 5) && (strcmp(start + 5, "any"))) { /* Strip out trailing whitespaces */ while(isspace(start[length - 1])) length--; /* Copy the name for later reuse */ memcpy(ap->rname, start + 5, length - 5); ap->rname[length - 5] = '\0'; } else ap->rname[0] = '\0'; DEBUG(CTRL_INFO, "Got rname = ``%s''\n", ap->rname); /* Restart the loop */ continue; } /* Second command : addr, daddr -> Requested IrDA destination address * Also process : saddr -> Requested IrDA source address */ if((!strncmp(start, "addr", 4)) || (!strncmp(start, "daddr", 5)) || (!strncmp(start, "saddr", 5))) { __u32 addr = DEV_ADDR_ANY; /* Copy the address only if is included and not "any" */ if((length > 5) && (strcmp(start + 5, "any"))) { char * begp = start + 5; char * endp; /* Scrap whitespaces before the command */ begp = skip_spaces(begp); /* Convert argument to a number (last arg is the base) */ addr = 
simple_strtoul(begp, &endp, 16); /* Has it worked ? (endp should be start + length) */ DABORT(endp <= (start + 5), -EINVAL, CTRL_ERROR, "Invalid address.\n"); } /* Which type of address ? */ if(start[0] == 's') { /* Save it */ ap->rsaddr = addr; DEBUG(CTRL_INFO, "Got rsaddr = %08x\n", ap->rsaddr); } else { /* Save it */ ap->rdaddr = addr; DEBUG(CTRL_INFO, "Got rdaddr = %08x\n", ap->rdaddr); } /* Restart the loop */ continue; } /* Other possible command : connect N (number of retries) */ /* No command matched -> Failed... */ DABORT(1, -EINVAL, CTRL_ERROR, "Not a recognised IrNET command.\n"); } /* Success : we have parsed all commands successfully */ return(count); } #ifdef INITIAL_DISCOVERY /*------------------------------------------------------------------*/ /* * Function irnet_get_discovery_log (self) * * Query the content on the discovery log if not done * * This function query the current content of the discovery log * at the startup of the event channel and save it in the internal struct. */ static void irnet_get_discovery_log(irnet_socket * ap) { __u16 mask = irlmp_service_to_hint(S_LAN); /* Ask IrLMP for the current discovery log */ ap->discoveries = irlmp_get_discoveries(&ap->disco_number, mask, DISCOVERY_DEFAULT_SLOTS); /* Check if the we got some results */ if(ap->discoveries == NULL) ap->disco_number = -1; DEBUG(CTRL_INFO, "Got the log (0x%p), size is %d\n", ap->discoveries, ap->disco_number); } /*------------------------------------------------------------------*/ /* * Function irnet_read_discovery_log (self, event) * * Read the content on the discovery log * * This function dump the current content of the discovery log * at the startup of the event channel. * Return 1 if wrote an event on the control channel... 
* * State of the ap->disco_XXX variables : * Socket creation : discoveries = NULL ; disco_index = 0 ; disco_number = 0 * While reading : discoveries = ptr ; disco_index = X ; disco_number = Y * After reading : discoveries = NULL ; disco_index = Y ; disco_number = -1 */ static inline int irnet_read_discovery_log(irnet_socket * ap, char * event) { int done_event = 0; DENTER(CTRL_TRACE, "(ap=0x%p, event=0x%p)\n", ap, event); /* Test if we have some work to do or we have already finished */ if(ap->disco_number == -1) { DEBUG(CTRL_INFO, "Already done\n"); return 0; } /* Test if it's the first time and therefore we need to get the log */ if(ap->discoveries == NULL) irnet_get_discovery_log(ap); /* Check if we have more item to dump */ if(ap->disco_index < ap->disco_number) { /* Write an event */ sprintf(event, "Found %08x (%s) behind %08x {hints %02X-%02X}\n", ap->discoveries[ap->disco_index].daddr, ap->discoveries[ap->disco_index].info, ap->discoveries[ap->disco_index].saddr, ap->discoveries[ap->disco_index].hints[0], ap->discoveries[ap->disco_index].hints[1]); DEBUG(CTRL_INFO, "Writing discovery %d : %s\n", ap->disco_index, ap->discoveries[ap->disco_index].info); /* We have an event */ done_event = 1; /* Next discovery */ ap->disco_index++; } /* Check if we have done the last item */ if(ap->disco_index >= ap->disco_number) { /* No more items : remove the log and signal termination */ DEBUG(CTRL_INFO, "Cleaning up log (0x%p)\n", ap->discoveries); if(ap->discoveries != NULL) { /* Cleanup our copy of the discovery log */ kfree(ap->discoveries); ap->discoveries = NULL; } ap->disco_number = -1; } return done_event; } #endif /* INITIAL_DISCOVERY */ /*------------------------------------------------------------------*/ /* * Read is used to get IrNET events */ static inline ssize_t irnet_ctrl_read(irnet_socket * ap, struct file * file, char __user * buf, size_t count) { DECLARE_WAITQUEUE(wait, current); char event[64]; /* Max event is 61 char */ ssize_t ret = 0; 
DENTER(CTRL_TRACE, "(ap=0x%p, count=%Zd)\n", ap, count); /* Check if we can write an event out in one go */ DABORT(count < sizeof(event), -EOVERFLOW, CTRL_ERROR, "Buffer to small.\n"); #ifdef INITIAL_DISCOVERY /* Check if we have read the log */ if(irnet_read_discovery_log(ap, event)) { /* We have an event !!! Copy it to the user */ if(copy_to_user(buf, event, strlen(event))) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } DEXIT(CTRL_TRACE, "\n"); return(strlen(event)); } #endif /* INITIAL_DISCOVERY */ /* Put ourselves on the wait queue to be woken up */ add_wait_queue(&irnet_events.rwait, &wait); current->state = TASK_INTERRUPTIBLE; for(;;) { /* If there is unread events */ ret = 0; if(ap->event_index != irnet_events.index) break; ret = -EAGAIN; if(file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if(signal_pending(current)) break; /* Yield and wait to be woken up */ schedule(); } current->state = TASK_RUNNING; remove_wait_queue(&irnet_events.rwait, &wait); /* Did we got it ? */ if(ret != 0) { /* No, return the error code */ DEXIT(CTRL_TRACE, " - ret %Zd\n", ret); return ret; } /* Which event is it ? 
*/ switch(irnet_events.log[ap->event_index].event) { case IRNET_DISCOVER: sprintf(event, "Discovered %08x (%s) behind %08x {hints %02X-%02X}\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr, irnet_events.log[ap->event_index].hints.byte[0], irnet_events.log[ap->event_index].hints.byte[1]); break; case IRNET_EXPIRE: sprintf(event, "Expired %08x (%s) behind %08x {hints %02X-%02X}\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr, irnet_events.log[ap->event_index].hints.byte[0], irnet_events.log[ap->event_index].hints.byte[1]); break; case IRNET_CONNECT_TO: sprintf(event, "Connected to %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_CONNECT_FROM: sprintf(event, "Connection from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_REQUEST_FROM: sprintf(event, "Request from %08x (%s) behind %08x\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].saddr); break; case IRNET_NOANSWER_FROM: sprintf(event, "No-answer from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_BLOCKED_LINK: sprintf(event, "Blocked link with %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_DISCONNECT_FROM: sprintf(event, "Disconnection from %08x (%s) on ppp%d\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name, irnet_events.log[ap->event_index].unit); break; case IRNET_DISCONNECT_TO: sprintf(event, 
"Disconnected to %08x (%s)\n", irnet_events.log[ap->event_index].daddr, irnet_events.log[ap->event_index].name); break; default: sprintf(event, "Bug\n"); } /* Increment our event index */ ap->event_index = (ap->event_index + 1) % IRNET_MAX_EVENTS; DEBUG(CTRL_INFO, "Event is :%s", event); /* Copy it to the user */ if(copy_to_user(buf, event, strlen(event))) { DERROR(CTRL_ERROR, "Invalid user space pointer.\n"); return -EFAULT; } DEXIT(CTRL_TRACE, "\n"); return(strlen(event)); } /*------------------------------------------------------------------*/ /* * Poll : called when someone do a select on /dev/irnet. * Just check if there are new events... */ static inline unsigned int irnet_ctrl_poll(irnet_socket * ap, struct file * file, poll_table * wait) { unsigned int mask; DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap); poll_wait(file, &irnet_events.rwait, wait); mask = POLLOUT | POLLWRNORM; /* If there is unread events */ if(ap->event_index != irnet_events.index) mask |= POLLIN | POLLRDNORM; #ifdef INITIAL_DISCOVERY if(ap->disco_number != -1) { /* Test if it's the first time and therefore we need to get the log */ if(ap->discoveries == NULL) irnet_get_discovery_log(ap); /* Recheck */ if(ap->disco_number != -1) mask |= POLLIN | POLLRDNORM; } #endif /* INITIAL_DISCOVERY */ DEXIT(CTRL_TRACE, " - mask=0x%X\n", mask); return mask; } /*********************** FILESYSTEM CALLBACKS ***********************/ /* * Implement the usual open, read, write functions that will be called * by the file system when some action is performed on /dev/irnet. * Most of those actions will in fact be performed by "pppd" or * the control channel, we just act as a redirector... */ /*------------------------------------------------------------------*/ /* * Open : when somebody open /dev/irnet * We basically create a new instance of irnet and initialise it. 
*/ static int dev_irnet_open(struct inode * inode, struct file * file) { struct irnet_socket * ap; int err; DENTER(FS_TRACE, "(file=0x%p)\n", file); #ifdef SECURE_DEVIRNET /* This could (should?) be enforced by the permissions on /dev/irnet. */ if(!capable(CAP_NET_ADMIN)) return -EPERM; #endif /* SECURE_DEVIRNET */ /* Allocate a private structure for this IrNET instance */ ap = kzalloc(sizeof(*ap), GFP_KERNEL); DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); lock_kernel(); /* initialize the irnet structure */ ap->file = file; /* PPP channel setup */ ap->ppp_open = 0; ap->chan.private = ap; ap->chan.ops = &irnet_ppp_ops; ap->chan.mtu = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN); ap->chan.hdrlen = 2 + TTP_MAX_HEADER; /* for A/C + Max IrDA hdr */ /* PPP parameters */ ap->mru = (2048 - TTP_MAX_HEADER - 2 - PPP_HDRLEN); ap->xaccm[0] = ~0U; ap->xaccm[3] = 0x60000000U; ap->raccm = ~0U; /* Setup the IrDA part... */ err = irda_irnet_create(ap); if(err) { DERROR(FS_ERROR, "Can't setup IrDA link...\n"); kfree(ap); unlock_kernel(); return err; } /* For the control channel */ ap->event_index = irnet_events.index; /* Cancel all past events */ /* Put our stuff where we will be able to find it later */ file->private_data = ap; DEXIT(FS_TRACE, " - ap=0x%p\n", ap); unlock_kernel(); return 0; } /*------------------------------------------------------------------*/ /* * Close : when somebody close /dev/irnet * Destroy the instance of /dev/irnet */ static int dev_irnet_close(struct inode * inode, struct file * file) { irnet_socket * ap = (struct irnet_socket *) file->private_data; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", file, ap); DABORT(ap == NULL, 0, FS_ERROR, "ap is NULL !!!\n"); /* Detach ourselves */ file->private_data = NULL; /* Close IrDA stuff */ irda_irnet_destroy(ap); /* Disconnect from the generic PPP layer if not already done */ if(ap->ppp_open) { DERROR(FS_ERROR, "Channel still registered - deregistering !\n"); ap->ppp_open = 0; 
ppp_unregister_channel(&ap->chan); } kfree(ap); DEXIT(FS_TRACE, "\n"); return 0; } /*------------------------------------------------------------------*/ /* * Write does nothing. * (we receive packet from ppp_generic through ppp_irnet_send()) */ static ssize_t dev_irnet_write(struct file * file, const char __user *buf, size_t count, loff_t * ppos) { irnet_socket * ap = (struct irnet_socket *) file->private_data; DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", file, ap, count); DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(ap->ppp_open) return -EAGAIN; else return irnet_ctrl_write(ap, buf, count); } /*------------------------------------------------------------------*/ /* * Read doesn't do much either. * (pppd poll us, but ultimately reads through /dev/ppp) */ static ssize_t dev_irnet_read(struct file * file, char __user * buf, size_t count, loff_t * ppos) { irnet_socket * ap = (struct irnet_socket *) file->private_data; DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", file, ap, count); DABORT(ap == NULL, -ENXIO, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(ap->ppp_open) return -EAGAIN; else return irnet_ctrl_read(ap, file, buf, count); } /*------------------------------------------------------------------*/ /* * Poll : called when someone do a select on /dev/irnet */ static unsigned int dev_irnet_poll(struct file * file, poll_table * wait) { irnet_socket * ap = (struct irnet_socket *) file->private_data; unsigned int mask; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", file, ap); mask = POLLOUT | POLLWRNORM; DABORT(ap == NULL, mask, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ if(!ap->ppp_open) mask |= irnet_ctrl_poll(ap, file, wait); DEXIT(FS_TRACE, " - mask=0x%X\n", mask); return(mask); } /*------------------------------------------------------------------*/ /* * IOCtl : Called 
when someone does some ioctls on /dev/irnet * This is the way pppd configure us and control us while the PPP * instance is active. */ static long dev_irnet_ioctl( struct file * file, unsigned int cmd, unsigned long arg) { irnet_socket * ap = (struct irnet_socket *) file->private_data; int err; int val; void __user *argp = (void __user *)arg; DENTER(FS_TRACE, "(file=0x%p, ap=0x%p, cmd=0x%X)\n", file, ap, cmd); /* Basic checks... */ DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n"); #ifdef SECURE_DEVIRNET if(!capable(CAP_NET_ADMIN)) return -EPERM; #endif /* SECURE_DEVIRNET */ err = -EFAULT; switch(cmd) { /* Set discipline (should be N_SYNC_PPP or N_TTY) */ case TIOCSETD: if(get_user(val, (int __user *)argp)) break; if((val == N_SYNC_PPP) || (val == N_PPP)) { DEBUG(FS_INFO, "Entering PPP discipline.\n"); /* PPP channel setup (ap->chan in configued in dev_irnet_open())*/ lock_kernel(); err = ppp_register_channel(&ap->chan); if(err == 0) { /* Our ppp side is active */ ap->ppp_open = 1; DEBUG(FS_INFO, "Trying to establish a connection.\n"); /* Setup the IrDA link now - may fail... */ irda_irnet_connect(ap); } else DERROR(FS_ERROR, "Can't setup PPP channel...\n"); unlock_kernel(); } else { /* In theory, should be N_TTY */ DEBUG(FS_INFO, "Exiting PPP discipline.\n"); /* Disconnect from the generic PPP layer */ lock_kernel(); if(ap->ppp_open) { ap->ppp_open = 0; ppp_unregister_channel(&ap->chan); } else DERROR(FS_ERROR, "Channel not registered !\n"); err = 0; unlock_kernel(); } break; /* Query PPP channel and unit number */ case PPPIOCGCHAN: lock_kernel(); if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) err = 0; unlock_kernel(); break; case PPPIOCGUNIT: lock_kernel(); if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) err = 0; unlock_kernel(); break; /* All these ioctls can be passed both directly and from ppp_generic, * so we just deal with them in one place... 
*/ case PPPIOCGFLAGS: case PPPIOCSFLAGS: case PPPIOCGASYNCMAP: case PPPIOCSASYNCMAP: case PPPIOCGRASYNCMAP: case PPPIOCSRASYNCMAP: case PPPIOCGXASYNCMAP: case PPPIOCSXASYNCMAP: case PPPIOCGMRU: case PPPIOCSMRU: DEBUG(FS_INFO, "Standard PPP ioctl.\n"); if(!capable(CAP_NET_ADMIN)) err = -EPERM; else { lock_kernel(); err = ppp_irnet_ioctl(&ap->chan, cmd, arg); unlock_kernel(); } break; /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ /* Get termios */ case TCGETS: DEBUG(FS_INFO, "Get termios.\n"); lock_kernel(); #ifndef TCGETS2 if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) err = 0; #else if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) err = 0; #endif unlock_kernel(); break; /* Set termios */ case TCSETSF: DEBUG(FS_INFO, "Set termios.\n"); lock_kernel(); #ifndef TCGETS2 if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) err = 0; #else if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) err = 0; #endif unlock_kernel(); break; /* Set DTR/RTS */ case TIOCMBIS: case TIOCMBIC: /* Set exclusive/non-exclusive mode */ case TIOCEXCL: case TIOCNXCL: DEBUG(FS_INFO, "TTY compatibility.\n"); err = 0; break; case TCGETA: DEBUG(FS_INFO, "TCGETA\n"); break; case TCFLSH: DEBUG(FS_INFO, "TCFLSH\n"); /* Note : this will flush buffers in PPP, so it *must* be done * We should also worry that we don't accept junk here and that * we get rid of our own buffers */ #ifdef FLUSH_TO_PPP lock_kernel(); ppp_output_wakeup(&ap->chan); unlock_kernel(); #endif /* FLUSH_TO_PPP */ err = 0; break; case FIONREAD: DEBUG(FS_INFO, "FIONREAD\n"); val = 0; if(put_user(val, (int __user *)argp)) break; err = 0; break; default: DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd); err = -ENOTTY; } DEXIT(FS_TRACE, " - err = 0x%X\n", err); return err; } /************************** PPP CALLBACKS **************************/ /* * This are the functions that the generic PPP 
driver in the kernel * will call to communicate to us. */ /*------------------------------------------------------------------*/ /* * Prepare the ppp frame for transmission over the IrDA socket. * We make sure that the header space is enough, and we change ppp header * according to flags passed by pppd. * This is not a callback, but just a helper function used in ppp_irnet_send() */ static inline struct sk_buff * irnet_prepare_skb(irnet_socket * ap, struct sk_buff * skb) { unsigned char * data; int proto; /* PPP protocol */ int islcp; /* Protocol == LCP */ int needaddr; /* Need PPP address */ DENTER(PPP_TRACE, "(ap=0x%p, skb=0x%p)\n", ap, skb); /* Extract PPP protocol from the frame */ data = skb->data; proto = (data[0] << 8) + data[1]; /* LCP packets with codes between 1 (configure-request) * and 7 (code-reject) must be sent as though no options * have been negotiated. */ islcp = (proto == PPP_LCP) && (1 <= data[2]) && (data[2] <= 7); /* compress protocol field if option enabled */ if((data[0] == 0) && (ap->flags & SC_COMP_PROT) && (!islcp)) skb_pull(skb,1); /* Check if we need address/control fields */ needaddr = 2*((ap->flags & SC_COMP_AC) == 0 || islcp); /* Is the skb headroom large enough to contain all IrDA-headers? */ if((skb_headroom(skb) < (ap->max_header_size + needaddr)) || (skb_shared(skb))) { struct sk_buff * new_skb; DEBUG(PPP_INFO, "Reallocating skb\n"); /* Create a new skb */ new_skb = skb_realloc_headroom(skb, ap->max_header_size + needaddr); /* We have to free the original skb anyway */ dev_kfree_skb(skb); /* Did the realloc succeed ? 
*/ DABORT(new_skb == NULL, NULL, PPP_ERROR, "Could not realloc skb\n"); /* Use the new skb instead */ skb = new_skb; } /* prepend address/control fields if necessary */ if(needaddr) { skb_push(skb, 2); skb->data[0] = PPP_ALLSTATIONS; skb->data[1] = PPP_UI; } DEXIT(PPP_TRACE, "\n"); return skb; } /*------------------------------------------------------------------*/ /* * Send a packet to the peer over the IrTTP connection. * Returns 1 iff the packet was accepted. * Returns 0 iff packet was not consumed. * If the packet was not accepted, we will call ppp_output_wakeup * at some later time to reactivate flow control in ppp_generic. */ static int ppp_irnet_send(struct ppp_channel * chan, struct sk_buff * skb) { irnet_socket * self = (struct irnet_socket *) chan->private; int ret; DENTER(PPP_TRACE, "(channel=0x%p, ap/self=0x%p)\n", chan, self); /* Check if things are somewhat valid... */ DASSERT(self != NULL, 0, PPP_ERROR, "Self is NULL !!!\n"); /* Check if we are connected */ if(!(test_bit(0, &self->ttp_open))) { #ifdef CONNECT_IN_SEND /* Let's try to connect one more time... */ /* Note : we won't be connected after this call, but we should be * ready for next packet... */ /* If we are already connecting, this will fail */ irda_irnet_connect(self); #endif /* CONNECT_IN_SEND */ DEBUG(PPP_INFO, "IrTTP not ready ! (%ld-%ld)\n", self->ttp_open, self->ttp_connect); /* Note : we can either drop the packet or block the packet. * * Blocking the packet allow us a better connection time, * because by calling ppp_output_wakeup() we can have * ppp_generic resending the LCP request immediately to us, * rather than waiting for one of pppd periodic transmission of * LCP request. * * On the other hand, if we block all packet, all those periodic * transmissions of pppd accumulate in ppp_generic, creating a * backlog of LCP request. When we eventually connect later on, * we have to transmit all this backlog before we can connect * proper (if we don't timeout before). 
* * The current strategy is as follow : * While we are attempting to connect, we block packets to get * a better connection time. * If we fail to connect, we drain the queue and start dropping packets */ #ifdef BLOCK_WHEN_CONNECT /* If we are attempting to connect */ if(test_bit(0, &self->ttp_connect)) { /* Blocking packet, ppp_generic will retry later */ return 0; } #endif /* BLOCK_WHEN_CONNECT */ /* Dropping packet, pppd will retry later */ dev_kfree_skb(skb); return 1; } /* Check if the queue can accept any packet, otherwise block */ if(self->tx_flow != FLOW_START) DRETURN(0, PPP_INFO, "IrTTP queue full (%d skbs)...\n", skb_queue_len(&self->tsap->tx_queue)); /* Prepare ppp frame for transmission */ skb = irnet_prepare_skb(self, skb); DABORT(skb == NULL, 1, PPP_ERROR, "Prepare skb for Tx failed.\n"); /* Send the packet to IrTTP */ ret = irttp_data_request(self->tsap, skb); if(ret < 0) { /* * > IrTTPs tx queue is full, so we just have to * > drop the frame! You might think that we should * > just return -1 and don't deallocate the frame, * > but that is dangerous since it's possible that * > we have replaced the original skb with a new * > one with larger headroom, and that would really * > confuse do_dev_queue_xmit() in dev.c! I have * > tried :-) DB * Correction : we verify the flow control above (self->tx_flow), * so we come here only if IrTTP doesn't like the packet (empty, * too large, IrTTP not connected). In those rare cases, it's ok * to drop it, we don't want to see it here again... * Jean II */ DERROR(PPP_ERROR, "IrTTP doesn't like this packet !!! (0x%X)\n", ret); /* irttp_data_request already free the packet */ } DEXIT(PPP_TRACE, "\n"); return 1; /* Packet has been consumed */ } /*------------------------------------------------------------------*/ /* * Take care of the ioctls that ppp_generic doesn't want to deal with... * Note : we are also called from dev_irnet_ioctl(). 
*/
/*
 * Handle the PPP ioctls on this channel.  Called both from ppp_generic
 * and directly from dev_irnet_ioctl().
 * Returns 0 on success, -EFAULT on a failed user-space copy,
 * -ENOIOCTLCMD for anything we don't implement.
 * Note: err defaults to -EFAULT; every case that completes resets it to 0.
 */
static int
ppp_irnet_ioctl(struct ppp_channel * chan,
		unsigned int cmd, unsigned long arg)
{
  irnet_socket *	ap = (struct irnet_socket *) chan->private;
  int			err;
  int			val;
  u32			accm[8];	/* Scratch copy of the extended async map */
  void __user *argp = (void __user *)arg;

  DENTER(PPP_TRACE, "(channel=0x%p, ap=0x%p, cmd=0x%X)\n",
	 chan, ap, cmd);

  /* Basic checks... */
  DASSERT(ap != NULL, -ENXIO, PPP_ERROR, "ap is NULL...\n");

  err = -EFAULT;
  switch(cmd)
    {
      /* PPP flags */
    case PPPIOCGFLAGS:
      /* Report both the transmit flags and the receive bits as one word */
      val = ap->flags | ap->rbits;
      if(put_user(val, (int __user *) argp))
	break;
      err = 0;
      break;
    case PPPIOCSFLAGS:
      if(get_user(val, (int __user *) argp))
	break;
      /* Split the word back into transmit flags and receive bits */
      ap->flags = val & ~SC_RCV_BITS;
      ap->rbits = val & SC_RCV_BITS;
      err = 0;
      break;

      /* Async map stuff - all dummy to please pppd */
    case PPPIOCGASYNCMAP:
      if(put_user(ap->xaccm[0], (u32 __user *) argp))
	break;
      err = 0;
      break;
    case PPPIOCSASYNCMAP:
      if(get_user(ap->xaccm[0], (u32 __user *) argp))
	break;
      err = 0;
      break;
    case PPPIOCGRASYNCMAP:
      if(put_user(ap->raccm, (u32 __user *) argp))
	break;
      err = 0;
      break;
    case PPPIOCSRASYNCMAP:
      if(get_user(ap->raccm, (u32 __user *) argp))
	break;
      err = 0;
      break;
    case PPPIOCGXASYNCMAP:
      if(copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
	break;
      err = 0;
      break;
    case PPPIOCSXASYNCMAP:
      /* Copy to a scratch buffer first so a failed copy_from_user()
       * cannot leave ap->xaccm half-written */
      if(copy_from_user(accm, argp, sizeof(accm)))
	break;
      accm[2] &= ~0x40000000U;		/* can't escape 0x5e */
      accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
      memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
      err = 0;
      break;

      /* Max PPP frame size */
    case PPPIOCGMRU:
      if(put_user(ap->mru, (int __user *) argp))
	break;
      err = 0;
      break;
    case PPPIOCSMRU:
      if(get_user(val, (int __user *) argp))
	break;
      /* Clamp to the minimum PPP MRU */
      if(val < PPP_MRU)
	val = PPP_MRU;
      ap->mru = val;
      err = 0;
      break;

    default:
      DEBUG(PPP_INFO, "Unsupported ioctl (0x%X)\n", cmd);
      err = -ENOIOCTLCMD;
    }

  DEXIT(PPP_TRACE, " - err = 0x%X\n", err);
  return err;
}

/************************** INITIALISATION **************************/
/*
 * Module initialisation and all that jazz...
*/ /*------------------------------------------------------------------*/ /* * Hook our device callbacks in the filesystem, to connect our code * to /dev/irnet */ static inline int __init ppp_irnet_init(void) { int err = 0; DENTER(MODULE_TRACE, "()\n"); /* Allocate ourselves as a minor in the misc range */ err = misc_register(&irnet_misc_device); DEXIT(MODULE_TRACE, "\n"); return err; } /*------------------------------------------------------------------*/ /* * Cleanup at exit... */ static inline void __exit ppp_irnet_cleanup(void) { DENTER(MODULE_TRACE, "()\n"); /* De-allocate /dev/irnet minor in misc range */ misc_deregister(&irnet_misc_device); DEXIT(MODULE_TRACE, "\n"); } /*------------------------------------------------------------------*/ /* * Module main entry point */ static int __init irnet_init(void) { int err; /* Initialise both parts... */ err = irda_irnet_init(); if(!err) err = ppp_irnet_init(); return err; } /*------------------------------------------------------------------*/ /* * Module exit */ static void __exit irnet_cleanup(void) { irda_irnet_cleanup(); ppp_irnet_cleanup(); } /*------------------------------------------------------------------*/ /* * Module magic */ module_init(irnet_init); module_exit(irnet_cleanup); MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>"); MODULE_DESCRIPTION("IrNET : Synchronous PPP over IrDA"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV(10, 187);
gpl-2.0
rockly703/original-linux-2.6.28
arch/powerpc/kernel/rtas.c
38
21681
/* * * Procedures for interfacing to the RTAS on CHRP machines. * * Peter Bergner, IBM March 2001. * Copyright (C) 2001 IBM. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/delay.h> #include <linux/smp.h> #include <linux/completion.h> #include <linux/cpumask.h> #include <linux/lmb.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/hvcall.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/page.h> #include <asm/param.h> #include <asm/system.h> #include <asm/delay.h> #include <asm/uaccess.h> #include <asm/udbg.h> #include <asm/syscalls.h> #include <asm/smp.h> #include <asm/atomic.h> struct rtas_t rtas = { .lock = SPIN_LOCK_UNLOCKED }; EXPORT_SYMBOL(rtas); struct rtas_suspend_me_data { atomic_t working; /* number of cpus accessing this struct */ int token; /* ibm,suspend-me */ int error; struct completion *complete; /* wait on this until working == 0 */ }; DEFINE_SPINLOCK(rtas_data_buf_lock); EXPORT_SYMBOL(rtas_data_buf_lock); char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; EXPORT_SYMBOL(rtas_data_buf); unsigned long rtas_rmo_buf; /* * If non-NULL, this gets called when the kernel terminates. * This is done like this so rtas_flash can be a module. */ void (*rtas_flash_term_hook)(int); EXPORT_SYMBOL(rtas_flash_term_hook); /* * call_rtas_display_status and call_rtas_display_status_delay * are designed only for very early low-level debugging, which * is why the token is hard-coded to 10. 
*/ static void call_rtas_display_status(char c) { struct rtas_args *args = &rtas.args; unsigned long s; if (!rtas.base) return; spin_lock_irqsave(&rtas.lock, s); args->token = 10; args->nargs = 1; args->nret = 1; args->rets = (rtas_arg_t *)&(args->args[1]); args->args[0] = (unsigned char)c; enter_rtas(__pa(args)); spin_unlock_irqrestore(&rtas.lock, s); } static void call_rtas_display_status_delay(char c) { static int pending_newline = 0; /* did last write end with unprinted newline? */ static int width = 16; if (c == '\n') { while (width-- > 0) call_rtas_display_status(' '); width = 16; mdelay(500); pending_newline = 1; } else { if (pending_newline) { call_rtas_display_status('\r'); call_rtas_display_status('\n'); } pending_newline = 0; if (width--) { call_rtas_display_status(c); udelay(10000); } } } void __init udbg_init_rtas_panel(void) { udbg_putc = call_rtas_display_status_delay; } #ifdef CONFIG_UDBG_RTAS_CONSOLE /* If you think you're dying before early_init_dt_scan_rtas() does its * work, you can hard code the token values for your firmware here and * hardcode rtas.base/entry etc. 
*/ static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE; static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE; static void udbg_rtascon_putc(char c) { int tries; if (!rtas.base) return; /* Add CRs before LFs */ if (c == '\n') udbg_rtascon_putc('\r'); /* if there is more than one character to be displayed, wait a bit */ for (tries = 0; tries < 16; tries++) { if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0) break; udelay(1000); } } static int udbg_rtascon_getc_poll(void) { int c; if (!rtas.base) return -1; if (rtas_call(rtas_getchar_token, 0, 2, &c)) return -1; return c; } static int udbg_rtascon_getc(void) { int c; while ((c = udbg_rtascon_getc_poll()) == -1) ; return c; } void __init udbg_init_rtas_console(void) { udbg_putc = udbg_rtascon_putc; udbg_getc = udbg_rtascon_getc; udbg_getc_poll = udbg_rtascon_getc_poll; } #endif /* CONFIG_UDBG_RTAS_CONSOLE */ void rtas_progress(char *s, unsigned short hex) { struct device_node *root; int width; const int *p; char *os; static int display_character, set_indicator; static int display_width, display_lines, form_feed; static const int *row_width; static DEFINE_SPINLOCK(progress_lock); static int current_line; static int pending_newline = 0; /* did last write end with unprinted newline? 
*/ if (!rtas.base) return; if (display_width == 0) { display_width = 0x10; if ((root = of_find_node_by_path("/rtas"))) { if ((p = of_get_property(root, "ibm,display-line-length", NULL))) display_width = *p; if ((p = of_get_property(root, "ibm,form-feed", NULL))) form_feed = *p; if ((p = of_get_property(root, "ibm,display-number-of-lines", NULL))) display_lines = *p; row_width = of_get_property(root, "ibm,display-truncation-length", NULL); of_node_put(root); } display_character = rtas_token("display-character"); set_indicator = rtas_token("set-indicator"); } if (display_character == RTAS_UNKNOWN_SERVICE) { /* use hex display if available */ if (set_indicator != RTAS_UNKNOWN_SERVICE) rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex); return; } spin_lock(&progress_lock); /* * Last write ended with newline, but we didn't print it since * it would just clear the bottom line of output. Print it now * instead. * * If no newline is pending and form feed is supported, clear the * display with a form feed; otherwise, print a CR to start output * at the beginning of the line. */ if (pending_newline) { rtas_call(display_character, 1, 1, NULL, '\r'); rtas_call(display_character, 1, 1, NULL, '\n'); pending_newline = 0; } else { current_line = 0; if (form_feed) rtas_call(display_character, 1, 1, NULL, (char)form_feed); else rtas_call(display_character, 1, 1, NULL, '\r'); } if (row_width) width = row_width[current_line]; else width = display_width; os = s; while (*os) { if (*os == '\n' || *os == '\r') { /* If newline is the last character, save it * until next call to avoid bumping up the * display output. 
*/ if (*os == '\n' && !os[1]) { pending_newline = 1; current_line++; if (current_line > display_lines-1) current_line = display_lines-1; spin_unlock(&progress_lock); return; } /* RTAS wants CR-LF, not just LF */ if (*os == '\n') { rtas_call(display_character, 1, 1, NULL, '\r'); rtas_call(display_character, 1, 1, NULL, '\n'); } else { /* CR might be used to re-draw a line, so we'll * leave it alone and not add LF. */ rtas_call(display_character, 1, 1, NULL, *os); } if (row_width) width = row_width[current_line]; else width = display_width; } else { width--; rtas_call(display_character, 1, 1, NULL, *os); } os++; /* if we overwrite the screen length */ if (width <= 0) while ((*os != 0) && (*os != '\n') && (*os != '\r')) os++; } spin_unlock(&progress_lock); } EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */ int rtas_token(const char *service) { const int *tokp; if (rtas.dev == NULL) return RTAS_UNKNOWN_SERVICE; tokp = of_get_property(rtas.dev, service, NULL); return tokp ? *tokp : RTAS_UNKNOWN_SERVICE; } EXPORT_SYMBOL(rtas_token); int rtas_service_present(const char *service) { return rtas_token(service) != RTAS_UNKNOWN_SERVICE; } EXPORT_SYMBOL(rtas_service_present); #ifdef CONFIG_RTAS_ERROR_LOGGING /* * Return the firmware-specified size of the error log buffer * for all rtas calls that require an error buffer argument. * This includes 'check-exception' and 'rtas-last-error'. 
*/ int rtas_get_error_log_max(void) { static int rtas_error_log_max; if (rtas_error_log_max) return rtas_error_log_max; rtas_error_log_max = rtas_token ("rtas-error-log-max"); if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) || (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) { printk (KERN_WARNING "RTAS: bad log buffer size %d\n", rtas_error_log_max); rtas_error_log_max = RTAS_ERROR_LOG_MAX; } return rtas_error_log_max; } EXPORT_SYMBOL(rtas_get_error_log_max); static char rtas_err_buf[RTAS_ERROR_LOG_MAX]; static int rtas_last_error_token; /** Return a copy of the detailed error text associated with the * most recent failed call to rtas. Because the error text * might go stale if there are any other intervening rtas calls, * this routine must be called atomically with whatever produced * the error (i.e. with rtas.lock still held from the previous call). */ static char *__fetch_rtas_last_error(char *altbuf) { struct rtas_args err_args, save_args; u32 bufsz; char *buf = NULL; if (rtas_last_error_token == -1) return NULL; bufsz = rtas_get_error_log_max(); err_args.token = rtas_last_error_token; err_args.nargs = 2; err_args.nret = 1; err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf); err_args.args[1] = bufsz; err_args.args[2] = 0; save_args = rtas.args; rtas.args = err_args; enter_rtas(__pa(&rtas.args)); err_args = rtas.args; rtas.args = save_args; /* Log the error in the unlikely case that there was one. */ if (unlikely(err_args.args[2] == 0)) { if (altbuf) { buf = altbuf; } else { buf = rtas_err_buf; if (mem_init_done) buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC); } if (buf) memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX); } return buf; } #define get_errorlog_buffer() kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL) #else /* CONFIG_RTAS_ERROR_LOGGING */ #define __fetch_rtas_last_error(x) NULL #define get_errorlog_buffer() NULL #endif int rtas_call(int token, int nargs, int nret, int *outputs, ...) 
{ va_list list; int i; unsigned long s; struct rtas_args *rtas_args; char *buff_copy = NULL; int ret; if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) return -1; /* Gotta do something different here, use global lock for now... */ spin_lock_irqsave(&rtas.lock, s); rtas_args = &rtas.args; rtas_args->token = token; rtas_args->nargs = nargs; rtas_args->nret = nret; rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]); va_start(list, outputs); for (i = 0; i < nargs; ++i) rtas_args->args[i] = va_arg(list, rtas_arg_t); va_end(list); for (i = 0; i < nret; ++i) rtas_args->rets[i] = 0; enter_rtas(__pa(rtas_args)); /* A -1 return code indicates that the last command couldn't be completed due to a hardware error. */ if (rtas_args->rets[0] == -1) buff_copy = __fetch_rtas_last_error(NULL); if (nret > 1 && outputs != NULL) for (i = 0; i < nret-1; ++i) outputs[i] = rtas_args->rets[i+1]; ret = (nret > 0)? rtas_args->rets[0]: 0; /* Gotta do something different here, use global lock for now... */ spin_unlock_irqrestore(&rtas.lock, s); if (buff_copy) { log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); if (mem_init_done) kfree(buff_copy); } return ret; } EXPORT_SYMBOL(rtas_call); /* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds. */ unsigned int rtas_busy_delay_time(int status) { int order; unsigned int ms = 0; if (status == RTAS_BUSY) { ms = 1; } else if (status >= 9900 && status <= 9905) { order = status - 9900; for (ms = 1; order > 0; order--) ms *= 10; } return ms; } EXPORT_SYMBOL(rtas_busy_delay_time); /* For an RTAS busy status code, perform the hinted delay. 
*/ unsigned int rtas_busy_delay(int status) { unsigned int ms; might_sleep(); ms = rtas_busy_delay_time(status); if (ms) msleep(ms); return ms; } EXPORT_SYMBOL(rtas_busy_delay); static int rtas_error_rc(int rtas_rc) { int rc; switch (rtas_rc) { case -1: /* Hardware Error */ rc = -EIO; break; case -3: /* Bad indicator/domain/etc */ rc = -EINVAL; break; case -9000: /* Isolation error */ rc = -EFAULT; break; case -9001: /* Outstanding TCE/PTE */ rc = -EEXIST; break; case -9002: /* No usable slot */ rc = -ENODEV; break; default: printk(KERN_ERR "%s: unexpected RTAS error %d\n", __func__, rtas_rc); rc = -ERANGE; break; } return rc; } int rtas_get_power_level(int powerdomain, int *level) { int token = rtas_token("get-power-level"); int rc; if (token == RTAS_UNKNOWN_SERVICE) return -ENOENT; while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY) udelay(1); if (rc < 0) return rtas_error_rc(rc); return rc; } EXPORT_SYMBOL(rtas_get_power_level); int rtas_set_power_level(int powerdomain, int level, int *setlevel) { int token = rtas_token("set-power-level"); int rc; if (token == RTAS_UNKNOWN_SERVICE) return -ENOENT; do { rc = rtas_call(token, 2, 2, setlevel, powerdomain, level); } while (rtas_busy_delay(rc)); if (rc < 0) return rtas_error_rc(rc); return rc; } EXPORT_SYMBOL(rtas_set_power_level); int rtas_get_sensor(int sensor, int index, int *state) { int token = rtas_token("get-sensor-state"); int rc; if (token == RTAS_UNKNOWN_SERVICE) return -ENOENT; do { rc = rtas_call(token, 2, 2, state, sensor, index); } while (rtas_busy_delay(rc)); if (rc < 0) return rtas_error_rc(rc); return rc; } EXPORT_SYMBOL(rtas_get_sensor); int rtas_set_indicator(int indicator, int index, int new_value) { int token = rtas_token("set-indicator"); int rc; if (token == RTAS_UNKNOWN_SERVICE) return -ENOENT; do { rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value); } while (rtas_busy_delay(rc)); if (rc < 0) return rtas_error_rc(rc); return rc; } 
EXPORT_SYMBOL(rtas_set_indicator);

/*
 * Ignoring RTAS extended delay
 */
int rtas_set_indicator_fast(int indicator, int index, int new_value)
{
	int rc;
	int token = rtas_token("set-indicator");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);

	/* This variant deliberately never sleeps, so a busy or extended
	 * delay status from firmware is unexpected — flag it. */
	WARN_ON(rc == -2 || (rc >= 9900 && rc <= 9905));

	if (rc < 0)
		return rtas_error_rc(rc);

	return rc;
}

/* Reboot via the RTAS "system-reboot" service; spins forever on failure. */
void rtas_restart(char *cmd)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_RESTART);
	printk("RTAS system-reboot returned %d\n",
	       rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
	for (;;);
}

void rtas_power_off(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

/* Halt is implemented as a power-off with the same button-press gating. */
void rtas_halt(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_HALT);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];

/*
 * Notify firmware of an OS panic via "ibm,os-term".  Skipped when a panic
 * timeout reboot is configured or the service is absent.
 */
void rtas_os_term(char *str)
{
	int status;

	if (panic_timeout)
		return;

	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term"))
		return;

	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);

	do {
		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
				   __pa(rtas_os_term_buf));
	} while (rtas_busy_delay(status));

	if (status != 0)
		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}

static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
#ifdef CONFIG_PPC_PSERIES
/*
 * Per-CPU worker for partition suspend: every CPU calls H_JOIN; the one
 * that gets H_CONTINUE performs the actual "ibm,suspend-me" RTAS call,
 * then prods the others back out of the join state.
 */
static void rtas_percpu_suspend_me(void *info)
{
	long rc;
	unsigned long msr_save;
	int cpu;
	struct rtas_suspend_me_data *data =
		(struct rtas_suspend_me_data *)info;

	atomic_inc(&data->working);

	/* really need to ensure MSR.EE is off for H_JOIN */
	msr_save = mfmsr();
	mtmsr(msr_save & ~(MSR_EE));

	rc = plpar_hcall_norets(H_JOIN);

	mtmsr(msr_save);

	if (rc == H_SUCCESS) {
		/* This cpu was prodded and the suspend is complete. */
		goto out;
	} else if (rc == H_CONTINUE) {
		/* All other cpus are in H_JOIN, this cpu does
		 * the suspend.
		 */
		printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
		       smp_processor_id());
		data->error = rtas_call(data->token, 0, 1, NULL);

		if (data->error)
			printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
			       data->error);
	} else {
		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
		       smp_processor_id(), rc);
		data->error = rc;
	}
	/* This cpu did the suspend or got an error; in either case,
	 * we need to prod all other other cpus out of join state.
	 * Extra prods are harmless.
	 */
	for_each_online_cpu(cpu)
		plpar_hcall_norets(H_PROD,
				   get_hard_smp_processor_id(cpu));
out:
	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);
}

/*
 * Handle the "ibm,suspend-me" request from ppc_rtas(): validate the VASI
 * session state, then drive all CPUs through rtas_percpu_suspend_me().
 * Returns 0 with a status in args->args[args->nargs] for non-fatal state
 * mismatches, or a negative errno / RTAS error otherwise.
 */
static int rtas_ibm_suspend_me(struct rtas_args *args)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf,
			 ((u64)args->args[0] << 32) | args->args[1]);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
		return 0;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		args->args[args->nargs] = -1;
		return 0;
	}

	atomic_set(&data.working, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.error = 0;
	data.complete = &done;

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		data.error = -EINVAL;

	wait_for_completion(&done);

	if (data.error != 0)
		printk(KERN_ERR "Error doing global join\n");

	return data.error;
}
#else /* CONFIG_PPC_PSERIES */
static int rtas_ibm_suspend_me(struct rtas_args *args)
{
	return -ENOSYS;
}
#endif

/*
 * sys_rtas: entry point for the rtas(2) syscall.  Copies an rtas_args
 * block from userspace, validates nargs/nret against the argument array
 * bounds, performs the call under rtas.lock (with ibm,suspend-me routed
 * to its special handler), and copies the return slots back out.
 * Requires CAP_SYS_ADMIN.
 */
asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
{
	struct rtas_args args;
	unsigned long flags;
	char *buff_copy, *errbuf = NULL;
	int nargs;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* First fetch just the token/nargs/nret header. */
	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	nargs = args.nargs;
	if (nargs > ARRAY_SIZE(args.args)
	    || args.nret > ARRAY_SIZE(args.args)
	    || nargs + args.nret > ARRAY_SIZE(args.args))
		return -EINVAL;

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	if (args.token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	args.rets = &args.args[nargs];
	memset(args.rets, 0, args.nret * sizeof(rtas_arg_t));

	/* Need to handle ibm,suspend_me call specially */
	if (args.token == ibm_suspend_me_token) {
		rc = rtas_ibm_suspend_me(&args);
		if (rc)
			return rc;
		goto copy_return;
	}

	buff_copy = get_errorlog_buffer();

	spin_lock_irqsave(&rtas.lock, flags);

	rtas.args = args;
	enter_rtas(__pa(&rtas.args));
	args = rtas.args;

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (args.rets[0] == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

	spin_unlock_irqrestore(&rtas.lock, flags);

	if (buff_copy) {
		if (errbuf)
			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
		kfree(buff_copy);
	}

 copy_return:
	/* Copy out args. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
			 args.nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}

/*
 * Call early during boot, before mem init or bootmem, to retrieve the RTAS
 * informations from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;

	/* Get RTAS dev node and fill up our "rtas" structure with infos
	 * about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (rtas.dev) {
		const u32 *basep, *entryp, *sizep;

		basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
		sizep = of_get_property(rtas.dev, "rtas-size", NULL);
		if (basep != NULL && sizep != NULL) {
			rtas.base = *basep;
			rtas.size = *sizep;
			entryp = of_get_property(rtas.dev,
					"linux,rtas-entry", NULL);
			if (entryp == NULL) /* Ugh */
				rtas.entry = rtas.base;
			else
				rtas.entry = *entryp;
		} else
			rtas.dev = NULL;
	}
	if (!rtas.dev)
		return;

	/* If RTAS was found, allocate the RMO buffer for it and look for
	 * the stop-self token if any
	 */
#ifdef CONFIG_PPC64
	/* On LPAR the RMO buffer must live inside the real-mode area. */
	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
		rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
	}
#endif
	rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);

#ifdef CONFIG_RTAS_ERROR_LOGGING
	rtas_last_error_token = rtas_token("rtas-last-error");
#endif
}

/*
 * Flat-device-tree scan hook: record the RTAS base/entry/size properties
 * (and, when the RTAS debug console is enabled, the get/put-term-char
 * tokens) from the top-level "rtas" node.  Returns 1 to stop the scan
 * once the node has been handled, 0 to keep scanning.
 */
int __init early_init_dt_scan_rtas(unsigned long node,
		const char *uname, int depth, void *data)
{
	u32 *basep, *entryp, *sizep;

	if (depth != 1 || strcmp(uname, "rtas") != 0)
		return 0;

	basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
	sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);

	if (basep && entryp && sizep) {
		rtas.base = *basep;
		rtas.entry = *entryp;
		rtas.size = *sizep;
	}

#ifdef CONFIG_UDBG_RTAS_CONSOLE
	basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
	if (basep)
		rtas_putchar_token = *basep;

	basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
	if (basep)
		rtas_getchar_token = *basep;

	if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
	    rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
		udbg_init_rtas_console();
#endif

	/* break now */
	return 1;
}
gpl-2.0
wido/libvirt
src/vbox/vbox_V3_0.c
38
1395
/** @file vbox_V3_0.c
 * C file to include support for multiple versions of VirtualBox
 * at runtime.
 */

/*
 * Copyright (C) 2008-2009 Sun Microsystems, Inc.
 *
 * This file is part of a free software library; you can redistribute
 * it and/or modify it under the terms of the GNU Lesser General
 * Public License version 2.1 as published by the Free Software
 * Foundation and shipped in the "COPYING.LESSER" file with this library.
 * The library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY of any kind.
 *
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if
 * any license choice other than GPL or LGPL is available it will
 * apply instead, Sun elects to use only the Lesser General Public
 * License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the
 * language indicating that LGPLv2 or any later version may be used,
 * or where a choice of which version of the LGPL is applied is
 * otherwise unspecified.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#include <config.h>

/** The API Version */
#define VBOX_API_VERSION 3000000

/** Version specific prefix. */
#define NAME(name) vbox30##name

/* The shared template expands against the two macros above, producing the
 * VirtualBox 3.0-specific driver implementation. */
#include "vbox_tmpl.c"
gpl-2.0
Wenzel/kvm
crypto/asymmetric_keys/mscode_parser.c
38
2992
/* Parse a Microsoft Individual Code Signing blob * * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) "MSCODE: "fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/oid_registry.h> #include <crypto/pkcs7.h> #include "verify_pefile.h" #include "mscode-asn1.h" /* * Parse a Microsoft Individual Code Signing blob */ int mscode_parse(struct pefile_context *ctx) { const void *content_data; size_t data_len; int ret; ret = pkcs7_get_content_data(ctx->pkcs7, &content_data, &data_len, 1); if (ret) { pr_debug("PKCS#7 message does not contain data\n"); return ret; } pr_devel("Data: %zu [%*ph]\n", data_len, (unsigned)(data_len), content_data); return asn1_ber_decoder(&mscode_decoder, ctx, content_data, data_len); } /* * Check the content type OID */ int mscode_note_content_type(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { enum OID oid; oid = look_up_OID(value, vlen); if (oid == OID__NR) { char buffer[50]; sprint_oid(value, vlen, buffer, sizeof(buffer)); pr_err("Unknown OID: %s\n", buffer); return -EBADMSG; } /* * pesign utility had a bug where it was putting * OID_msIndividualSPKeyPurpose instead of OID_msPeImageDataObjId * So allow both OIDs. 
*/ if (oid != OID_msPeImageDataObjId && oid != OID_msIndividualSPKeyPurpose) { pr_err("Unexpected content type OID %u\n", oid); return -EBADMSG; } return 0; } /* * Note the digest algorithm OID */ int mscode_note_digest_algo(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pefile_context *ctx = context; char buffer[50]; enum OID oid; oid = look_up_OID(value, vlen); switch (oid) { case OID_md4: ctx->digest_algo = "md4"; break; case OID_md5: ctx->digest_algo = "md5"; break; case OID_sha1: ctx->digest_algo = "sha1"; break; case OID_sha256: ctx->digest_algo = "sha256"; break; case OID_sha384: ctx->digest_algo = "sha384"; break; case OID_sha512: ctx->digest_algo = "sha512"; break; case OID_sha224: ctx->digest_algo = "sha224"; break; case OID__NR: sprint_oid(value, vlen, buffer, sizeof(buffer)); pr_err("Unknown OID: %s\n", buffer); return -EBADMSG; default: pr_err("Unsupported content type: %u\n", oid); return -ENOPKG; } return 0; } /* * Note the digest we're guaranteeing with this certificate */ int mscode_note_digest(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pefile_context *ctx = context; ctx->digest = value; ctx->digest_len = vlen; return 0; }
gpl-2.0
OrN/dolphin
Externals/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp
38
28833
// //Copyright (C) 2002-2005 3Dlabs Inc. Ltd. //Copyright (C) 2013 LunarG, Inc. //All rights reserved. // //Redistribution and use in source and binary forms, with or without //modification, are permitted provided that the following conditions //are met: // // Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // Neither the name of 3Dlabs Inc. Ltd. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // //THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS //"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT //LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS //FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE //COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, //INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, //BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; //LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER //CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT //LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN //ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE //POSSIBILITY OF SUCH DAMAGE. // /****************************************************************************\ Copyright (c) 2002, NVIDIA Corporation. NVIDIA Corporation("NVIDIA") supplies this software to you in consideration of your agreement to the following terms, and your use, installation, modification or redistribution of this NVIDIA software constitutes acceptance of these terms. 
If you do not agree with these terms, please do not use, install, modify or redistribute this NVIDIA software. In consideration of your agreement to abide by the following terms, and subject to these terms, NVIDIA grants you a personal, non-exclusive license, under NVIDIA's copyrights in this original NVIDIA software (the "NVIDIA Software"), to use, reproduce, modify and redistribute the NVIDIA Software, with or without modifications, in source and/or binary forms; provided that if you redistribute the NVIDIA Software, you must retain the copyright notice of NVIDIA, this notice and the following text and disclaimers in all such redistributions of the NVIDIA Software. Neither the name, trademarks, service marks nor logos of NVIDIA Corporation may be used to endorse or promote products derived from the NVIDIA Software without specific prior written permission from NVIDIA. Except as expressly stated in this notice, no other rights or licenses express or implied, are granted by NVIDIA herein, including but not limited to any patent rights that may be infringed by your derivative works or by other works in which the NVIDIA Software may be incorporated. No hardware is licensed hereunder. THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER PRODUCTS. 
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \****************************************************************************/ // // scanner.c // #define _CRT_SECURE_NO_WARNINGS #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "PpContext.h" #include "PpTokens.h" #include "../Scan.h" namespace glslang { int TPpContext::InitScanner() { // Add various atoms needed by the CPP line scanner: if (!InitCPP()) return 0; previous_token = '\n'; return 1; } /////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////// Floating point constants: ///////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////// /* * lFloatConst() - Scan a single- or double-precision floating point constant. Assumes that the scanner * has seen at least one digit, followed by either a decimal '.' or the * letter 'e', or a precision ending (e.g., F or LF). 
*/ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken) { bool HasDecimalOrExponent = false; int declen; int str_len; int isDouble = 0; declen = 0; str_len=len; char* str = ppToken->name; if (ch == '.') { HasDecimalOrExponent = true; str[len++] = (char)ch; ch = getChar(); while (ch >= '0' && ch <= '9') { if (len < MaxTokenLength) { declen++; if (len > 0 || ch != '0') { str[len] = (char)ch; len++; str_len++; } ch = getChar(); } else { parseContext.ppError(ppToken->loc, "float literal too long", "", ""); len = 1; str_len = 1; } } } // Exponent: if (ch == 'e' || ch == 'E') { HasDecimalOrExponent = true; if (len >= MaxTokenLength) { parseContext.ppError(ppToken->loc, "float literal too long", "", ""); len = 1; str_len = 1; } else { str[len++] = (char)ch; ch = getChar(); if (ch == '+') { str[len++] = (char)ch; ch = getChar(); } else if (ch == '-') { str[len++] = (char)ch; ch = getChar(); } if (ch >= '0' && ch <= '9') { while (ch >= '0' && ch <= '9') { if (len < MaxTokenLength) { str[len++] = (char)ch; ch = getChar(); } else { parseContext.ppError(ppToken->loc, "float literal too long", "", ""); len = 1; str_len = 1; } } } else { parseContext.ppError(ppToken->loc, "bad character in float exponent", "", ""); } } } if (len == 0) { ppToken->dval = 0.0; strcpy(str, "0.0"); } else { if (ch == 'l' || ch == 'L') { parseContext.doubleCheck(ppToken->loc, "double floating-point suffix"); if (! HasDecimalOrExponent) parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", ""); int ch2 = getChar(); if (ch2 != 'f' && ch2 != 'F') { ungetChar(); ungetChar(); } else { if (len < MaxTokenLength) { str[len++] = (char)ch; str[len++] = (char)ch2; isDouble = 1; } else { parseContext.ppError(ppToken->loc, "float literal too long", "", ""); len = 1,str_len=1; } } } else if (ch == 'f' || ch == 'F') { parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix"); if (! 
parseContext.relaxedErrors()) parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix"); if (! HasDecimalOrExponent) parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", ""); if (len < MaxTokenLength) str[len++] = (char)ch; else { parseContext.ppError(ppToken->loc, "float literal too long", "", ""); len = 1,str_len=1; } } else ungetChar(); str[len]='\0'; ppToken->dval = strtod(str, nullptr); } if (isDouble) return PpAtomConstDouble; else return PpAtomConstFloat; } // // Scanner used to tokenize source stream. // int TPpContext::tStringInput::scan(TPpToken* ppToken) { char* tokenText = ppToken->name; int AlreadyComplained = 0; int len = 0; int ch = 0; int ii = 0; unsigned long long ival = 0; bool enableInt64 = pp->parseContext.version >= 450 && pp->parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_int64); ppToken->ival = 0; ppToken->i64val = 0; ppToken->space = false; ch = getch(); for (;;) { while (ch == ' ' || ch == '\t') { ppToken->space = true; ch = getch(); } ppToken->loc = pp->parseContext.getCurrentLoc(); len = 0; switch (ch) { default: // Single character token, including EndOfInput, '#' and '\' (escaped newlines are handled at a lower level, so this is just a '\' token) return ch; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': do { if (len < MaxTokenLength) { tokenText[len++] = (char)ch; ch = getch(); } else { if (! 
AlreadyComplained) { pp->parseContext.ppError(ppToken->loc, "name too long", "", ""); AlreadyComplained = 1; } ch = getch(); } } while ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || ch == '_'); // line continuation with no token before or after makes len == 0, and need to start over skipping white space, etc. if (len == 0) continue; tokenText[len] = '\0'; ungetch(); ppToken->atom = pp->LookUpAddString(tokenText); return PpAtomIdentifier; case '0': ppToken->name[len++] = (char)ch; ch = getch(); if (ch == 'x' || ch == 'X') { // must be hexidecimal bool isUnsigned = false; bool isInt64 = false; ppToken->name[len++] = (char)ch; ch = getch(); if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) { ival = 0; do { if (ival <= 0x0fffffff || (enableInt64 && ival <= 0x0fffffffffffffffull)) { ppToken->name[len++] = (char)ch; if (ch >= '0' && ch <= '9') { ii = ch - '0'; } else if (ch >= 'A' && ch <= 'F') { ii = ch - 'A' + 10; } else if (ch >= 'a' && ch <= 'f') { ii = ch - 'a' + 10; } else pp->parseContext.ppError(ppToken->loc, "bad digit in hexidecimal literal", "", ""); ival = (ival << 4) | ii; } else { if (! 
AlreadyComplained) { pp->parseContext.ppError(ppToken->loc, "hexidecimal literal too big", "", ""); AlreadyComplained = 1; } ival = 0xffffffffffffffffull; } ch = getch(); } while ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')); } else { pp->parseContext.ppError(ppToken->loc, "bad digit in hexidecimal literal", "", ""); } if (ch == 'u' || ch == 'U') { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; isUnsigned = true; if (enableInt64) { int nextCh = getch(); if ((ch == 'u' && nextCh == 'l') || (ch == 'U' && nextCh == 'L')) { if (len < MaxTokenLength) ppToken->name[len++] = (char)nextCh; isInt64 = true; } else ungetch(); } } else if (enableInt64 && (ch == 'l' || ch == 'L')) { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; isInt64 = true; } else ungetch(); ppToken->name[len] = '\0'; if (isInt64) { ppToken->i64val = ival; return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64; } else { ppToken->ival = (int)ival; return isUnsigned ? PpAtomConstUint : PpAtomConstInt; } } else { // could be octal integer or floating point, speculative pursue octal until it must be floating point bool isUnsigned = false; bool isInt64 = false; bool octalOverflow = false; bool nonOctal = false; ival = 0; // see how much octal-like stuff we can read while (ch >= '0' && ch <= '7') { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; else if (! AlreadyComplained) { pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", ""); AlreadyComplained = 1; } if (ival <= 0x1fffffff || (enableInt64 && ival <= 0x1fffffffffffffffull)) { ii = ch - '0'; ival = (ival << 3) | ii; } else octalOverflow = true; ch = getch(); } // could be part of a float... if (ch == '8' || ch == '9') { nonOctal = true; do { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; else if (! 
AlreadyComplained) { pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", ""); AlreadyComplained = 1; } ch = getch(); } while (ch >= '0' && ch <= '9'); } if (ch == '.' || ch == 'e' || ch == 'f' || ch == 'E' || ch == 'F') return pp->lFloatConst(len, ch, ppToken); // wasn't a float, so must be octal... if (nonOctal) pp->parseContext.ppError(ppToken->loc, "octal literal digit too large", "", ""); if (ch == 'u' || ch == 'U') { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; isUnsigned = true; if (enableInt64) { int nextCh = getch(); if ((ch == 'u' && nextCh == 'l') || (ch == 'U' && nextCh == 'L')) { if (len < MaxTokenLength) ppToken->name[len++] = (char)nextCh; isInt64 = true; } else ungetch(); } } else if (enableInt64 && (ch == 'l' || ch == 'L')) { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; isInt64 = true; } else ungetch(); ppToken->name[len] = '\0'; if (octalOverflow) pp->parseContext.ppError(ppToken->loc, "octal literal too big", "", ""); if (isInt64) { ppToken->i64val = ival; return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64; } else { ppToken->ival = (int)ival; return isUnsigned ? PpAtomConstUint : PpAtomConstInt; } } break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': // can't be hexidecimal or octal, is either decimal or floating point do { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; else if (! AlreadyComplained) { pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", ""); AlreadyComplained = 1; } ch = getch(); } while (ch >= '0' && ch <= '9'); if (ch == '.' 
|| ch == 'e' || ch == 'f' || ch == 'E' || ch == 'F') { return pp->lFloatConst(len, ch, ppToken); } else { // Finish handling signed and unsigned integers int numericLen = len; bool isUnsigned = false; bool isInt64 = false; if (ch == 'u' || ch == 'U') { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; isUnsigned = true; if (enableInt64) { int nextCh = getch(); if ((ch == 'u' && nextCh == 'l') || (ch == 'U' && nextCh == 'L')) { if (len < MaxTokenLength) ppToken->name[len++] = (char)nextCh; isInt64 = true; } else ungetch(); } } else if (enableInt64 && (ch == 'l' || ch == 'L')) { if (len < MaxTokenLength) ppToken->name[len++] = (char)ch; isInt64 = true; } else ungetch(); ppToken->name[len] = '\0'; ival = 0; const unsigned oneTenthMaxInt = 0xFFFFFFFFu / 10; const unsigned remainderMaxInt = 0xFFFFFFFFu - 10 * oneTenthMaxInt; const unsigned long long oneTenthMaxInt64 = 0xFFFFFFFFFFFFFFFFull / 10; const unsigned long long remainderMaxInt64 = 0xFFFFFFFFFFFFFFFFull - 10 * oneTenthMaxInt64; for (int i = 0; i < numericLen; i++) { ch = ppToken->name[i] - '0'; if ((enableInt64 == false && ((ival > oneTenthMaxInt) || (ival == oneTenthMaxInt && (unsigned)ch > remainderMaxInt))) || (enableInt64 && ((ival > oneTenthMaxInt64) || (ival == oneTenthMaxInt64 && (unsigned long long)ch > remainderMaxInt64)))) { pp->parseContext.ppError(ppToken->loc, "numeric literal too big", "", ""); ival = 0xFFFFFFFFFFFFFFFFull; break; } else ival = ival * 10 + ch; } if (isInt64) { ppToken->i64val = ival; return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64; } else { ppToken->ival = (int)ival; return isUnsigned ? 
PpAtomConstUint : PpAtomConstInt; } } break; case '-': ch = getch(); if (ch == '-') { return PpAtomDecrement; } else if (ch == '=') { return PpAtomSub; } else { ungetch(); return '-'; } case '+': ch = getch(); if (ch == '+') { return PpAtomIncrement; } else if (ch == '=') { return PpAtomAdd; } else { ungetch(); return '+'; } case '*': ch = getch(); if (ch == '=') { return PpAtomMul; } else { ungetch(); return '*'; } case '%': ch = getch(); if (ch == '=') { return PpAtomMod; } else { ungetch(); return '%'; } case '^': ch = getch(); if (ch == '^') { return PpAtomXor; } else { if (ch == '=') return PpAtomXorAssign; else{ ungetch(); return '^'; } } case '=': ch = getch(); if (ch == '=') { return PpAtomEQ; } else { ungetch(); return '='; } case '!': ch = getch(); if (ch == '=') { return PpAtomNE; } else { ungetch(); return '!'; } case '|': ch = getch(); if (ch == '|') { return PpAtomOr; } else if (ch == '=') { return PpAtomOrAssign; } else { ungetch(); return '|'; } case '&': ch = getch(); if (ch == '&') { return PpAtomAnd; } else if (ch == '=') { return PpAtomAndAssign; } else { ungetch(); return '&'; } case '<': ch = getch(); if (ch == '<') { ch = getch(); if (ch == '=') return PpAtomLeftAssign; else { ungetch(); return PpAtomLeft; } } else if (ch == '=') { return PpAtomLE; } else { ungetch(); return '<'; } case '>': ch = getch(); if (ch == '>') { ch = getch(); if (ch == '=') return PpAtomRightAssign; else { ungetch(); return PpAtomRight; } } else if (ch == '=') { return PpAtomGE; } else { ungetch(); return '>'; } case '.': ch = getch(); if (ch >= '0' && ch <= '9') { ungetch(); return pp->lFloatConst(0, '.', ppToken); } else { ungetch(); return '.'; } case '/': ch = getch(); if (ch == '/') { pp->inComment = true; do { ch = getch(); } while (ch != '\n' && ch != EndOfInput); ppToken->space = true; pp->inComment = false; return ch; } else if (ch == '*') { ch = getch(); do { while (ch != '*') { if (ch == EndOfInput) { pp->parseContext.ppError(ppToken->loc, "End of input 
in comment", "comment", ""); return ch; } ch = getch(); } ch = getch(); if (ch == EndOfInput) { pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", ""); return ch; } } while (ch != '/'); ppToken->space = true; // loop again to get the next token... break; } else if (ch == '=') { return PpAtomDiv; } else { ungetch(); return '/'; } break; case '"': ch = getch(); while (ch != '"' && ch != '\n' && ch != EndOfInput) { if (len < MaxTokenLength) { tokenText[len] = (char)ch; len++; ch = getch(); } else break; }; tokenText[len] = '\0'; if (ch != '"') { ungetch(); pp->parseContext.ppError(ppToken->loc, "End of line in string", "string", ""); } return PpAtomConstString; } ch = getch(); } } // // The main functional entry-point into the preprocessor, which will // scan the source strings to figure out and return the next processing token. // // Return string pointer to next token. // Return 0 when no more tokens. // const char* TPpContext::tokenize(TPpToken* ppToken) { int token = '\n'; for(;;) { token = scanToken(ppToken); ppToken->token = token; if (token == EndOfInput) { missingEndifCheck(); return nullptr; } if (token == '#') { if (previous_token == '\n') { token = readCPPline(ppToken); if (token == EndOfInput) { missingEndifCheck(); return nullptr; } continue; } else { parseContext.ppError(ppToken->loc, "preprocessor directive cannot be preceded by another token", "#", ""); return nullptr; } } previous_token = token; if (token == '\n') continue; // expand macros if (token == PpAtomIdentifier && MacroExpand(ppToken->atom, ppToken, false, true) != 0) continue; const char* tokenString = nullptr; switch (token) { case PpAtomIdentifier: case PpAtomConstInt: case PpAtomConstUint: case PpAtomConstFloat: case PpAtomConstInt64: case PpAtomConstUint64: case PpAtomConstDouble: tokenString = ppToken->name; break; case PpAtomConstString: parseContext.ppError(ppToken->loc, "string literals not supported", "\"\"", ""); break; case '\'': 
parseContext.ppError(ppToken->loc, "character literals not supported", "\'", ""); break; default: tokenString = GetAtomString(token); break; } if (tokenString) { if (tokenString[0] != 0) parseContext.tokensBeforeEOF = 1; return tokenString; } } } // Checks if we've seen balanced #if...#endif void TPpContext::missingEndifCheck() { if (ifdepth > 0) parseContext.ppError(parseContext.getCurrentLoc(), "missing #endif", "", ""); } } // end namespace glslang
gpl-2.0
k5t4j5/kernel_htc_hybrid
drivers/spmi/spmi.c
38
19076
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/idr.h> #include <linux/slab.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/spmi.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include "spmi-dbgfs.h" struct spmii_boardinfo { struct list_head list; struct spmi_boardinfo board_info; }; static DEFINE_MUTEX(board_lock); static LIST_HEAD(board_list); static DEFINE_IDR(ctrl_idr); static struct device_type spmi_dev_type; static struct device_type spmi_ctrl_type; struct bus_type spmi_bus_type; static int spmi_register_controller(struct spmi_controller *ctrl); struct spmi_controller *spmi_busnum_to_ctrl(u32 bus_num) { struct spmi_controller *ctrl; mutex_lock(&board_lock); ctrl = idr_find(&ctrl_idr, bus_num); mutex_unlock(&board_lock); return ctrl; } EXPORT_SYMBOL_GPL(spmi_busnum_to_ctrl); int spmi_add_controller(struct spmi_controller *ctrl) { int id; int status; if (!ctrl) return -EINVAL; pr_debug("adding controller for bus %d (0x%p)\n", ctrl->nr, ctrl); if (ctrl->nr & ~MAX_ID_MASK) { pr_err("invalid bus identifier %d\n", ctrl->nr); return -EINVAL; } retry: if (idr_pre_get(&ctrl_idr, GFP_KERNEL) == 0) { pr_err("no free memory for idr\n"); return -ENOMEM; } mutex_lock(&board_lock); status = idr_get_new_above(&ctrl_idr, ctrl, ctrl->nr, &id); if (status == 0 && id != ctrl->nr) { status = -EBUSY; idr_remove(&ctrl_idr, id); } mutex_unlock(&board_lock); if 
(status == -EAGAIN) goto retry; if (status == 0) status = spmi_register_controller(ctrl); return status; } EXPORT_SYMBOL_GPL(spmi_add_controller); static int spmi_ctrl_remove_device(struct device *dev, void *data) { struct spmi_device *spmidev = to_spmi_device(dev); struct spmi_controller *ctrl = data; if (dev->type == &spmi_dev_type && spmidev->ctrl == ctrl) spmi_remove_device(spmidev); return 0; } int spmi_del_controller(struct spmi_controller *ctrl) { struct spmi_controller *found; if (!ctrl) return -EINVAL; mutex_lock(&board_lock); found = idr_find(&ctrl_idr, ctrl->nr); mutex_unlock(&board_lock); if (found != ctrl) return -EINVAL; mutex_lock(&board_lock); bus_for_each_dev(&spmi_bus_type, NULL, ctrl, spmi_ctrl_remove_device); mutex_unlock(&board_lock); spmi_dfs_del_controller(ctrl); mutex_lock(&board_lock); idr_remove(&ctrl_idr, ctrl->nr); mutex_unlock(&board_lock); init_completion(&ctrl->dev_released); device_unregister(&ctrl->dev); wait_for_completion(&ctrl->dev_released); return 0; } EXPORT_SYMBOL_GPL(spmi_del_controller); #define spmi_ctrl_attr_gr NULL static void spmi_ctrl_release(struct device *dev) { struct spmi_controller *ctrl = to_spmi_controller(dev); complete(&ctrl->dev_released); } static struct device_type spmi_ctrl_type = { .groups = spmi_ctrl_attr_gr, .release = spmi_ctrl_release, }; #define spmi_device_attr_gr NULL #define spmi_device_uevent NULL static void spmi_dev_release(struct device *dev) { struct spmi_device *spmidev = to_spmi_device(dev); kfree(spmidev); } static struct device_type spmi_dev_type = { .groups = spmi_device_attr_gr, .uevent = spmi_device_uevent, .release = spmi_dev_release, }; struct spmi_device *spmi_alloc_device(struct spmi_controller *ctrl) { struct spmi_device *spmidev; if (!ctrl || !spmi_busnum_to_ctrl(ctrl->nr)) { pr_err("Missing SPMI controller\n"); return NULL; } spmidev = kzalloc(sizeof(*spmidev), GFP_KERNEL); if (!spmidev) { dev_err(&ctrl->dev, "unable to allocate spmi_device\n"); return NULL; } spmidev->ctrl = 
ctrl; spmidev->dev.parent = ctrl->dev.parent; spmidev->dev.bus = &spmi_bus_type; spmidev->dev.type = &spmi_dev_type; device_initialize(&spmidev->dev); return spmidev; } EXPORT_SYMBOL_GPL(spmi_alloc_device); static struct device *get_valid_device(struct spmi_device *spmidev) { struct device *dev; if (!spmidev) return NULL; dev = &spmidev->dev; if (dev->bus != &spmi_bus_type || dev->type != &spmi_dev_type) return NULL; if (!spmidev->ctrl || !spmi_busnum_to_ctrl(spmidev->ctrl->nr)) return NULL; return dev; } int spmi_add_device(struct spmi_device *spmidev) { int rc; struct device *dev = get_valid_device(spmidev); if (!dev) { pr_err("invalid SPMI device\n"); return -EINVAL; } dev_set_name(dev, "%s-%p", spmidev->name, spmidev); rc = device_add(dev); if (rc < 0) dev_err(dev, "Can't add %s, status %d\n", dev_name(dev), rc); else dev_dbg(dev, "device %s registered\n", dev_name(dev)); return rc; } EXPORT_SYMBOL_GPL(spmi_add_device); struct spmi_device *spmi_new_device(struct spmi_controller *ctrl, struct spmi_boardinfo const *info) { struct spmi_device *spmidev; int rc; if (!ctrl || !info) return NULL; spmidev = spmi_alloc_device(ctrl); if (!spmidev) return NULL; spmidev->name = info->name; spmidev->sid = info->slave_id; spmidev->dev.of_node = info->of_node; spmidev->dev.platform_data = (void *)info->platform_data; spmidev->num_dev_node = info->num_dev_node; spmidev->dev_node = info->dev_node; spmidev->res = info->res; rc = spmi_add_device(spmidev); if (rc < 0) { spmi_dev_put(spmidev); return NULL; } return spmidev; } EXPORT_SYMBOL_GPL(spmi_new_device); void spmi_remove_device(struct spmi_device *spmi_dev) { device_unregister(&spmi_dev->dev); } EXPORT_SYMBOL_GPL(spmi_remove_device); static void spmi_match_ctrl_to_boardinfo(struct spmi_controller *ctrl, struct spmi_boardinfo *bi) { struct spmi_device *spmidev; spmidev = spmi_new_device(ctrl, bi); if (!spmidev) dev_err(ctrl->dev.parent, "can't create new device for %s\n", bi->name); } int spmi_register_board_info(int busnum, 
struct spmi_boardinfo const *info, unsigned n) { int i; struct spmii_boardinfo *bi; struct spmi_controller *ctrl; bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); if (!bi) return -ENOMEM; ctrl = spmi_busnum_to_ctrl(busnum); for (i = 0; i < n; i++, bi++, info++) { memcpy(&bi->board_info, info, sizeof(*info)); mutex_lock(&board_lock); list_add_tail(&bi->list, &board_list); if (ctrl) spmi_match_ctrl_to_boardinfo(ctrl, &bi->board_info); mutex_unlock(&board_lock); } return 0; } EXPORT_SYMBOL_GPL(spmi_register_board_info); static inline int spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid) { if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type) return -EINVAL; return ctrl->cmd(ctrl, opcode, sid); } static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid, u16 addr, u8 bc, u8 *buf) { if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type) return -EINVAL; return ctrl->read_cmd(ctrl, opcode, sid, addr, bc, buf); } static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid, u16 addr, u8 bc, u8 *buf) { if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type) return -EINVAL; return ctrl->write_cmd(ctrl, opcode, sid, addr, bc, buf); } int spmi_register_read(struct spmi_controller *ctrl, u8 sid, u8 addr, u8 *buf) { if (sid > SPMI_MAX_SLAVE_ID || addr > 0x1F) return -EINVAL; return spmi_read_cmd(ctrl, SPMI_CMD_READ, sid, addr, 0, buf); } EXPORT_SYMBOL_GPL(spmi_register_read); int spmi_ext_register_read(struct spmi_controller *ctrl, u8 sid, u8 addr, u8 *buf, int len) { if (sid > SPMI_MAX_SLAVE_ID || len <= 0 || len > 16) return -EINVAL; return spmi_read_cmd(ctrl, SPMI_CMD_EXT_READ, sid, addr, len - 1, buf); } EXPORT_SYMBOL_GPL(spmi_ext_register_read); int spmi_ext_register_readl(struct spmi_controller *ctrl, u8 sid, u16 addr, u8 *buf, int len) { if (sid > SPMI_MAX_SLAVE_ID || len <= 0 || len > 8) return -EINVAL; return spmi_read_cmd(ctrl, SPMI_CMD_EXT_READL, sid, addr, len - 1, buf); } 
EXPORT_SYMBOL_GPL(spmi_ext_register_readl); int spmi_register_write(struct spmi_controller *ctrl, u8 sid, u8 addr, u8 *buf) { u8 op = SPMI_CMD_WRITE; if (sid > SPMI_MAX_SLAVE_ID || addr > 0x1F) return -EINVAL; return spmi_write_cmd(ctrl, op, sid, addr, 0, buf); } EXPORT_SYMBOL_GPL(spmi_register_write); /** * spmi_register_zero_write() - register zero write * @dev: SPMI device. * @sid: slave identifier. * @data: the data to be written to register 0 (7-bits). * * Writes data to register 0 of the Slave device. */ int spmi_register_zero_write(struct spmi_controller *ctrl, u8 sid, u8 data) { u8 op = SPMI_CMD_ZERO_WRITE; if (sid > SPMI_MAX_SLAVE_ID) return -EINVAL; return spmi_write_cmd(ctrl, op, sid, 0, 0, &data); } EXPORT_SYMBOL_GPL(spmi_register_zero_write); int spmi_ext_register_write(struct spmi_controller *ctrl, u8 sid, u8 addr, u8 *buf, int len) { u8 op = SPMI_CMD_EXT_WRITE; if (sid > SPMI_MAX_SLAVE_ID || len <= 0 || len > 16) return -EINVAL; return spmi_write_cmd(ctrl, op, sid, addr, len - 1, buf); } EXPORT_SYMBOL_GPL(spmi_ext_register_write); int spmi_ext_register_writel(struct spmi_controller *ctrl, u8 sid, u16 addr, u8 *buf, int len) { u8 op = SPMI_CMD_EXT_WRITEL; if (sid > SPMI_MAX_SLAVE_ID || len <= 0 || len > 8) return -EINVAL; return spmi_write_cmd(ctrl, op, sid, addr, len - 1, buf); } EXPORT_SYMBOL_GPL(spmi_ext_register_writel); int spmi_command_reset(struct spmi_controller *ctrl, u8 sid) { if (sid > SPMI_MAX_SLAVE_ID) return -EINVAL; return spmi_cmd(ctrl, SPMI_CMD_RESET, sid); } EXPORT_SYMBOL_GPL(spmi_command_reset); int spmi_command_sleep(struct spmi_controller *ctrl, u8 sid) { if (sid > SPMI_MAX_SLAVE_ID) return -EINVAL; return spmi_cmd(ctrl, SPMI_CMD_SLEEP, sid); } EXPORT_SYMBOL_GPL(spmi_command_sleep); int spmi_command_wakeup(struct spmi_controller *ctrl, u8 sid) { if (sid > SPMI_MAX_SLAVE_ID) return -EINVAL; return spmi_cmd(ctrl, SPMI_CMD_WAKEUP, sid); } EXPORT_SYMBOL_GPL(spmi_command_wakeup); int spmi_command_shutdown(struct spmi_controller *ctrl, 
u8 sid) { if (sid > SPMI_MAX_SLAVE_ID) return -EINVAL; return spmi_cmd(ctrl, SPMI_CMD_SHUTDOWN, sid); } EXPORT_SYMBOL_GPL(spmi_command_shutdown); static const struct spmi_device_id *spmi_match(const struct spmi_device_id *id, const struct spmi_device *spmi_dev) { while (id->name[0]) { if (strncmp(spmi_dev->name, id->name, SPMI_NAME_SIZE) == 0) return id; id++; } return NULL; } static int spmi_device_match(struct device *dev, struct device_driver *drv) { struct spmi_device *spmi_dev; struct spmi_driver *sdrv = to_spmi_driver(drv); if (dev->type == &spmi_dev_type) spmi_dev = to_spmi_device(dev); else return 0; if (of_driver_match_device(dev, drv)) return 1; if (sdrv->id_table) return spmi_match(sdrv->id_table, spmi_dev) != NULL; if (drv->name) return strncmp(spmi_dev->name, drv->name, SPMI_NAME_SIZE) == 0; return 0; } #ifdef CONFIG_PM_SLEEP static int spmi_legacy_suspend(struct device *dev, pm_message_t mesg) { struct spmi_device *spmi_dev = NULL; struct spmi_driver *driver; if (dev->type == &spmi_dev_type) spmi_dev = to_spmi_device(dev); if (!spmi_dev || !dev->driver) return 0; driver = to_spmi_driver(dev->driver); if (!driver->suspend) return 0; return driver->suspend(spmi_dev, mesg); } static int spmi_legacy_resume(struct device *dev) { struct spmi_device *spmi_dev = NULL; struct spmi_driver *driver; if (dev->type == &spmi_dev_type) spmi_dev = to_spmi_device(dev); if (!spmi_dev || !dev->driver) return 0; driver = to_spmi_driver(dev->driver); if (!driver->resume) return 0; return driver->resume(spmi_dev); } static int spmi_pm_suspend(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pm) return pm_generic_suspend(dev); else return spmi_legacy_suspend(dev, PMSG_SUSPEND); } static int spmi_pm_resume(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; if (pm) return pm_generic_resume(dev); else return spmi_legacy_resume(dev); } #else #define spmi_pm_suspend NULL #define spmi_pm_resume NULL #endif static const struct dev_pm_ops spmi_pm_ops = { .suspend = spmi_pm_suspend, .resume = spmi_pm_resume, SET_RUNTIME_PM_OPS( pm_generic_suspend, pm_generic_resume, pm_generic_runtime_idle ) }; struct bus_type spmi_bus_type = { .name = "spmi", .match = spmi_device_match, .pm = &spmi_pm_ops, }; EXPORT_SYMBOL_GPL(spmi_bus_type); struct device spmi_dev = { .init_name = "spmi", }; static int spmi_drv_probe(struct device *dev) { const struct spmi_driver *sdrv = to_spmi_driver(dev->driver); return sdrv->probe(to_spmi_device(dev)); } static int spmi_drv_remove(struct device *dev) { const struct spmi_driver *sdrv = to_spmi_driver(dev->driver); return sdrv->remove(to_spmi_device(dev)); } static void spmi_drv_shutdown(struct device *dev) { const struct spmi_driver *sdrv = to_spmi_driver(dev->driver); sdrv->shutdown(to_spmi_device(dev)); } int spmi_driver_register(struct spmi_driver *drv) { drv->driver.bus = &spmi_bus_type; if (drv->probe) drv->driver.probe = spmi_drv_probe; if (drv->remove) drv->driver.remove = spmi_drv_remove; if (drv->shutdown) drv->driver.shutdown = spmi_drv_shutdown; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(spmi_driver_register); #ifdef CONFIG_HTC_POWER_DEBUG #define MAX_REG_PER_TRANSACTION (8) #define PON_REVISION2 0x801 #define PON_PON_REASON1 0x808 #define PON_WARM_RESET_REASON1 0x80A #define PON_WARM_RESET_REASON2 0x80B #define PON_POFF_REASON1 0x80C #define PON_POFF_REASON2 0x80D #define PON_SOFT_RESET_REASON1 0x80E #define PON_SOFT_RESET_REASON2 0x80F enum { HARD_RESET_TRIGGERED_BIT, SMPL_TRIGGERED_BIT, RTC_TRIGGERED_BIT, DC_CHG_TRIGGERED_BIT, USB_CHG_TRIGGERED_BIT, PON1_TRIGGERED_BIT, CBLPWR_N_TRIGGERED_BIT, KPDPWR_N_TRIGGERED_BIT, PON_BIT_MAX, } pon_reason_bit; enum { SOFT_TRIGGERED_BIT, PS_HOLD_TRIGGERED_BIT, PMIC_WD_TRIGGERED_BIT, GP1_TRIGGERED_BIT, 
GP2_TRIGGERED_BIT, KPDPWR_AND_RESIN_TRIGGERED_BIT, RESIN_N_TRIGGERED_BIT, KPDPWR_TRIGGERED_BIT, WARM_REASON1_BIT_MAX, } warm_soft_reset_poff_reason1_bit; enum { AFP_TRIGGERED_BIT = 4, WARM_REASON2_BIT_MAX, } warm_soft_reset_reason2_bit; enum { CHARGER_TRIGGERED_BIT = 3, POFF_AFP_TRIGGERED_BIT, UVLO_TRIGGERED_BIT, OTST3_TRIGGERED_BIT, STAGE3_TRIGGERED_BIT, POFF_REASON2_BIT_MAX, } poff_reason2_bit; char *pon_reason[PON_BIT_MAX] = { "Hard Reset", "SMPL", "RTC", "DC Charger", "USB Charger", "Pon1", "CBL_PWR1", "Keypad Power" }; char *warm_reset_reason1[WARM_REASON1_BIT_MAX] = { "Software", "PS Hold", "PMIC Watchdog", "Keypad Reset1", "Keypad Reset2", "Kpdpwr + Resin", "Resin", "Keypad Power" }; char *warm_reset_reason2[WARM_REASON2_BIT_MAX] = { "AFP", }; char *poff_reason2[POFF_REASON2_BIT_MAX] = { "Charger", "AFP", "UVLO", "OTST3", "Stage3" }; void htc_get_reset_reason(int type, uint8_t value) { int bit_idx = 0; int start_bit = 0; int end_bit = 0; char **reason_desc = NULL; switch (type) { case PON_PON_REASON1: start_bit = HARD_RESET_TRIGGERED_BIT; end_bit = PON_BIT_MAX; reason_desc = pon_reason; break; case PON_WARM_RESET_REASON1: case PON_SOFT_RESET_REASON1: case PON_POFF_REASON1: start_bit = SOFT_TRIGGERED_BIT; end_bit = WARM_REASON1_BIT_MAX; reason_desc = warm_reset_reason1; break; case PON_WARM_RESET_REASON2: case PON_SOFT_RESET_REASON2: start_bit = AFP_TRIGGERED_BIT; end_bit = WARM_REASON2_BIT_MAX; reason_desc = warm_reset_reason2; break; case PON_POFF_REASON2: start_bit = CHARGER_TRIGGERED_BIT; end_bit = POFF_REASON2_BIT_MAX; reason_desc = poff_reason2; break; default: break; } for (bit_idx = start_bit; bit_idx < end_bit; bit_idx++) { if (value & (1 << bit_idx)) { printk(KERN_INFO "%s, (0x%x)", reason_desc[bit_idx - start_bit], value); } } } static int htc_spmi_read_data(struct spmi_controller *ctrl, uint8_t *buf, int offset, int cnt) { int ret = 0; int len; uint8_t sid; uint16_t addr; while (cnt > 0) { sid = (offset >> 16) & 0xF; addr = offset & 0xFFFF; len = 
min(cnt, MAX_REG_PER_TRANSACTION); ret = spmi_ext_register_readl(ctrl, sid, addr, buf, len); if (ret < 0) { pr_err("SPMI read failed, err = %d\n", ret); goto done; } cnt -= len; buf += len; offset += len; } done: return ret; } static void htc_get_pon_boot_reason(struct spmi_controller *ctrl) { uint8_t val = 0; printk(KERN_INFO "------ Reset Reason ------\n"); htc_spmi_read_data(ctrl, &val, PON_PON_REASON1, 1); printk(KERN_INFO "[PON_PON_REASON]"); htc_get_reset_reason(PON_PON_REASON1, val); htc_spmi_read_data(ctrl, &val, PON_WARM_RESET_REASON1, 1); printk(KERN_INFO "[PON_WARM_RESET_REASON]"); htc_get_reset_reason(PON_WARM_RESET_REASON1, val); htc_spmi_read_data(ctrl, &val, PON_WARM_RESET_REASON2, 1); htc_get_reset_reason(PON_WARM_RESET_REASON2, val); htc_spmi_read_data(ctrl, &val, PON_SOFT_RESET_REASON1, 1); printk(KERN_INFO "[PON_SOFT_RESET_REASON]"); htc_get_reset_reason(PON_SOFT_RESET_REASON1, val); htc_spmi_read_data(ctrl, &val, PON_SOFT_RESET_REASON2, 1); htc_get_reset_reason(PON_SOFT_RESET_REASON2, val); htc_spmi_read_data(ctrl, &val, PON_POFF_REASON1, 1); printk(KERN_INFO "[PON_POFF_REASON]"); htc_get_reset_reason(PON_POFF_REASON1, val); htc_spmi_read_data(ctrl, &val, PON_POFF_REASON2, 1); htc_get_reset_reason(PON_POFF_REASON2, val); printk(KERN_INFO "-------------------------\n"); } #endif static int spmi_register_controller(struct spmi_controller *ctrl) { int ret = 0; if (WARN_ON(!spmi_bus_type.p)) { ret = -EAGAIN; goto exit; } dev_set_name(&ctrl->dev, "spmi-%d", ctrl->nr); ctrl->dev.bus = &spmi_bus_type; ctrl->dev.type = &spmi_ctrl_type; ret = device_register(&ctrl->dev); if (ret) goto exit; dev_dbg(&ctrl->dev, "Bus spmi-%d registered: dev:%x\n", ctrl->nr, (u32)&ctrl->dev); spmi_dfs_add_controller(ctrl); #ifdef CONFIG_HTC_POWER_DEBUG htc_get_pon_boot_reason(ctrl); #endif return 0; exit: mutex_lock(&board_lock); idr_remove(&ctrl_idr, ctrl->nr); mutex_unlock(&board_lock); return ret; } static void __exit spmi_exit(void) { device_unregister(&spmi_dev); 
bus_unregister(&spmi_bus_type); } static int __init spmi_init(void) { int retval; retval = bus_register(&spmi_bus_type); if (!retval) retval = device_register(&spmi_dev); if (retval) bus_unregister(&spmi_bus_type); return retval; } postcore_initcall(spmi_init); module_exit(spmi_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("1.0"); MODULE_DESCRIPTION("SPMI module"); MODULE_ALIAS("platform:spmi");
gpl-2.0
tepelmann/linux-perf
drivers/usb/host/uhci-platform.c
38
4229
/* * Generic UHCI HCD (Host Controller Driver) for Platform Devices * * Copyright (c) 2011 Tony Prisk <linux@prisktech.co.nz> * * This file is based on uhci-grlib.c * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu */ #include <linux/of.h> #include <linux/platform_device.h> static int uhci_platform_init(struct usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); uhci->rh_numports = uhci_count_ports(hcd); /* Set up pointers to to generic functions */ uhci->reset_hc = uhci_generic_reset_hc; uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc; /* No special actions need to be taken for the functions below */ uhci->configure_hc = NULL; uhci->resume_detect_interrupts_are_broken = NULL; uhci->global_suspend_mode_is_broken = NULL; /* Reset if the controller isn't already safely quiescent. */ check_and_reset_hc(uhci); return 0; } static const struct hc_driver uhci_platform_hc_driver = { .description = hcd_name, .product_desc = "Generic UHCI Host Controller", .hcd_priv_size = sizeof(struct uhci_hcd), /* Generic hardware linkage */ .irq = uhci_irq, .flags = HCD_MEMORY | HCD_USB11, /* Basic lifecycle operations */ .reset = uhci_platform_init, .start = uhci_start, #ifdef CONFIG_PM .pci_suspend = NULL, .pci_resume = NULL, .bus_suspend = uhci_rh_suspend, .bus_resume = uhci_rh_resume, #endif .stop = uhci_stop, .urb_enqueue = uhci_urb_enqueue, .urb_dequeue = uhci_urb_dequeue, .endpoint_disable = uhci_hcd_endpoint_disable, .get_frame_number = uhci_hcd_get_frame_number, .hub_status_data = uhci_hub_status_data, .hub_control = uhci_hub_control, }; static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32); static int __devinit uhci_hcd_platform_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct uhci_hcd *uhci; struct resource *res; int ret; if (usb_disabled()) return -ENODEV; /* * Right now device-tree probed devices don't get dma_mask set. * Since shared usb code relies on it, set it here for now. 
* Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &platform_uhci_dma_mask; hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev, pdev->name); if (!hcd) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { pr_err("%s: request_mem_region failed\n", __func__); ret = -EBUSY; goto err_rmr; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { pr_err("%s: ioremap failed\n", __func__); ret = -ENOMEM; goto err_irq; } uhci = hcd_to_uhci(hcd); uhci->regs = hcd->regs; ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED | IRQF_SHARED); if (ret) goto err_uhci; return 0; err_uhci: iounmap(hcd->regs); err_irq: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); err_rmr: usb_put_hcd(hcd); return ret; } static int uhci_hcd_platform_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); usb_remove_hcd(hcd); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); platform_set_drvdata(pdev, NULL); return 0; } /* Make sure the controller is quiescent and that we're not using it * any more. This is mainly for the benefit of programs which, like kexec, * expect the hardware to be idle: not doing DMA or generating IRQs. * * This routine may be called in a damaged or failing kernel. Hence we * do not acquire the spinlock before shutting down the controller. 
*/ static void uhci_hcd_platform_shutdown(struct platform_device *op) { struct usb_hcd *hcd = dev_get_drvdata(&op->dev); uhci_hc_died(hcd_to_uhci(hcd)); } static const struct of_device_id platform_uhci_ids[] = { { .compatible = "platform-uhci", }, {} }; static struct platform_driver uhci_platform_driver = { .probe = uhci_hcd_platform_probe, .remove = uhci_hcd_platform_remove, .shutdown = uhci_hcd_platform_shutdown, .driver = { .name = "platform-uhci", .owner = THIS_MODULE, .of_match_table = of_match_ptr(platform_uhci_ids), }, };
gpl-2.0
chapuni/gcc
gcc/testsuite/gfortran.dg/used_before_typed_3.f90
38
1045
! { dg-do compile } ! { dg-options "-std=f95" } ! PR fortran/32095 ! PR fortran/34228 ! Check for a special case when the return-type of a function is given outside ! its "body" and contains symbols defined inside. MODULE testmod IMPLICIT REAL(a-z) CONTAINS CHARACTER(len=x) FUNCTION test1 (x) ! { dg-error "of INTEGER" } IMPLICIT REAL(a-z) INTEGER :: x ! { dg-error "already has basic type" } test1 = "foobar" END FUNCTION test1 CHARACTER(len=x) FUNCTION test2 (x) ! { dg-bogus "used before|of INTEGER" } IMPLICIT INTEGER(a-z) test2 = "foobar" END FUNCTION test2 END MODULE testmod CHARACTER(len=i) FUNCTION test3 (i) ! { dg-bogus "used before|of INTEGER" } ! i is IMPLICIT INTEGER by default test3 = "foobar" END FUNCTION test3 CHARACTER(len=g) FUNCTION test4 (g) ! { dg-error "of INTEGER" } ! g is REAL, unless declared INTEGER. test4 = "foobar" END FUNCTION test4 ! Test an empty function works, too. INTEGER FUNCTION test5 () END FUNCTION test5 ! { dg-final { cleanup-modules "testmod" } }
gpl-2.0
lucaspcamargo/litmus-rt
drivers/gpio/gpio-mpc8xxx.c
294
11450
/* * GPIOs on MPC512x/8349/8572/8610 and compatible * * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/irq.h> #define MPC8XXX_GPIO_PINS 32 #define GPIO_DIR 0x00 #define GPIO_ODR 0x04 #define GPIO_DAT 0x08 #define GPIO_IER 0x0c #define GPIO_IMR 0x10 #define GPIO_ICR 0x14 #define GPIO_ICR2 0x18 struct mpc8xxx_gpio_chip { struct of_mm_gpio_chip mm_gc; spinlock_t lock; /* * shadowed data register to be able to clear/set output pins in * open drain mode safely */ u32 data; struct irq_domain *irq; unsigned int irqn; const void *of_dev_id_data; }; static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) { return 1u << (MPC8XXX_GPIO_PINS - 1 - gpio); } static inline struct mpc8xxx_gpio_chip * to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm) { return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc); } static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); } /* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs * defined as output cannot be determined by reading GPDAT register, * so we use shadow data register instead. The status of input pins * is determined by reading GPDAT register. 
*/ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio) { u32 val; struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); u32 out_mask, out_shadow; out_mask = in_be32(mm->regs + GPIO_DIR); val = in_be32(mm->regs + GPIO_DAT) & ~out_mask; out_shadow = mpc8xxx_gc->data & out_mask; return (val | out_shadow) & mpc8xxx_gpio2mask(gpio); } static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); } static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); if (val) mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio); else mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(gpio); out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; int i; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); for (i = 0; i < gc->ngpio; i++) { if (*mask == 0) break; if (__test_and_clear_bit(i, mask)) { if (test_bit(i, bits)) mpc8xxx_gc->data |= mpc8xxx_gpio2mask(i); else mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(i); } } out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); 
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); return 0; } static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); unsigned long flags; mpc8xxx_gpio_set(gc, gpio, val); spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); return 0; } static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { /* GPIO 28..31 are input only on MPC5121 */ if (gpio >= 28) return -EINVAL; return mpc8xxx_gpio_dir_out(gc, gpio, val); } static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) { struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); if (mpc8xxx_gc->irq && offset < MPC8XXX_GPIO_PINS) return irq_create_mapping(mpc8xxx_gc->irq, offset); else return -ENXIO; } static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned int mask; mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR); if (mask) generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, 32 - ffs(mask))); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } static void mpc8xxx_irq_unmask(struct irq_data *d) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static void mpc8xxx_irq_mask(struct irq_data *d) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = 
&mpc8xxx_gc->mm_gc; unsigned long flags; spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } static void mpc8xxx_irq_ack(struct irq_data *d) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); } static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long flags; switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_ICR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_BOTH: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_ICR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; default: return -EINVAL; } return 0; } static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long gpio = irqd_to_hwirq(d); void __iomem *reg; unsigned int shift; unsigned long flags; if (gpio < 16) { reg = mm->regs + GPIO_ICR; shift = (15 - gpio) * 2; } else { reg = mm->regs + GPIO_ICR2; shift = (15 - (gpio % 16)) * 2; } switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_LEVEL_LOW: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrsetbits_be32(reg, 3 << shift, 2 << shift); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_LEVEL_HIGH: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrsetbits_be32(reg, 3 << shift, 1 << shift); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case 
IRQ_TYPE_EDGE_BOTH: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(reg, 3 << shift); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; default: return -EINVAL; } return 0; } static struct irq_chip mpc8xxx_irq_chip = { .name = "mpc8xxx-gpio", .irq_unmask = mpc8xxx_irq_unmask, .irq_mask = mpc8xxx_irq_mask, .irq_ack = mpc8xxx_irq_ack, .irq_set_type = mpc8xxx_irq_set_type, }; static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq, irq_hw_number_t hwirq) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data; if (mpc8xxx_gc->of_dev_id_data) mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; irq_set_chip_data(irq, h->host_data); irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq); return 0; } static struct irq_domain_ops mpc8xxx_gpio_irq_ops = { .map = mpc8xxx_gpio_irq_map, .xlate = irq_domain_xlate_twocell, }; static struct of_device_id mpc8xxx_gpio_ids[] = { { .compatible = "fsl,mpc8349-gpio", }, { .compatible = "fsl,mpc8572-gpio", }, { .compatible = "fsl,mpc8610-gpio", }, { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, }, { .compatible = "fsl,pq3-gpio", }, { .compatible = "fsl,qoriq-gpio", }, {} }; static int mpc8xxx_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mpc8xxx_gpio_chip *mpc8xxx_gc; struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; const struct of_device_id *id; int ret; mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL); if (!mpc8xxx_gc) return -ENOMEM; platform_set_drvdata(pdev, mpc8xxx_gc); spin_lock_init(&mpc8xxx_gc->lock); mm_gc = &mpc8xxx_gc->mm_gc; gc = &mm_gc->gc; mm_gc->save_regs = mpc8xxx_gpio_save_regs; gc->ngpio = MPC8XXX_GPIO_PINS; gc->direction_input = mpc8xxx_gpio_dir_in; gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ? mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out; gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ? 
mpc8572_gpio_get : mpc8xxx_gpio_get; gc->set = mpc8xxx_gpio_set; gc->set_multiple = mpc8xxx_gpio_set_multiple; gc->to_irq = mpc8xxx_gpio_to_irq; ret = of_mm_gpiochip_add(np, mm_gc); if (ret) return ret; mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0); if (mpc8xxx_gc->irqn == NO_IRQ) return 0; mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS, &mpc8xxx_gpio_irq_ops, mpc8xxx_gc); if (!mpc8xxx_gc->irq) return 0; id = of_match_node(mpc8xxx_gpio_ids, np); if (id) mpc8xxx_gc->of_dev_id_data = id->data; /* ack and mask all irqs */ out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); out_be32(mm_gc->regs + GPIO_IMR, 0); irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc); irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade); return 0; } static int mpc8xxx_remove(struct platform_device *pdev) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = platform_get_drvdata(pdev); if (mpc8xxx_gc->irq) { irq_set_handler_data(mpc8xxx_gc->irqn, NULL); irq_set_chained_handler(mpc8xxx_gc->irqn, NULL); irq_domain_remove(mpc8xxx_gc->irq); } of_mm_gpiochip_remove(&mpc8xxx_gc->mm_gc); return 0; } static struct platform_driver mpc8xxx_plat_driver = { .probe = mpc8xxx_probe, .remove = mpc8xxx_remove, .driver = { .name = "gpio-mpc8xxx", .of_match_table = mpc8xxx_gpio_ids, }, }; static int __init mpc8xxx_init(void) { return platform_driver_register(&mpc8xxx_plat_driver); } arch_initcall(mpc8xxx_init);
gpl-2.0
vantjnh1991/F160-JB
drivers/slimbus/slim-msm-ctrl.c
294
63409
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/slimbus/slimbus.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/clk.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_slimbus.h> #include <mach/sps.h> /* Per spec.max 40 bytes per received message */ #define SLIM_RX_MSGQ_BUF_LEN 40 #define SLIM_USR_MC_GENERIC_ACK 0x25 #define SLIM_USR_MC_MASTER_CAPABILITY 0x0 #define SLIM_USR_MC_REPORT_SATELLITE 0x1 #define SLIM_USR_MC_ADDR_QUERY 0xD #define SLIM_USR_MC_ADDR_REPLY 0xE #define SLIM_USR_MC_DEFINE_CHAN 0x20 #define SLIM_USR_MC_DEF_ACT_CHAN 0x21 #define SLIM_USR_MC_CHAN_CTRL 0x23 #define SLIM_USR_MC_RECONFIG_NOW 0x24 #define SLIM_USR_MC_REQ_BW 0x28 #define SLIM_USR_MC_CONNECT_SRC 0x2C #define SLIM_USR_MC_CONNECT_SINK 0x2D #define SLIM_USR_MC_DISCONNECT_PORT 0x2E /* MSM Slimbus peripheral settings */ #define MSM_SLIM_PERF_SUMM_THRESHOLD 0x8000 #define MSM_SLIM_NCHANS 32 #define MSM_SLIM_NPORTS 24 #define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC /* * Need enough descriptors to receive present messages from slaves * if received simultaneously. Present message needs 3 descriptors * and this size will ensure around 10 simultaneous reports. 
*/ #define MSM_SLIM_DESC_NUM 32 #define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \ ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16)) #define MSM_SLIM_NAME "msm_slim_ctrl" #define SLIM_ROOT_FREQ 24576000 #define MSM_CONCUR_MSG 8 #define SAT_CONCUR_MSG 8 #define DEF_WATERMARK (8 << 1) #define DEF_ALIGN 0 #define DEF_PACK (1 << 6) #define ENABLE_PORT 1 #define DEF_BLKSZ 0 #define DEF_TRANSZ 0 #define SAT_MAGIC_LSB 0xD9 #define SAT_MAGIC_MSB 0xC5 #define SAT_MSG_VER 0x1 #define SAT_MSG_PROT 0x1 #define MSM_SAT_SUCCSS 0x20 #define MSM_MAX_NSATS 2 #define MSM_MAX_SATCH 32 #define QC_MFGID_LSB 0x2 #define QC_MFGID_MSB 0x17 #define QC_CHIPID_SL 0x10 #define QC_DEVID_SAT1 0x3 #define QC_DEVID_SAT2 0x4 #define QC_DEVID_PGD 0x5 #define QC_MSM_DEVS 5 #define INIT_MX_RETRIES 10 #define DEF_RETRY_MS 10 #define PGD_THIS_EE(r, v) ((v) ? PGD_THIS_EE_V2(r) : PGD_THIS_EE_V1(r)) #define PGD_PORT(r, p, v) ((v) ? PGD_PORT_V2(r, p) : PGD_PORT_V1(r, p)) #define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r)) #define PGD_THIS_EE_V2(r) (dev->base + (r ## _V2) + (dev->ee * 0x1000)) #define PGD_PORT_V2(r, p) (dev->base + (r ## _V2) + ((p) * 0x1000)) #define CFG_PORT_V2(r) ((r ## _V2)) /* Component registers */ enum comp_reg_v2 { COMP_CFG_V2 = 4, COMP_TRUST_CFG_V2 = 0x3000, }; /* Manager PGD registers */ enum pgd_reg_v2 { PGD_CFG_V2 = 0x800, PGD_STAT_V2 = 0x804, PGD_INT_EN_V2 = 0x810, PGD_INT_STAT_V2 = 0x814, PGD_INT_CLR_V2 = 0x818, PGD_OWN_EEn_V2 = 0x300C, PGD_PORT_INT_EN_EEn_V2 = 0x5000, PGD_PORT_INT_ST_EEn_V2 = 0x5004, PGD_PORT_INT_CL_EEn_V2 = 0x5008, PGD_PORT_CFGn_V2 = 0x14000, PGD_PORT_STATn_V2 = 0x14004, PGD_PORT_PARAMn_V2 = 0x14008, PGD_PORT_BLKn_V2 = 0x1400C, PGD_PORT_TRANn_V2 = 0x14010, PGD_PORT_MCHANn_V2 = 0x14014, PGD_PORT_PSHPLLn_V2 = 0x14018, PGD_PORT_PC_CFGn_V2 = 0x8000, PGD_PORT_PC_VALn_V2 = 0x8004, PGD_PORT_PC_VFR_TSn_V2 = 0x8008, PGD_PORT_PC_VFR_STn_V2 = 0x800C, PGD_PORT_PC_VFR_CLn_V2 = 0x8010, PGD_IE_STAT_V2 = 0x820, PGD_VE_STAT_V2 = 0x830, }; #define 
PGD_THIS_EE_V1(r) (dev->base + (r ## _V1) + (dev->ee * 16)) #define PGD_PORT_V1(r, p) (dev->base + (r ## _V1) + ((p) * 32)) #define CFG_PORT_V1(r) ((r ## _V1)) /* Component registers */ enum comp_reg_v1 { COMP_CFG_V1 = 0, COMP_TRUST_CFG_V1 = 0x14, }; /* Manager PGD registers */ enum pgd_reg_v1 { PGD_CFG_V1 = 0x1000, PGD_STAT_V1 = 0x1004, PGD_INT_EN_V1 = 0x1010, PGD_INT_STAT_V1 = 0x1014, PGD_INT_CLR_V1 = 0x1018, PGD_OWN_EEn_V1 = 0x1020, PGD_PORT_INT_EN_EEn_V1 = 0x1030, PGD_PORT_INT_ST_EEn_V1 = 0x1034, PGD_PORT_INT_CL_EEn_V1 = 0x1038, PGD_PORT_CFGn_V1 = 0x1080, PGD_PORT_STATn_V1 = 0x1084, PGD_PORT_PARAMn_V1 = 0x1088, PGD_PORT_BLKn_V1 = 0x108C, PGD_PORT_TRANn_V1 = 0x1090, PGD_PORT_MCHANn_V1 = 0x1094, PGD_PORT_PSHPLLn_V1 = 0x1098, PGD_PORT_PC_CFGn_V1 = 0x1600, PGD_PORT_PC_VALn_V1 = 0x1604, PGD_PORT_PC_VFR_TSn_V1 = 0x1608, PGD_PORT_PC_VFR_STn_V1 = 0x160C, PGD_PORT_PC_VFR_CLn_V1 = 0x1610, PGD_IE_STAT_V1 = 0x1700, PGD_VE_STAT_V1 = 0x1710, }; /* Manager registers */ enum mgr_reg { MGR_CFG = 0x200, MGR_STATUS = 0x204, MGR_RX_MSGQ_CFG = 0x208, MGR_INT_EN = 0x210, MGR_INT_STAT = 0x214, MGR_INT_CLR = 0x218, MGR_TX_MSG = 0x230, MGR_RX_MSG = 0x270, MGR_IE_STAT = 0x2F0, MGR_VE_STAT = 0x300, }; enum msg_cfg { MGR_CFG_ENABLE = 1, MGR_CFG_RX_MSGQ_EN = 1 << 1, MGR_CFG_TX_MSGQ_EN_HIGH = 1 << 2, MGR_CFG_TX_MSGQ_EN_LOW = 1 << 3, }; /* Message queue types */ enum msm_slim_msgq_type { MSGQ_RX = 0, MSGQ_TX_LOW = 1, MSGQ_TX_HIGH = 2, }; /* Framer registers */ enum frm_reg { FRM_CFG = 0x400, FRM_STAT = 0x404, FRM_INT_EN = 0x410, FRM_INT_STAT = 0x414, FRM_INT_CLR = 0x418, FRM_WAKEUP = 0x41C, FRM_CLKCTL_DONE = 0x420, FRM_IE_STAT = 0x430, FRM_VE_STAT = 0x440, }; /* Interface registers */ enum intf_reg { INTF_CFG = 0x600, INTF_STAT = 0x604, INTF_INT_EN = 0x610, INTF_INT_STAT = 0x614, INTF_INT_CLR = 0x618, INTF_IE_STAT = 0x630, INTF_VE_STAT = 0x640, }; enum rsc_grp { EE_MGR_RSC_GRP = 1 << 10, EE_NGD_2 = 2 << 6, EE_NGD_1 = 0, }; enum mgr_intr { MGR_INT_RECFG_DONE = 1 << 24, MGR_INT_TX_NACKED_2 = 1 
<< 25, MGR_INT_MSG_BUF_CONTE = 1 << 26, MGR_INT_RX_MSG_RCVD = 1 << 30, MGR_INT_TX_MSG_SENT = 1 << 31, }; enum frm_cfg { FRM_ACTIVE = 1, CLK_GEAR = 7, ROOT_FREQ = 11, REF_CLK_GEAR = 15, }; enum msm_ctrl_state { MSM_CTRL_AWAKE, MSM_CTRL_SLEEPING, MSM_CTRL_ASLEEP, }; struct msm_slim_sps_bam { u32 hdl; void __iomem *base; int irq; }; struct msm_slim_endp { struct sps_pipe *sps; struct sps_connect config; struct sps_register_event event; struct sps_mem_buffer buf; struct completion *xcomp; bool connected; }; struct msm_slim_ctrl { struct slim_controller ctrl; struct slim_framer framer; struct device *dev; void __iomem *base; struct resource *slew_mem; u32 curr_bw; u8 msg_cnt; u32 tx_buf[10]; u8 rx_msgs[MSM_CONCUR_MSG][SLIM_RX_MSGQ_BUF_LEN]; spinlock_t rx_lock; int head; int tail; int irq; int err; int ee; struct completion *wr_comp; struct msm_slim_sat *satd[MSM_MAX_NSATS]; struct msm_slim_endp pipes[7]; struct msm_slim_sps_bam bam; struct msm_slim_endp rx_msgq; struct completion rx_msgq_notify; struct task_struct *rx_msgq_thread; struct clk *rclk; struct mutex tx_lock; u8 pgdla; bool use_rx_msgqs; int pipe_b; struct completion reconf; bool reconf_busy; bool chan_active; enum msm_ctrl_state state; int nsats; u32 ver; }; struct msm_sat_chan { u8 chan; u16 chanh; int req_rem; int req_def; bool reconf; }; struct msm_slim_sat { struct slim_device satcl; struct msm_slim_ctrl *dev; struct workqueue_struct *wq; struct work_struct wd; u8 sat_msgs[SAT_CONCUR_MSG][40]; struct msm_sat_chan *satch; u8 nsatch; bool sent_capability; bool pending_reconf; bool pending_capability; int shead; int stail; spinlock_t lock; }; static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev); static int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len) { spin_lock(&dev->rx_lock); if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) { spin_unlock(&dev->rx_lock); dev_err(dev->dev, "RX QUEUE full!"); return -EXFULL; } memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len); 
dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG; spin_unlock(&dev->rx_lock); return 0; } static int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf) { unsigned long flags; spin_lock_irqsave(&dev->rx_lock, flags); if (dev->tail == dev->head) { spin_unlock_irqrestore(&dev->rx_lock, flags); return -ENODATA; } memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40); dev->head = (dev->head + 1) % MSM_CONCUR_MSG; spin_unlock_irqrestore(&dev->rx_lock, flags); return 0; } static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len) { struct msm_slim_ctrl *dev = sat->dev; spin_lock(&sat->lock); if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) { spin_unlock(&sat->lock); dev_err(dev->dev, "SAT QUEUE full!"); return -EXFULL; } memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len); sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG; spin_unlock(&sat->lock); return 0; } static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf) { unsigned long flags; spin_lock_irqsave(&sat->lock, flags); if (sat->stail == sat->shead) { spin_unlock_irqrestore(&sat->lock, flags); return -ENODATA; } memcpy(buf, sat->sat_msgs[sat->shead], 40); sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG; spin_unlock_irqrestore(&sat->lock, flags); return 0; } static void msm_get_eaddr(u8 *e_addr, u32 *buffer) { e_addr[0] = (buffer[1] >> 24) & 0xff; e_addr[1] = (buffer[1] >> 16) & 0xff; e_addr[2] = (buffer[1] >> 8) & 0xff; e_addr[3] = buffer[1] & 0xff; e_addr[4] = (buffer[0] >> 24) & 0xff; e_addr[5] = (buffer[0] >> 16) & 0xff; } static bool msm_is_sat_dev(u8 *e_addr) { if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB && e_addr[2] != QC_CHIPID_SL && (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2)) return true; return false; } static int msm_slim_get_ctrl(struct msm_slim_ctrl *dev) { #ifdef CONFIG_PM_RUNTIME int ref = 0; int ret = pm_runtime_get_sync(dev->dev); if (ret >= 0) { ref = atomic_read(&dev->dev->power.usage_count); if (ref <= 0) { dev_err(dev->dev, "reference count -ve:%d", ref); 
ret = -ENODEV; } } return ret; #else return -ENODEV; #endif } static void msm_slim_put_ctrl(struct msm_slim_ctrl *dev) { #ifdef CONFIG_PM_RUNTIME int ref; pm_runtime_mark_last_busy(dev->dev); ref = atomic_read(&dev->dev->power.usage_count); if (ref <= 0) dev_err(dev->dev, "reference count mismatch:%d", ref); else pm_runtime_put(dev->dev); #endif } static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr) { struct msm_slim_sat *sat = NULL; int i = 0; while (!sat && i < dev->nsats) { if (laddr == dev->satd[i]->satcl.laddr) sat = dev->satd[i]; i++; } return sat; } static irqreturn_t msm_slim_interrupt(int irq, void *d) { struct msm_slim_ctrl *dev = d; u32 pstat; u32 stat = readl_relaxed(dev->base + MGR_INT_STAT); if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) { if (stat & MGR_INT_TX_MSG_SENT) writel_relaxed(MGR_INT_TX_MSG_SENT, dev->base + MGR_INT_CLR); else { u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS); u32 mgr_ie_stat = readl_relaxed(dev->base + MGR_IE_STAT); u32 frm_stat = readl_relaxed(dev->base + FRM_STAT); u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG); u32 frm_intr_stat = readl_relaxed(dev->base + FRM_INT_STAT); u32 frm_ie_stat = readl_relaxed(dev->base + FRM_IE_STAT); u32 intf_stat = readl_relaxed(dev->base + INTF_STAT); u32 intf_intr_stat = readl_relaxed(dev->base + INTF_INT_STAT); u32 intf_ie_stat = readl_relaxed(dev->base + INTF_IE_STAT); writel_relaxed(MGR_INT_TX_NACKED_2, dev->base + MGR_INT_CLR); pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x", stat, mgr_stat); pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat); pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x", frm_intr_stat, frm_stat); pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x", frm_cfg, frm_ie_stat); pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x", intf_intr_stat, intf_stat); pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat); dev->err = -EIO; } /* * Guarantee that interrupt clear bit write goes through before * 
signalling completion/exiting ISR */ mb(); if (dev->wr_comp) complete(dev->wr_comp); } if (stat & MGR_INT_RX_MSG_RCVD) { u32 rx_buf[10]; u32 mc, mt; u8 len, i; rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG); len = rx_buf[0] & 0x1F; for (i = 1; i < ((len + 3) >> 2); i++) { rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG + (4 * i)); dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]); } mt = (rx_buf[0] >> 5) & 0x7; mc = (rx_buf[0] >> 8) & 0xff; dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt); if (mt == SLIM_MSG_MT_DEST_REFERRED_USER || mt == SLIM_MSG_MT_SRC_REFERRED_USER) { u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF); struct msm_slim_sat *sat = addr_to_sat(dev, laddr); if (sat) msm_sat_enqueue(sat, rx_buf, len); else dev_err(dev->dev, "unknown sat:%d message", laddr); writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base + MGR_INT_CLR); /* * Guarantee that CLR bit write goes through before * queuing work */ mb(); if (sat) queue_work(sat->wq, &sat->wd); } else if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) { u8 e_addr[6]; msm_get_eaddr(e_addr, rx_buf); msm_slim_rx_enqueue(dev, rx_buf, len); writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base + MGR_INT_CLR); /* * Guarantee that CLR bit write goes through * before signalling completion */ mb(); complete(&dev->rx_msgq_notify); } else if (mc == SLIM_MSG_MC_REPLY_INFORMATION || mc == SLIM_MSG_MC_REPLY_VALUE) { msm_slim_rx_enqueue(dev, rx_buf, len); writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base + MGR_INT_CLR); /* * Guarantee that CLR bit write goes through * before signalling completion */ mb(); complete(&dev->rx_msgq_notify); } else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) { u8 *buf = (u8 *)rx_buf; u8 l_addr = buf[2]; u16 ele = (u16)buf[4] << 4; ele |= ((buf[3] & 0xf0) >> 4); dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x", l_addr, ele); for (i = 0; i < len - 5; i++) dev_err(dev->dev, "offset:0x%x:bit mask:%x", i, buf[i+5]); writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base + MGR_INT_CLR); /* * Guarantee that CLR 
bit write goes through * before exiting */ mb(); } else { dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d", mc, mt, len); for (i = 0; i < ((len + 3) >> 2); i++) dev_err(dev->dev, "error msg: %x", rx_buf[i]); writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base + MGR_INT_CLR); /* * Guarantee that CLR bit write goes through * before exiting */ mb(); } } if (stat & MGR_INT_RECFG_DONE) { writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR); /* * Guarantee that CLR bit write goes through * before exiting ISR */ mb(); complete(&dev->reconf); } pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver)); if (pstat != 0) { int i = 0; for (i = dev->pipe_b; i < MSM_SLIM_NPORTS; i++) { if (pstat & 1 << i) { u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn, i, dev->ver)); if (val & (1 << 19)) { dev->ctrl.ports[i].err = SLIM_P_DISCONNECT; dev->pipes[i-dev->pipe_b].connected = false; /* * SPS will call completion since * ERROR flags are registered */ } else if (val & (1 << 2)) dev->ctrl.ports[i].err = SLIM_P_OVERFLOW; else if (val & (1 << 3)) dev->ctrl.ports[i].err = SLIM_P_UNDERFLOW; } writel_relaxed(1, PGD_THIS_EE(PGD_PORT_INT_CL_EEn, dev->ver)); } /* * Guarantee that port interrupt bit(s) clearing writes go * through before exiting ISR */ mb(); } return IRQ_HANDLED; } static int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep) { int ret; struct sps_pipe *endpoint; struct sps_connect *config = &ep->config; /* Allocate the endpoint */ endpoint = sps_alloc_endpoint(); if (!endpoint) { dev_err(dev->dev, "sps_alloc_endpoint failed\n"); return -ENOMEM; } /* Get default connection configuration for an endpoint */ ret = sps_get_config(endpoint, config); if (ret) { dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret); goto sps_config_failed; } ep->sps = endpoint; return 0; sps_config_failed: sps_free_endpoint(endpoint); return ret; } static void msm_slim_free_endpoint(struct msm_slim_endp *ep) { sps_free_endpoint(ep->sps); ep->sps = NULL; } static 
int msm_slim_sps_mem_alloc( struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len) { dma_addr_t phys; mem->size = len; mem->min_size = 0; mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL); if (!mem->base) { dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len); return -ENOMEM; } mem->phys_base = phys; memset(mem->base, 0x00, mem->size); return 0; } static void msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem) { dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base); mem->size = 0; mem->base = NULL; mem->phys_base = 0; } static void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn) { u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT; u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver)); writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver)); writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver)); writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver)); writel_relaxed((int_port | 1 << pn) , PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver)); /* Make sure that port registers are updated before returning */ mb(); } static int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn) { struct msm_slim_endp *endpoint = &dev->pipes[pn]; struct sps_connect *cfg = &endpoint->config; u32 stat; int ret = sps_get_config(dev->pipes[pn].sps, cfg); if (ret) { dev_err(dev->dev, "sps pipe-port get config error%x\n", ret); return ret; } cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR | SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE; if (dev->pipes[pn].connected) { ret = sps_set_config(dev->pipes[pn].sps, cfg); if (ret) { dev_err(dev->dev, "sps pipe-port set config erro:%x\n", ret); return ret; } } stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->pipe_b), dev->ver)); if (dev->ctrl.ports[pn].flow == SLIM_SRC) { cfg->destination = dev->bam.hdl; cfg->source = SPS_DEV_HANDLE_MEM; cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4); cfg->src_pipe_index = 0; 
dev_dbg(dev->dev, "flow src:pipe num:%d", cfg->dest_pipe_index); cfg->mode = SPS_MODE_DEST; } else { cfg->source = dev->bam.hdl; cfg->destination = SPS_DEV_HANDLE_MEM; cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4); cfg->dest_pipe_index = 0; dev_dbg(dev->dev, "flow dest:pipe num:%d", cfg->src_pipe_index); cfg->mode = SPS_MODE_SRC; } /* Space for desciptor FIFOs */ cfg->desc.size = MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec); cfg->config = SPS_CONFIG_DEFAULT; ret = sps_connect(dev->pipes[pn].sps, cfg); if (!ret) { dev->pipes[pn].connected = true; msm_hw_set_port(dev, pn + dev->pipe_b); } return ret; } static u32 *msm_get_msg_buf(struct slim_controller *ctrl, int len) { struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); /* * Currently we block a transaction until the current one completes. * In case we need multiple transactions, use message Q */ return dev->tx_buf; } static int msm_send_msg_buf(struct slim_controller *ctrl, u32 *buf, u8 len) { int i; struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); for (i = 0; i < (len + 3) >> 2; i++) { dev_dbg(dev->dev, "TX data:0x%x\n", buf[i]); writel_relaxed(buf[i], dev->base + MGR_TX_MSG + (i * 4)); } /* Guarantee that message is sent before returning */ mb(); return 0; } static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn) { DECLARE_COMPLETION_ONSTACK(done); struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); u32 *pbuf; u8 *puc; int timeout; int msgv = -1; u8 la = txn->la; u8 mc = (u8)(txn->mc & 0xFF); /* * Voting for runtime PM: Slimbus has 2 possible use cases: * 1. messaging * 2. 
Data channels * Messaging case goes through messaging slots and data channels * use their own slots * This "get" votes for messaging bandwidth */ if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) msgv = msm_slim_get_ctrl(dev); mutex_lock(&dev->tx_lock); if (dev->state == MSM_CTRL_ASLEEP || ((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) && dev->state == MSM_CTRL_SLEEPING)) { dev_err(dev->dev, "runtime or system PM suspended state"); mutex_unlock(&dev->tx_lock); if (msgv >= 0) msm_slim_put_ctrl(dev); return -EBUSY; } if (txn->mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) { if (dev->reconf_busy) { wait_for_completion(&dev->reconf); dev->reconf_busy = false; } /* This "get" votes for data channels */ if (dev->ctrl.sched.usedslots != 0 && !dev->chan_active) { int chv = msm_slim_get_ctrl(dev); if (chv >= 0) dev->chan_active = true; } } txn->rl--; pbuf = msm_get_msg_buf(ctrl, txn->rl); dev->wr_comp = NULL; dev->err = 0; if (txn->dt == SLIM_MSG_DEST_ENUMADDR) { mutex_unlock(&dev->tx_lock); if (msgv >= 0) msm_slim_put_ctrl(dev); return -EPROTONOSUPPORT; } if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF && (mc == SLIM_MSG_MC_CONNECT_SOURCE || mc == SLIM_MSG_MC_CONNECT_SINK || mc == SLIM_MSG_MC_DISCONNECT_PORT)) la = dev->pgdla; if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la); else *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la); if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) puc = ((u8 *)pbuf) + 3; else puc = ((u8 *)pbuf) + 2; if (txn->rbuf) *(puc++) = txn->tid; if ((txn->mt == SLIM_MSG_MT_CORE) && ((mc >= SLIM_MSG_MC_REQUEST_INFORMATION && mc <= SLIM_MSG_MC_REPORT_INFORMATION) || (mc >= SLIM_MSG_MC_REQUEST_VALUE && mc <= SLIM_MSG_MC_CHANGE_VALUE))) { *(puc++) = (txn->ec & 0xFF); *(puc++) = (txn->ec >> 8)&0xFF; } if (txn->wbuf) memcpy(puc, txn->wbuf, txn->len); if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF && (mc == SLIM_MSG_MC_CONNECT_SOURCE || mc == SLIM_MSG_MC_CONNECT_SINK || mc == 
SLIM_MSG_MC_DISCONNECT_PORT)) { if (mc != SLIM_MSG_MC_DISCONNECT_PORT) dev->err = msm_slim_connect_pipe_port(dev, *puc); else { struct msm_slim_endp *endpoint = &dev->pipes[*puc]; struct sps_register_event sps_event; memset(&sps_event, 0, sizeof(sps_event)); sps_register_event(endpoint->sps, &sps_event); sps_disconnect(endpoint->sps); /* * Remove channel disconnects master-side ports from * channel. No need to send that again on the bus */ dev->pipes[*puc].connected = false; mutex_unlock(&dev->tx_lock); if (msgv >= 0) msm_slim_put_ctrl(dev); return 0; } if (dev->err) { dev_err(dev->dev, "pipe-port connect err:%d", dev->err); mutex_unlock(&dev->tx_lock); if (msgv >= 0) msm_slim_put_ctrl(dev); return dev->err; } *(puc) = *(puc) + dev->pipe_b; } if (txn->mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) dev->reconf_busy = true; dev->wr_comp = &done; msm_send_msg_buf(ctrl, pbuf, txn->rl); timeout = wait_for_completion_timeout(&done, HZ); if (!timeout) dev->wr_comp = NULL; if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) { if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW | SLIM_MSG_CLK_PAUSE_SEQ_FLG)) && timeout) { timeout = wait_for_completion_timeout(&dev->reconf, HZ); dev->reconf_busy = false; if (timeout) { clk_disable_unprepare(dev->rclk); disable_irq(dev->irq); } } if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW | SLIM_MSG_CLK_PAUSE_SEQ_FLG)) && !timeout) { dev->reconf_busy = false; dev_err(dev->dev, "clock pause failed"); mutex_unlock(&dev->tx_lock); return -ETIMEDOUT; } if (txn->mt == SLIM_MSG_MT_CORE && txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) { if (dev->ctrl.sched.usedslots == 0 && dev->chan_active) { dev->chan_active = false; msm_slim_put_ctrl(dev); } } } mutex_unlock(&dev->tx_lock); if (msgv >= 0) msm_slim_put_ctrl(dev); if (!timeout) dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, txn->mt); return timeout ? 
dev->err : -ETIMEDOUT; } static void msm_slim_wait_retry(struct msm_slim_ctrl *dev) { int msec_per_frm = 0; int sfr_per_sec; /* Wait for 1 superframe, or default time and then retry */ sfr_per_sec = dev->framer.superfreq / (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear)); if (sfr_per_sec) msec_per_frm = MSEC_PER_SEC / sfr_per_sec; if (msec_per_frm < DEF_RETRY_MS) msec_per_frm = DEF_RETRY_MS; msleep(msec_per_frm); } static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea, u8 elen, u8 laddr) { struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); struct completion done; int timeout, ret, retries = 0; u32 *buf; retry_laddr: init_completion(&done); mutex_lock(&dev->tx_lock); buf = msm_get_msg_buf(ctrl, 9); buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS, SLIM_MSG_DEST_LOGICALADDR, ea[5] | ea[4] << 8); buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24); buf[2] = laddr; dev->wr_comp = &done; ret = msm_send_msg_buf(ctrl, buf, 9); timeout = wait_for_completion_timeout(&done, HZ); if (!timeout) dev->err = -ETIMEDOUT; if (dev->err) { ret = dev->err; dev->err = 0; dev->wr_comp = NULL; } mutex_unlock(&dev->tx_lock); if (ret) { pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret); if (retries < INIT_MX_RETRIES) { msm_slim_wait_retry(dev); retries++; goto retry_laddr; } else { pr_err("set LADDR failed after retrying:ret:%d", ret); } } return ret; } static int msm_clk_pause_wakeup(struct slim_controller *ctrl) { struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); enable_irq(dev->irq); clk_prepare_enable(dev->rclk); writel_relaxed(1, dev->base + FRM_WAKEUP); /* Make sure framer wakeup write goes through before exiting function */ mb(); /* * Workaround: Currently, slave is reporting lost-sync messages * after slimbus comes out of clock pause. * Transaction with slave fail before slave reports that message * Give some time for that report to come * Slimbus wakes up in clock gear 10 at 24.576MHz. 
With each superframe * being 250 usecs, we wait for 20 superframes here to ensure * we get the message */ usleep_range(5000, 5000); return 0; } static int msm_config_port(struct slim_controller *ctrl, u8 pn) { struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); struct msm_slim_endp *endpoint; int ret = 0; if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP || ctrl->ports[pn].req == SLIM_REQ_MULTI_CH) return -EPROTONOSUPPORT; if (pn >= (MSM_SLIM_NPORTS - dev->pipe_b)) return -ENODEV; endpoint = &dev->pipes[pn]; ret = msm_slim_init_endpoint(dev, endpoint); dev_dbg(dev->dev, "sps register bam error code:%x\n", ret); return ret; } static enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr, u8 pn, u8 **done_buf, u32 *done_len) { struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr); struct sps_iovec sio; int ret; if (done_len) *done_len = 0; if (done_buf) *done_buf = NULL; if (!dev->pipes[pn].connected) return SLIM_P_DISCONNECT; ret = sps_get_iovec(dev->pipes[pn].sps, &sio); if (!ret) { if (done_len) *done_len = sio.size; if (done_buf) *done_buf = (u8 *)sio.addr; } dev_dbg(dev->dev, "get iovec returned %d\n", ret); return SLIM_P_INPROGRESS; } static int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf, u32 len, struct completion *comp) { struct sps_register_event sreg; int ret; struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); if (pn >= 7) return -ENODEV; ctrl->ports[pn].xcomp = comp; sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR); sreg.mode = SPS_TRIGGER_WAIT; sreg.xfer_done = comp; sreg.callback = NULL; sreg.user = &ctrl->ports[pn]; ret = sps_register_event(dev->pipes[pn].sps, &sreg); if (ret) { dev_dbg(dev->dev, "sps register event error:%x\n", ret); return ret; } ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, NULL, SPS_IOVEC_FLAG_INT); dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret); return ret; } static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc) { struct msm_slim_ctrl *dev = 
sat->dev; enum slim_ch_control oper; int i; int ret = 0; if (mc == SLIM_USR_MC_CHAN_CTRL) { for (i = 0; i < sat->nsatch; i++) { if (buf[5] == sat->satch[i].chan) break; } if (i >= sat->nsatch) return -ENOTCONN; oper = ((buf[3] & 0xC0) >> 6); /* part of grp. activating/removing 1 will take care of rest */ ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper, false); if (!ret) { for (i = 5; i < len; i++) { int j; for (j = 0; j < sat->nsatch; j++) { if (buf[i] == sat->satch[j].chan) { if (oper == SLIM_CH_REMOVE) sat->satch[j].req_rem++; else sat->satch[j].req_def++; break; } } } } } else { u16 chh[40]; struct slim_ch prop; u32 exp; u8 coeff, cc; u8 prrate = buf[6]; if (len <= 8) return -EINVAL; for (i = 8; i < len; i++) { int j = 0; for (j = 0; j < sat->nsatch; j++) { if (sat->satch[j].chan == buf[i]) { chh[i - 8] = sat->satch[j].chanh; break; } } if (j < sat->nsatch) { u16 dummy; ret = slim_query_ch(&sat->satcl, buf[i], &dummy); if (ret) return ret; if (mc == SLIM_USR_MC_DEF_ACT_CHAN) sat->satch[j].req_def++; continue; } if (sat->nsatch >= MSM_MAX_SATCH) return -EXFULL; ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]); if (ret) return ret; sat->satch[j].chan = buf[i]; sat->satch[j].chanh = chh[i - 8]; if (mc == SLIM_USR_MC_DEF_ACT_CHAN) sat->satch[j].req_def++; sat->nsatch++; } prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5); prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5); prop.baser = SLIM_RATE_4000HZ; if (prrate & 0x8) prop.baser = SLIM_RATE_11025HZ; else prop.baser = SLIM_RATE_4000HZ; prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F); prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL; exp = (u32)((buf[5] & 0xF0) >> 4); coeff = (buf[4] & 0x20) >> 5; cc = (coeff ? 
3 : 1); prop.ratem = cc * (1 << exp); if (i > 9) ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8, true, &chh[0]); else ret = slim_define_ch(&sat->satcl, &prop, &chh[0], 1, false, NULL); dev_dbg(dev->dev, "define sat grp returned:%d", ret); if (ret) return ret; /* part of group so activating 1 will take care of rest */ if (mc == SLIM_USR_MC_DEF_ACT_CHAN) ret = slim_control_ch(&sat->satcl, chh[0], SLIM_CH_ACTIVATE, false); } return ret; } static void msm_slim_rxwq(struct msm_slim_ctrl *dev) { u8 buf[40]; u8 mc, mt, len; int i, ret; if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) { len = buf[0] & 0x1F; mt = (buf[0] >> 5) & 0x7; mc = buf[1]; if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) { u8 laddr; u8 e_addr[6]; for (i = 0; i < 6; i++) e_addr[i] = buf[7-i]; ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr); /* Is this Qualcomm ported generic device? */ if (!ret && e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB && e_addr[1] == QC_DEVID_PGD && e_addr[2] != QC_CHIPID_SL) dev->pgdla = laddr; if (!ret && !pm_runtime_enabled(dev->dev) && laddr == (QC_MSM_DEVS - 1)) pm_runtime_enable(dev->dev); if (!ret && msm_is_sat_dev(e_addr)) { struct msm_slim_sat *sat = addr_to_sat(dev, laddr); if (!sat) sat = msm_slim_alloc_sat(dev); if (!sat) return; sat->satcl.laddr = laddr; msm_sat_enqueue(sat, (u32 *)buf, len); queue_work(sat->wq, &sat->wd); } if (ret) pr_err("assign laddr failed, error:%d", ret); } else if (mc == SLIM_MSG_MC_REPLY_INFORMATION || mc == SLIM_MSG_MC_REPLY_VALUE) { u8 tid = buf[3]; dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4); slim_msg_response(&dev->ctrl, &buf[4], tid, len - 4); pm_runtime_mark_last_busy(dev->dev); } else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) { u8 l_addr = buf[2]; u16 ele = (u16)buf[4] << 4; ele |= ((buf[3] & 0xf0) >> 4); dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x", l_addr, ele); for (i = 0; i < len - 5; i++) dev_err(dev->dev, "offset:0x%x:bit mask:%x", i, buf[i+5]); } else { 
dev_err(dev->dev, "unexpected message:mc:%x, mt:%x", mc, mt); for (i = 0; i < len; i++) dev_err(dev->dev, "error msg: %x", buf[i]); } } else dev_err(dev->dev, "rxwq called and no dequeue"); } static void slim_sat_rxprocess(struct work_struct *work) { struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd); struct msm_slim_ctrl *dev = sat->dev; u8 buf[40]; while ((msm_sat_dequeue(sat, buf)) != -ENODATA) { struct slim_msg_txn txn; u8 len, mc, mt; u32 bw_sl; int ret = 0; int satv = -1; bool gen_ack = false; u8 tid; u8 wbuf[8]; int i, retries = 0; txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER; txn.dt = SLIM_MSG_DEST_LOGICALADDR; txn.ec = 0; txn.rbuf = NULL; txn.la = sat->satcl.laddr; /* satellite handling */ len = buf[0] & 0x1F; mc = buf[1]; mt = (buf[0] >> 5) & 0x7; if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) { u8 e_addr[6]; for (i = 0; i < 6; i++) e_addr[i] = buf[7-i]; if (pm_runtime_enabled(dev->dev)) { satv = msm_slim_get_ctrl(dev); if (satv >= 0) sat->pending_capability = true; } /* * Since capability message is already sent, present * message will indicate subsystem hosting this * satellite has restarted. 
* Remove all active channels of this satellite * when this is detected */ if (sat->sent_capability) { for (i = 0; i < sat->nsatch; i++) { if (sat->satch[i].reconf) { pr_err("SSR, sat:%d, rm ch:%d", sat->satcl.laddr, sat->satch[i].chan); slim_control_ch(&sat->satcl, sat->satch[i].chanh, SLIM_CH_REMOVE, true); sat->satch[i].reconf = false; } } } } else if (mt != SLIM_MSG_MT_CORE && mc != SLIM_MSG_MC_REPORT_PRESENT) { satv = msm_slim_get_ctrl(dev); } switch (mc) { case SLIM_MSG_MC_REPORT_PRESENT: /* Remove runtime_pm vote once satellite acks */ if (mt != SLIM_MSG_MT_CORE) { if (pm_runtime_enabled(dev->dev) && sat->pending_capability) { msm_slim_put_ctrl(dev); sat->pending_capability = false; } continue; } /* send a Manager capability msg */ if (sat->sent_capability) { if (mt == SLIM_MSG_MT_CORE) goto send_capability; else continue; } ret = slim_add_device(&dev->ctrl, &sat->satcl); if (ret) { dev_err(dev->dev, "Satellite-init failed"); continue; } /* Satellite-channels */ sat->satch = kzalloc(MSM_MAX_SATCH * sizeof(struct msm_sat_chan), GFP_KERNEL); send_capability: txn.mc = SLIM_USR_MC_MASTER_CAPABILITY; txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER; txn.la = sat->satcl.laddr; txn.rl = 8; wbuf[0] = SAT_MAGIC_LSB; wbuf[1] = SAT_MAGIC_MSB; wbuf[2] = SAT_MSG_VER; wbuf[3] = SAT_MSG_PROT; txn.wbuf = wbuf; txn.len = 4; ret = msm_xfer_msg(&dev->ctrl, &txn); if (ret) { pr_err("capability for:0x%x fail:%d, retry:%d", sat->satcl.laddr, ret, retries); if (retries < INIT_MX_RETRIES) { msm_slim_wait_retry(dev); retries++; goto send_capability; } else { pr_err("failed after all retries:%d", ret); } } else { sat->sent_capability = true; } break; case SLIM_USR_MC_ADDR_QUERY: memcpy(&wbuf[1], &buf[4], 6); ret = slim_get_logical_addr(&sat->satcl, &wbuf[1], 6, &wbuf[7]); if (ret) memset(&wbuf[1], 0, 6); wbuf[0] = buf[3]; txn.mc = SLIM_USR_MC_ADDR_REPLY; txn.rl = 12; txn.len = 8; txn.wbuf = wbuf; msm_xfer_msg(&dev->ctrl, &txn); break; case SLIM_USR_MC_DEFINE_CHAN: case SLIM_USR_MC_DEF_ACT_CHAN: 
case SLIM_USR_MC_CHAN_CTRL: if (mc != SLIM_USR_MC_CHAN_CTRL) tid = buf[7]; else tid = buf[4]; gen_ack = true; ret = msm_sat_define_ch(sat, buf, len, mc); if (ret) { dev_err(dev->dev, "SAT define_ch returned:%d", ret); } if (!sat->pending_reconf) { int chv = msm_slim_get_ctrl(dev); if (chv >= 0) sat->pending_reconf = true; } break; case SLIM_USR_MC_RECONFIG_NOW: tid = buf[3]; gen_ack = true; ret = slim_reconfigure_now(&sat->satcl); for (i = 0; i < sat->nsatch; i++) { struct msm_sat_chan *sch = &sat->satch[i]; if (sch->req_rem && sch->reconf) { if (!ret) { slim_dealloc_ch(&sat->satcl, sch->chanh); sch->reconf = false; } sch->req_rem--; } else if (sch->req_def) { if (ret) slim_dealloc_ch(&sat->satcl, sch->chanh); else sch->reconf = true; sch->req_def--; } } if (sat->pending_reconf) { msm_slim_put_ctrl(dev); sat->pending_reconf = false; } break; case SLIM_USR_MC_REQ_BW: /* what we get is in SLOTS */ bw_sl = (u32)buf[4] << 3 | ((buf[3] & 0xE0) >> 5); sat->satcl.pending_msgsl = bw_sl; tid = buf[5]; gen_ack = true; break; case SLIM_USR_MC_CONNECT_SRC: case SLIM_USR_MC_CONNECT_SINK: if (mc == SLIM_USR_MC_CONNECT_SRC) txn.mc = SLIM_MSG_MC_CONNECT_SOURCE; else txn.mc = SLIM_MSG_MC_CONNECT_SINK; wbuf[0] = buf[4] & 0x1F; wbuf[1] = buf[5]; tid = buf[6]; txn.la = buf[3]; txn.mt = SLIM_MSG_MT_CORE; txn.rl = 6; txn.len = 2; txn.wbuf = wbuf; gen_ack = true; ret = msm_xfer_msg(&dev->ctrl, &txn); break; case SLIM_USR_MC_DISCONNECT_PORT: txn.mc = SLIM_MSG_MC_DISCONNECT_PORT; wbuf[0] = buf[4] & 0x1F; tid = buf[5]; txn.la = buf[3]; txn.rl = 5; txn.len = 1; txn.mt = SLIM_MSG_MT_CORE; txn.wbuf = wbuf; gen_ack = true; ret = msm_xfer_msg(&dev->ctrl, &txn); default: break; } if (!gen_ack) { if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0) msm_slim_put_ctrl(dev); continue; } wbuf[0] = tid; if (!ret) wbuf[1] = MSM_SAT_SUCCSS; else wbuf[1] = 0; txn.mc = SLIM_USR_MC_GENERIC_ACK; txn.la = sat->satcl.laddr; txn.rl = 6; txn.len = 2; txn.wbuf = wbuf; txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER; 
msm_xfer_msg(&dev->ctrl, &txn); if (satv >= 0) msm_slim_put_ctrl(dev); } } static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev) { struct msm_slim_sat *sat; char *name; if (dev->nsats >= MSM_MAX_NSATS) return NULL; sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL); if (!sat) { dev_err(dev->dev, "no memory for satellite"); return NULL; } name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL); if (!name) { dev_err(dev->dev, "no memory for satellite name"); kfree(sat); return NULL; } dev->satd[dev->nsats] = sat; sat->dev = dev; snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats); sat->satcl.name = name; spin_lock_init(&sat->lock); INIT_WORK(&sat->wd, slim_sat_rxprocess); sat->wq = create_singlethread_workqueue(sat->satcl.name); if (!sat->wq) { kfree(name); kfree(sat); return NULL; } /* * Both sats will be allocated from RX thread and RX thread will * process messages sequentially. No synchronization necessary */ dev->nsats++; return sat; } static void msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev) { u32 *buf = ev->data.transfer.user; struct sps_iovec *iovec = &ev->data.transfer.iovec; /* * Note the virtual address needs to be offset by the same index * as the physical address or just pass in the actual virtual address * if the sps_mem_buffer is not needed. 
Note that if completion is * used, the virtual address won't be available and will need to be * calculated based on the offset of the physical address */ if (ev->event_id == SPS_EVENT_DESC_DONE) { pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf); pr_debug("iovec = (0x%x 0x%x 0x%x)\n", iovec->addr, iovec->size, iovec->flags); } else { dev_err(dev->dev, "%s: unknown event %d\n", __func__, ev->event_id); } } static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify) { struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user; msm_slim_rx_msgq_event(dev, notify); } /* Queue up Rx message buffer */ static inline int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix) { int ret; u32 flags = SPS_IOVEC_FLAG_INT; struct msm_slim_endp *endpoint = &dev->rx_msgq; struct sps_mem_buffer *mem = &endpoint->buf; struct sps_pipe *pipe = endpoint->sps; /* Rx message queue buffers are 4 bytes in length */ u8 *virt_addr = mem->base + (4 * ix); u32 phys_addr = mem->phys_base + (4 * ix); pr_debug("index:%d, phys:0x%x, virt:0x%p\n", ix, phys_addr, virt_addr); ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags); if (ret) dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix); return ret; } static inline int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset) { struct msm_slim_endp *endpoint = &dev->rx_msgq; struct sps_mem_buffer *mem = &endpoint->buf; struct sps_pipe *pipe = endpoint->sps; struct sps_iovec iovec; int index; int ret; ret = sps_get_iovec(pipe, &iovec); if (ret) { dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret); goto err_exit; } pr_debug("iovec = (0x%x 0x%x 0x%x)\n", iovec.addr, iovec.size, iovec.flags); BUG_ON(iovec.addr < mem->phys_base); BUG_ON(iovec.addr >= mem->phys_base + mem->size); /* Calculate buffer index */ index = (iovec.addr - mem->phys_base) / 4; *(data + offset) = *((u32 *)mem->base + index); pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data); /* Add buffer back to the queue */ 
(void)msm_slim_post_rx_msgq(dev, index); err_exit: return ret; } static int msm_slim_rx_msgq_thread(void *data) { struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data; struct completion *notify = &dev->rx_msgq_notify; struct msm_slim_sat *sat = NULL; u32 mc = 0; u32 mt = 0; u32 buffer[10]; int index = 0; u8 msg_len = 0; int ret; dev_dbg(dev->dev, "rx thread started"); while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); ret = wait_for_completion_interruptible(notify); if (ret) dev_err(dev->dev, "rx thread wait error:%d", ret); /* 1 irq notification per message */ if (!dev->use_rx_msgqs) { msm_slim_rxwq(dev); continue; } ret = msm_slim_rx_msgq_get(dev, buffer, index); if (ret) { dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret); continue; } pr_debug("message[%d] = 0x%x\n", index, *buffer); /* Decide if we use generic RX or satellite RX */ if (index++ == 0) { msg_len = *buffer & 0x1F; pr_debug("Start of new message, len = %d\n", msg_len); mt = (buffer[0] >> 5) & 0x7; mc = (buffer[0] >> 8) & 0xff; dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt); if (mt == SLIM_MSG_MT_DEST_REFERRED_USER || mt == SLIM_MSG_MT_SRC_REFERRED_USER) { u8 laddr; laddr = (u8)((buffer[0] >> 16) & 0xff); sat = addr_to_sat(dev, laddr); } } else if ((index * 4) >= msg_len) { index = 0; if (sat) { msm_sat_enqueue(sat, buffer, msg_len); queue_work(sat->wq, &sat->wd); sat = NULL; } else { msm_slim_rx_enqueue(dev, buffer, msg_len); msm_slim_rxwq(dev); } } } return 0; } static int __devinit msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev) { int i, ret; u32 pipe_offset; struct msm_slim_endp *endpoint = &dev->rx_msgq; struct sps_connect *config = &endpoint->config; struct sps_mem_buffer *descr = &config->desc; struct sps_mem_buffer *mem = &endpoint->buf; struct completion *notify = &dev->rx_msgq_notify; struct sps_register_event sps_error_event; /* SPS_ERROR */ struct sps_register_event sps_descr_event; /* DESCR_DONE */ init_completion(notify); if (!dev->use_rx_msgqs) goto 
rx_thread_create; /* Allocate the endpoint */ ret = msm_slim_init_endpoint(dev, endpoint); if (ret) { dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret); goto sps_init_endpoint_failed; } /* Get the pipe indices for the message queues */ pipe_offset = (readl_relaxed(dev->base + MGR_STATUS) & 0xfc) >> 2; dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset); config->mode = SPS_MODE_SRC; config->source = dev->bam.hdl; config->destination = SPS_DEV_HANDLE_MEM; config->src_pipe_index = pipe_offset; config->options = SPS_O_DESC_DONE | SPS_O_ERROR | SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE; /* Allocate memory for the FIFO descriptors */ ret = msm_slim_sps_mem_alloc(dev, descr, MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec)); if (ret) { dev_err(dev->dev, "unable to allocate SPS descriptors\n"); goto alloc_descr_failed; } ret = sps_connect(endpoint->sps, config); if (ret) { dev_err(dev->dev, "sps_connect failed 0x%x\n", ret); goto sps_connect_failed; } /* Register completion for DESC_DONE */ init_completion(notify); memset(&sps_descr_event, 0x00, sizeof(sps_descr_event)); sps_descr_event.mode = SPS_TRIGGER_CALLBACK; sps_descr_event.options = SPS_O_DESC_DONE; sps_descr_event.user = (void *)dev; sps_descr_event.xfer_done = notify; ret = sps_register_event(endpoint->sps, &sps_descr_event); if (ret) { dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret); goto sps_reg_event_failed; } /* Register callback for errors */ memset(&sps_error_event, 0x00, sizeof(sps_error_event)); sps_error_event.mode = SPS_TRIGGER_CALLBACK; sps_error_event.options = SPS_O_ERROR; sps_error_event.user = (void *)dev; sps_error_event.callback = msm_slim_rx_msgq_cb; ret = sps_register_event(endpoint->sps, &sps_error_event); if (ret) { dev_err(dev->dev, "sps_connect() failed 0x%x\n", ret); goto sps_reg_event_failed; } /* Allocate memory for the message buffer(s), N descrs, 4-byte mesg */ ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4); if (ret) { dev_err(dev->dev, 
"dma_alloc_coherent failed\n"); goto alloc_buffer_failed; } /* * Call transfer_one for each 4-byte buffer * Use (buf->size/4) - 1 for the number of buffer to post */ /* Setup the transfer */ for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) { ret = msm_slim_post_rx_msgq(dev, i); if (ret) { dev_err(dev->dev, "post_rx_msgq() failed 0x%x\n", ret); goto sps_transfer_failed; } } rx_thread_create: /* Fire up the Rx message queue thread */ dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev, MSM_SLIM_NAME "_rx_msgq_thread"); if (!dev->rx_msgq_thread) { dev_err(dev->dev, "Failed to start Rx message queue thread\n"); /* Tear-down BAMs or return? */ if (!dev->use_rx_msgqs) return -EIO; else ret = -EIO; } else return 0; sps_transfer_failed: msm_slim_sps_mem_free(dev, mem); alloc_buffer_failed: memset(&sps_error_event, 0x00, sizeof(sps_error_event)); sps_register_event(endpoint->sps, &sps_error_event); sps_reg_event_failed: sps_disconnect(endpoint->sps); sps_connect_failed: msm_slim_sps_mem_free(dev, descr); alloc_descr_failed: msm_slim_free_endpoint(endpoint); sps_init_endpoint_failed: dev->use_rx_msgqs = 0; return ret; } /* Registers BAM h/w resource with SPS driver and initializes msgq endpoints */ static int __devinit msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem) { int i, ret; u32 bam_handle; struct sps_bam_props bam_props = {0}; static struct sps_bam_sec_config_props sec_props = { .ees = { [0] = { /* LPASS */ .vmid = 0, .pipe_mask = 0xFFFF98, }, [1] = { /* Krait Apps */ .vmid = 1, .pipe_mask = 0x3F000007, }, [2] = { /* Modem */ .vmid = 2, .pipe_mask = 0x00000060, }, }, }; if (!dev->use_rx_msgqs) goto init_rx_msgq; bam_props.ee = dev->ee; bam_props.virt_addr = dev->bam.base; bam_props.phys_addr = bam_mem->start; bam_props.irq = dev->bam.irq; bam_props.manage = SPS_BAM_MGR_LOCAL; bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD; bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG; bam_props.p_sec_config_props = &sec_props; 
bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR | SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE; /* First 7 bits are for message Qs */ for (i = 7; i < 32; i++) { /* Check what pipes are owned by Apps. */ if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1) break; } dev->pipe_b = i - 7; /* Register the BAM device with the SPS driver */ ret = sps_register_bam_device(&bam_props, &bam_handle); if (ret) { dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret); dev->use_rx_msgqs = 0; goto init_rx_msgq; } dev->bam.hdl = bam_handle; dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle); init_rx_msgq: ret = msm_slim_init_rx_msgq(dev); if (ret) dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret); if (!dev->use_rx_msgqs && bam_handle) { sps_deregister_bam_device(bam_handle); dev->bam.hdl = 0L; } return ret; } static void msm_slim_sps_exit(struct msm_slim_ctrl *dev) { if (dev->use_rx_msgqs) { struct msm_slim_endp *endpoint = &dev->rx_msgq; struct sps_connect *config = &endpoint->config; struct sps_mem_buffer *descr = &config->desc; struct sps_mem_buffer *mem = &endpoint->buf; struct sps_register_event sps_event; memset(&sps_event, 0x00, sizeof(sps_event)); msm_slim_sps_mem_free(dev, mem); sps_register_event(endpoint->sps, &sps_event); sps_disconnect(endpoint->sps); msm_slim_sps_mem_free(dev, descr); msm_slim_free_endpoint(endpoint); sps_deregister_bam_device(dev->bam.hdl); } } static void msm_slim_prg_slew(struct platform_device *pdev, struct msm_slim_ctrl *dev) { struct resource *slew_io; void __iomem *slew_reg; /* SLEW RATE register for this slimbus */ dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimbus_slew_reg"); if (!dev->slew_mem) { dev_dbg(&pdev->dev, "no slimbus slew resource\n"); return; } slew_io = request_mem_region(dev->slew_mem->start, resource_size(dev->slew_mem), pdev->name); if (!slew_io) { dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n"); dev->slew_mem = NULL; return; } slew_reg = 
ioremap(dev->slew_mem->start, resource_size(dev->slew_mem)); if (!slew_reg) { dev_dbg(dev->dev, "slew register mapping failed"); release_mem_region(dev->slew_mem->start, resource_size(dev->slew_mem)); dev->slew_mem = NULL; return; } writel_relaxed(1, slew_reg); /* Make sure slimbus-slew rate enabling goes through */ wmb(); iounmap(slew_reg); } static int __devinit msm_slim_probe(struct platform_device *pdev) { struct msm_slim_ctrl *dev; int ret; struct resource *bam_mem, *bam_io; struct resource *slim_mem, *slim_io; struct resource *irq, *bam_irq; slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimbus_physical"); if (!slim_mem) { dev_err(&pdev->dev, "no slimbus physical memory resource\n"); return -ENODEV; } slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem), pdev->name); if (!slim_io) { dev_err(&pdev->dev, "slimbus memory already claimed\n"); return -EBUSY; } bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimbus_bam_physical"); if (!bam_mem) { dev_err(&pdev->dev, "no slimbus BAM memory resource\n"); ret = -ENODEV; goto err_get_res_bam_failed; } bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem), pdev->name); if (!bam_io) { release_mem_region(slim_mem->start, resource_size(slim_mem)); dev_err(&pdev->dev, "slimbus BAM memory already claimed\n"); ret = -EBUSY; goto err_get_res_bam_failed; } irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "slimbus_irq"); if (!irq) { dev_err(&pdev->dev, "no slimbus IRQ resource\n"); ret = -ENODEV; goto err_get_res_failed; } bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "slimbus_bam_irq"); if (!bam_irq) { dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n"); ret = -ENODEV; goto err_get_res_failed; } dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL); if (!dev) { dev_err(&pdev->dev, "no memory for MSM slimbus controller\n"); ret = -ENOMEM; goto err_get_res_failed; } dev->dev = &pdev->dev; platform_set_drvdata(pdev, dev); 
slim_set_ctrldata(&dev->ctrl, dev); dev->base = ioremap(slim_mem->start, resource_size(slim_mem)); if (!dev->base) { dev_err(&pdev->dev, "IOremap failed\n"); ret = -ENOMEM; goto err_ioremap_failed; } dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem)); if (!dev->bam.base) { dev_err(&pdev->dev, "BAM IOremap failed\n"); ret = -ENOMEM; goto err_ioremap_bam_failed; } if (pdev->dev.of_node) { ret = of_property_read_u32(pdev->dev.of_node, "cell-index", &dev->ctrl.nr); if (ret) { dev_err(&pdev->dev, "Cell index not specified:%d", ret); goto err_of_init_failed; } /* Optional properties */ ret = of_property_read_u32(pdev->dev.of_node, "qcom,min-clk-gear", &dev->ctrl.min_cg); ret = of_property_read_u32(pdev->dev.of_node, "qcom,max-clk-gear", &dev->ctrl.max_cg); pr_err("min_cg:%d, max_cg:%d, ret:%d", dev->ctrl.min_cg, dev->ctrl.max_cg, ret); } else { dev->ctrl.nr = pdev->id; } dev->ctrl.nchans = MSM_SLIM_NCHANS; dev->ctrl.nports = MSM_SLIM_NPORTS; dev->ctrl.set_laddr = msm_set_laddr; dev->ctrl.xfer_msg = msm_xfer_msg; dev->ctrl.wakeup = msm_clk_pause_wakeup; dev->ctrl.config_port = msm_config_port; dev->ctrl.port_xfer = msm_slim_port_xfer; dev->ctrl.port_xfer_status = msm_slim_port_xfer_status; /* Reserve some messaging BW for satellite-apps driver communication */ dev->ctrl.sched.pending_msgsl = 30; init_completion(&dev->reconf); mutex_init(&dev->tx_lock); spin_lock_init(&dev->rx_lock); dev->ee = 1; dev->use_rx_msgqs = 1; dev->irq = irq->start; dev->bam.irq = bam_irq->start; ret = msm_slim_sps_init(dev, bam_mem); if (ret != 0) { dev_err(dev->dev, "error SPS init\n"); goto err_sps_init_failed; } dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3; dev->framer.superfreq = dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8; dev->ctrl.a_framer = &dev->framer; dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR; dev->ctrl.dev.parent = &pdev->dev; dev->ctrl.dev.of_node = pdev->dev.of_node; ret = request_irq(dev->irq, msm_slim_interrupt, IRQF_TRIGGER_HIGH, "msm_slim_irq", dev); if (ret) { 
dev_err(&pdev->dev, "request IRQ failed\n"); goto err_request_irq_failed; } msm_slim_prg_slew(pdev, dev); /* Register with framework before enabling frame, clock */ ret = slim_add_numbered_controller(&dev->ctrl); if (ret) { dev_err(dev->dev, "error adding controller\n"); goto err_ctrl_failed; } dev->rclk = clk_get(dev->dev, "core_clk"); if (!dev->rclk) { dev_err(dev->dev, "slimbus clock not found"); goto err_clk_get_failed; } clk_set_rate(dev->rclk, SLIM_ROOT_FREQ); clk_prepare_enable(dev->rclk); dev->ver = readl_relaxed(dev->base); /* Version info in 16 MSbits */ dev->ver >>= 16; /* Component register initialization */ writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver)); writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1), dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver)); /* * Manager register initialization * If RX msg Q is used, disable RX_MSG_RCVD interrupt */ if (dev->use_rx_msgqs) writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 | MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */ MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN); else writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 | MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD | MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN); writel_relaxed(1, dev->base + MGR_CFG); /* * Framer registers are beyond 1K memory region after Manager and/or * component registers. Make sure those writes are ordered * before framer register writes */ wmb(); /* Framer register initialization */ writel_relaxed((0xA << REF_CLK_GEAR) | (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1, dev->base + FRM_CFG); /* * Make sure that framer wake-up and enabling writes go through * before any other component is enabled. 
Framer is responsible for * clocking the bus and enabling framer first will ensure that other * devices can report presence when they are enabled */ mb(); /* Enable RX msg Q */ if (dev->use_rx_msgqs) writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN, dev->base + MGR_CFG); else writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG); /* * Make sure that manager-enable is written through before interface * device is enabled */ mb(); writel_relaxed(1, dev->base + INTF_CFG); /* * Make sure that interface-enable is written through before enabling * ported generic device inside MSM manager */ mb(); writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver)); writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) + (4 * dev->ee)); /* * Make sure that ported generic device is enabled and port-EE settings * are written through before finally enabling the component */ mb(); writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver)); /* * Make sure that all writes have gone through before exiting this * function */ mb(); if (pdev->dev.of_node) of_register_slim_devices(&dev->ctrl); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND); pm_runtime_set_active(&pdev->dev); dev_dbg(dev->dev, "MSM SB controller is up!\n"); return 0; err_ctrl_failed: writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver)); err_clk_get_failed: kfree(dev->satd); err_request_irq_failed: msm_slim_sps_exit(dev); err_sps_init_failed: err_of_init_failed: iounmap(dev->bam.base); err_ioremap_bam_failed: iounmap(dev->base); err_ioremap_failed: kfree(dev); err_get_res_failed: release_mem_region(bam_mem->start, resource_size(bam_mem)); err_get_res_bam_failed: release_mem_region(slim_mem->start, resource_size(slim_mem)); return ret; } static int __devexit msm_slim_remove(struct platform_device *pdev) { struct msm_slim_ctrl *dev = platform_get_drvdata(pdev); struct resource *bam_mem; struct resource *slim_mem; struct resource *slew_mem = 
dev->slew_mem; int i; for (i = 0; i < dev->nsats; i++) { struct msm_slim_sat *sat = dev->satd[i]; int j; for (j = 0; j < sat->nsatch; j++) slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh); slim_remove_device(&sat->satcl); kfree(sat->satch); destroy_workqueue(sat->wq); kfree(sat->satcl.name); kfree(sat); } pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); free_irq(dev->irq, dev); slim_del_controller(&dev->ctrl); clk_put(dev->rclk); msm_slim_sps_exit(dev); kthread_stop(dev->rx_msgq_thread); iounmap(dev->bam.base); iounmap(dev->base); kfree(dev); bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimbus_bam_physical"); if (bam_mem) release_mem_region(bam_mem->start, resource_size(bam_mem)); if (slew_mem) release_mem_region(slew_mem->start, resource_size(slew_mem)); slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimbus_physical"); if (slim_mem) release_mem_region(slim_mem->start, resource_size(slim_mem)); return 0; } #ifdef CONFIG_PM_RUNTIME static int msm_slim_runtime_idle(struct device *device) { dev_dbg(device, "pm_runtime: idle...\n"); pm_request_autosuspend(device); return -EAGAIN; } #endif /* * If PM_RUNTIME is not defined, these 2 functions become helper * functions to be called from system suspend/resume. 
So they are not * inside ifdef CONFIG_PM_RUNTIME */ #ifdef CONFIG_PM_SLEEP static int msm_slim_runtime_suspend(struct device *device) { struct platform_device *pdev = to_platform_device(device); struct msm_slim_ctrl *dev = platform_get_drvdata(pdev); int ret; dev_dbg(device, "pm_runtime: suspending...\n"); dev->state = MSM_CTRL_SLEEPING; ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED); if (ret) { dev_err(device, "clk pause not entered:%d", ret); dev->state = MSM_CTRL_AWAKE; } else { dev->state = MSM_CTRL_ASLEEP; } return ret; } static int msm_slim_runtime_resume(struct device *device) { struct platform_device *pdev = to_platform_device(device); struct msm_slim_ctrl *dev = platform_get_drvdata(pdev); int ret = 0; dev_dbg(device, "pm_runtime: resuming...\n"); if (dev->state == MSM_CTRL_ASLEEP) ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0); if (ret) { dev_err(device, "clk pause not exited:%d", ret); dev->state = MSM_CTRL_ASLEEP; } else { dev->state = MSM_CTRL_AWAKE; } return ret; } static int msm_slim_suspend(struct device *dev) { int ret = 0; if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) { dev_dbg(dev, "system suspend"); ret = msm_slim_runtime_suspend(dev); } if (ret == -EBUSY) { /* * If the clock pause failed due to active channels, there is * a possibility that some audio stream is active during suspend * We dont want to return suspend failure in that case so that * display and relevant components can still go to suspend. 
* If there is some other error, then it should be passed-on * to system level suspend */ ret = 0; } return ret; } static int msm_slim_resume(struct device *dev) { /* If runtime_pm is enabled, this resume shouldn't do anything */ if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) { int ret; dev_dbg(dev, "system resume"); ret = msm_slim_runtime_resume(dev); if (!ret) { pm_runtime_mark_last_busy(dev); pm_request_autosuspend(dev); } return ret; } return 0; } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops msm_slim_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS( msm_slim_suspend, msm_slim_resume ) SET_RUNTIME_PM_OPS( msm_slim_runtime_suspend, msm_slim_runtime_resume, msm_slim_runtime_idle ) }; static struct of_device_id msm_slim_dt_match[] = { { .compatible = "qcom,slim-msm", }, {} }; static struct platform_driver msm_slim_driver = { .probe = msm_slim_probe, .remove = msm_slim_remove, .driver = { .name = MSM_SLIM_NAME, .owner = THIS_MODULE, .pm = &msm_slim_dev_pm_ops, .of_match_table = msm_slim_dt_match, }, }; static int msm_slim_init(void) { return platform_driver_register(&msm_slim_driver); } subsys_initcall(msm_slim_init); static void msm_slim_exit(void) { platform_driver_unregister(&msm_slim_driver); } module_exit(msm_slim_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.1"); MODULE_DESCRIPTION("MSM Slimbus controller"); MODULE_ALIAS("platform:msm-slim");
gpl-2.0
frustreated/linux
sound/pci/echoaudio/indigoiox.c
550
2385
// SPDX-License-Identifier: GPL-2.0-only /* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2009 Giuliano Pochini <pochini@shiny.it> */ #define INDIGO_FAMILY #define ECHOCARD_INDIGO_IOX #define ECHOCARD_NAME "Indigo IOx" #define ECHOCARD_HAS_MONITOR #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 2 */ #define PX_DIGITAL_IN 10 /* 0 */ #define PX_NUM 10 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 2 */ #define BX_DIGITAL_OUT 2 /* 0 */ #define BX_ANALOG_IN 2 /* 2 */ #define BX_DIGITAL_IN 4 /* 0 */ #define BX_NUM 4 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/io.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <linux/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/indigo_iox_dsp.fw"); #define FW_361_LOADER 0 #define FW_INDIGO_IOX_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "indigo_iox_dsp.fw"} }; static const struct pci_device_id snd_echo_ids[] = { {0x1057, 0x3410, 0xECC0, 0x00D0, 0, 0, 0}, /* Indigo IOx */ {0,} }; static const struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | 
SNDRV_PCM_RATE_96000, .rate_min = 32000, .rate_max = 96000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, }; #include "indigoiox_dsp.c" #include "indigo_express_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c"
gpl-2.0
drod2169/Linux-3.13.x
drivers/usb/gadget/ncm.c
550
5773
/* * ncm.c -- NCM gadget driver * * Copyright (C) 2010 Nokia Corporation * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com> * * The driver borrows from ether.c which is: * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger * Copyright (C) 2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* #define DEBUG */ /* #define VERBOSE_DEBUG */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb/composite.h> #include "u_ether.h" #include "u_ncm.h" #define DRIVER_DESC "NCM Gadget" /*-------------------------------------------------------------------------*/ /* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ /* Thanks to NetChip Technologies for donating this product ID. * It's for devices with only CDC Ethernet configurations. */ #define CDC_VENDOR_NUM 0x0525 /* NetChip */ #define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */ /*-------------------------------------------------------------------------*/ USB_GADGET_COMPOSITE_OPTIONS(); USB_ETHERNET_MODULE_PARAMETERS(); static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16 (0x0200), .bDeviceClass = USB_CLASS_COMM, .bDeviceSubClass = 0, .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ /* Vendor and product id defaults change according to what configs * we support. (As does bNumConfigurations.) These values can * also be overridden by module parameters. 
*/ .idVendor = cpu_to_le16 (CDC_VENDOR_NUM), .idProduct = cpu_to_le16 (CDC_PRODUCT_NUM), /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ /* NO SERIAL NUMBER */ .bNumConfigurations = 1, }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... */ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /* string IDs are assigned dynamically */ static struct usb_string strings_dev[] = { [USB_GADGET_MANUFACTURER_IDX].s = "", [USB_GADGET_PRODUCT_IDX].s = DRIVER_DESC, [USB_GADGET_SERIAL_IDX].s = "", { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static struct usb_function_instance *f_ncm_inst; static struct usb_function *f_ncm; /*-------------------------------------------------------------------------*/ static int __init ncm_do_config(struct usb_configuration *c) { int status; /* FIXME alloc iConfiguration string, set it in c->strings */ if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } f_ncm = usb_get_function(f_ncm_inst); if (IS_ERR(f_ncm)) { status = PTR_ERR(f_ncm); return status; } status = usb_add_function(c, f_ncm); if (status < 0) { usb_put_function(f_ncm); return status; } return 0; } static struct usb_configuration ncm_config_driver = { /* .label = f(hardware) */ .label = "CDC Ethernet (NCM)", .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; /*-------------------------------------------------------------------------*/ static int __init gncm_bind(struct usb_composite_dev *cdev) { struct usb_gadget 
*gadget = cdev->gadget; struct f_ncm_opts *ncm_opts; int status; f_ncm_inst = usb_get_function_instance("ncm"); if (IS_ERR(f_ncm_inst)) return PTR_ERR(f_ncm_inst); ncm_opts = container_of(f_ncm_inst, struct f_ncm_opts, func_inst); gether_set_qmult(ncm_opts->net, qmult); if (!gether_set_host_addr(ncm_opts->net, host_addr)) pr_info("using host ethernet address: %s", host_addr); if (!gether_set_dev_addr(ncm_opts->net, dev_addr)) pr_info("using self ethernet address: %s", dev_addr); /* Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. */ status = usb_string_ids_tab(cdev, strings_dev); if (status < 0) goto fail; device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id; device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id; status = usb_add_config(cdev, &ncm_config_driver, ncm_do_config); if (status < 0) goto fail; usb_composite_overwrite_options(cdev, &coverwrite); dev_info(&gadget->dev, "%s\n", DRIVER_DESC); return 0; fail: usb_put_function_instance(f_ncm_inst); return status; } static int __exit gncm_unbind(struct usb_composite_dev *cdev) { if (!IS_ERR_OR_NULL(f_ncm)) usb_put_function(f_ncm); if (!IS_ERR_OR_NULL(f_ncm_inst)) usb_put_function_instance(f_ncm_inst); return 0; } static __refdata struct usb_composite_driver ncm_driver = { .name = "g_ncm", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = gncm_bind, .unbind = __exit_p(gncm_unbind), }; MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Yauheni Kaliuta"); MODULE_LICENSE("GPL"); static int __init init(void) { return usb_composite_probe(&ncm_driver); } module_init(init); static void __exit cleanup(void) { usb_composite_unregister(&ncm_driver); } module_exit(cleanup);
gpl-2.0
Ca1ne/Classic-Sense-Kernel
arch/microblaze/kernel/prom_parse.c
806
25971
#undef DEBUG #include <linux/kernel.h> #include <linux/string.h> #include <linux/pci_regs.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/etherdevice.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #define PRu64 "%llx" /* Max address size we deal with */ #define OF_MAX_ADDR_CELLS 4 #define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \ (ns) > 0) static struct of_bus *of_match_bus(struct device_node *np); static int __of_address_to_resource(struct device_node *dev, const u32 *addrp, u64 size, unsigned int flags, struct resource *r); /* Debug utility */ #ifdef DEBUG static void of_dump_addr(const char *s, const u32 *addr, int na) { printk(KERN_INFO "%s", s); while (na--) printk(KERN_INFO " %08x", *(addr++)); printk(KERN_INFO "\n"); } #else static void of_dump_addr(const char *s, const u32 *addr, int na) { } #endif /* Callbacks for bus specific translators */ struct of_bus { const char *name; const char *addresses; int (*match)(struct device_node *parent); void (*count_cells)(struct device_node *child, int *addrc, int *sizec); u64 (*map)(u32 *addr, const u32 *range, int na, int ns, int pna); int (*translate)(u32 *addr, u64 offset, int na); unsigned int (*get_flags)(const u32 *addr); }; /* * Default translator (generic bus) */ static void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec) { if (addrc) *addrc = of_n_addr_cells(dev); if (sizec) *sizec = of_n_size_cells(dev); } static u64 of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna) { u64 cp, s, da; cp = of_read_number(range, na); s = of_read_number(range + na + pna, ns); da = of_read_number(addr, na); pr_debug("OF: default map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da); if (da < cp || da >= (cp + s)) return OF_BAD_ADDR; return da - cp; } static int of_bus_default_translate(u32 *addr, u64 offset, int na) { u64 a = of_read_number(addr, na); memset(addr, 0, na * 4); a += offset; if (na > 1) addr[na - 2] = a >> 32; 
addr[na - 1] = a & 0xffffffffu; return 0; } static unsigned int of_bus_default_get_flags(const u32 *addr) { return IORESOURCE_MEM; } #ifdef CONFIG_PCI /* * PCI bus specific translator */ static int of_bus_pci_match(struct device_node *np) { /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */ return !strcmp(np->type, "pci") || !strcmp(np->type, "vci"); } static void of_bus_pci_count_cells(struct device_node *np, int *addrc, int *sizec) { if (addrc) *addrc = 3; if (sizec) *sizec = 2; } static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna) { u64 cp, s, da; /* Check address type match */ if ((addr[0] ^ range[0]) & 0x03000000) return OF_BAD_ADDR; /* Read address values, skipping high cell */ cp = of_read_number(range + 1, na - 1); s = of_read_number(range + na + pna, ns); da = of_read_number(addr + 1, na - 1); pr_debug("OF: PCI map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da); if (da < cp || da >= (cp + s)) return OF_BAD_ADDR; return da - cp; } static int of_bus_pci_translate(u32 *addr, u64 offset, int na) { return of_bus_default_translate(addr + 1, offset, na - 1); } static unsigned int of_bus_pci_get_flags(const u32 *addr) { unsigned int flags = 0; u32 w = addr[0]; switch ((w >> 24) & 0x03) { case 0x01: flags |= IORESOURCE_IO; break; case 0x02: /* 32 bits */ case 0x03: /* 64 bits */ flags |= IORESOURCE_MEM; break; } if (w & 0x40000000) flags |= IORESOURCE_PREFETCH; return flags; } const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, unsigned int *flags) { const u32 *prop; unsigned int psize; struct device_node *parent; struct of_bus *bus; int onesize, i, na, ns; /* Get parent & match bus type */ parent = of_get_parent(dev); if (parent == NULL) return NULL; bus = of_match_bus(parent); if (strcmp(bus->name, "pci")) { of_node_put(parent); return NULL; } bus->count_cells(dev, &na, &ns); of_node_put(parent); if (!OF_CHECK_COUNTS(na, ns)) return NULL; /* Get "reg" or "assigned-addresses" property */ prop = 
of_get_property(dev, bus->addresses, &psize); if (prop == NULL) return NULL; psize /= 4; onesize = na + ns; for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) { if (size) *size = of_read_number(prop + na, ns); if (flags) *flags = bus->get_flags(prop); return prop; } return NULL; } EXPORT_SYMBOL(of_get_pci_address); int of_pci_address_to_resource(struct device_node *dev, int bar, struct resource *r) { const u32 *addrp; u64 size; unsigned int flags; addrp = of_get_pci_address(dev, bar, &size, &flags); if (addrp == NULL) return -EINVAL; return __of_address_to_resource(dev, addrp, size, flags, r); } EXPORT_SYMBOL_GPL(of_pci_address_to_resource); static u8 of_irq_pci_swizzle(u8 slot, u8 pin) { return (((pin - 1) + slot) % 4) + 1; } int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) { struct device_node *dn, *ppnode; struct pci_dev *ppdev; u32 lspec; u32 laddr[3]; u8 pin; int rc; /* Check if we have a device node, if yes, fallback to standard OF * parsing */ dn = pci_device_to_OF_node(pdev); if (dn) return of_irq_map_one(dn, 0, out_irq); /* Ok, we don't, time to have fun. Let's start by building up an * interrupt spec. we assume #interrupt-cells is 1, which is standard * for PCI. If you do different, then don't use that routine. */ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); if (rc != 0) return rc; /* No pin, exit */ if (pin == 0) return -ENODEV; /* Now we walk up the PCI tree */ lspec = pin; for (;;) { /* Get the pci_dev of our parent */ ppdev = pdev->bus->self; /* Ouch, it's a host bridge... */ if (ppdev == NULL) { struct pci_controller *host; host = pci_bus_to_host(pdev->bus); ppnode = host ? host->dn : NULL; /* No node for host bridge ? 
give up */ if (ppnode == NULL) return -EINVAL; } else /* We found a P2P bridge, check if it has a node */ ppnode = pci_device_to_OF_node(ppdev); /* Ok, we have found a parent with a device-node, hand over to * the OF parsing code. * We build a unit address from the linux device to be used for * resolution. Note that we use the linux bus number which may * not match your firmware bus numbering. * Fortunately, in most cases, interrupt-map-mask doesn't * include the bus number as part of the matching. * You should still be careful about that though if you intend * to rely on this function (you ship a firmware that doesn't * create device nodes for all PCI devices). */ if (ppnode) break; /* We can only get here if we hit a P2P bridge with no node, * let's do standard swizzling and try again */ lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec); pdev = ppdev; } laddr[0] = (pdev->bus->number << 16) | (pdev->devfn << 8); laddr[1] = laddr[2] = 0; return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); } EXPORT_SYMBOL_GPL(of_irq_map_pci); #endif /* CONFIG_PCI */ /* * ISA bus specific translator */ static int of_bus_isa_match(struct device_node *np) { return !strcmp(np->name, "isa"); } static void of_bus_isa_count_cells(struct device_node *child, int *addrc, int *sizec) { if (addrc) *addrc = 2; if (sizec) *sizec = 1; } static u64 of_bus_isa_map(u32 *addr, const u32 *range, int na, int ns, int pna) { u64 cp, s, da; /* Check address type match */ if ((addr[0] ^ range[0]) & 0x00000001) return OF_BAD_ADDR; /* Read address values, skipping high cell */ cp = of_read_number(range + 1, na - 1); s = of_read_number(range + na + pna, ns); da = of_read_number(addr + 1, na - 1); pr_debug("OF: ISA map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da); if (da < cp || da >= (cp + s)) return OF_BAD_ADDR; return da - cp; } static int of_bus_isa_translate(u32 *addr, u64 offset, int na) { return of_bus_default_translate(addr + 1, offset, na - 1); } static unsigned int 
of_bus_isa_get_flags(const u32 *addr) { unsigned int flags = 0; u32 w = addr[0]; if (w & 1) flags |= IORESOURCE_IO; else flags |= IORESOURCE_MEM; return flags; } /* * Array of bus specific translators */ static struct of_bus of_busses[] = { #ifdef CONFIG_PCI /* PCI */ { .name = "pci", .addresses = "assigned-addresses", .match = of_bus_pci_match, .count_cells = of_bus_pci_count_cells, .map = of_bus_pci_map, .translate = of_bus_pci_translate, .get_flags = of_bus_pci_get_flags, }, #endif /* CONFIG_PCI */ /* ISA */ { .name = "isa", .addresses = "reg", .match = of_bus_isa_match, .count_cells = of_bus_isa_count_cells, .map = of_bus_isa_map, .translate = of_bus_isa_translate, .get_flags = of_bus_isa_get_flags, }, /* Default */ { .name = "default", .addresses = "reg", .match = NULL, .count_cells = of_bus_default_count_cells, .map = of_bus_default_map, .translate = of_bus_default_translate, .get_flags = of_bus_default_get_flags, }, }; static struct of_bus *of_match_bus(struct device_node *np) { int i; for (i = 0; i < ARRAY_SIZE(of_busses); i++) if (!of_busses[i].match || of_busses[i].match(np)) return &of_busses[i]; BUG(); return NULL; } static int of_translate_one(struct device_node *parent, struct of_bus *bus, struct of_bus *pbus, u32 *addr, int na, int ns, int pna) { const u32 *ranges; unsigned int rlen; int rone; u64 offset = OF_BAD_ADDR; /* Normally, an absence of a "ranges" property means we are * crossing a non-translatable boundary, and thus the addresses * below the current not cannot be converted to CPU physical ones. * Unfortunately, while this is very clear in the spec, it's not * what Apple understood, and they do have things like /uni-n or * /ht nodes with no "ranges" property and a lot of perfectly * useable mapped devices below them. Thus we treat the absence of * "ranges" as equivalent to an empty "ranges" property which means * a 1:1 translation at that level. 
It's up to the caller not to try * to translate addresses that aren't supposed to be translated in * the first place. --BenH. */ ranges = of_get_property(parent, "ranges", (int *) &rlen); if (ranges == NULL || rlen == 0) { offset = of_read_number(addr, na); memset(addr, 0, pna * 4); pr_debug("OF: no ranges, 1:1 translation\n"); goto finish; } pr_debug("OF: walking ranges...\n"); /* Now walk through the ranges */ rlen /= 4; rone = na + pna + ns; for (; rlen >= rone; rlen -= rone, ranges += rone) { offset = bus->map(addr, ranges, na, ns, pna); if (offset != OF_BAD_ADDR) break; } if (offset == OF_BAD_ADDR) { pr_debug("OF: not found !\n"); return 1; } memcpy(addr, ranges + na, 4 * pna); finish: of_dump_addr("OF: parent translation for:", addr, pna); pr_debug("OF: with offset: "PRu64"\n", offset); /* Translate it into parent bus space */ return pbus->translate(addr, offset, pna); } /* * Translate an address from the device-tree into a CPU physical address, * this walks up the tree and applies the various bus mappings on the * way. * * Note: We consider that crossing any level with #size-cells == 0 to mean * that translation is impossible (that is we are not dealing with a value * that can be mapped to a cpu physical address). 
This is not really specified * that way, but this is traditionally the way IBM at least do things */ u64 of_translate_address(struct device_node *dev, const u32 *in_addr) { struct device_node *parent = NULL; struct of_bus *bus, *pbus; u32 addr[OF_MAX_ADDR_CELLS]; int na, ns, pna, pns; u64 result = OF_BAD_ADDR; pr_debug("OF: ** translation for device %s **\n", dev->full_name); /* Increase refcount at current level */ of_node_get(dev); /* Get parent & match bus type */ parent = of_get_parent(dev); if (parent == NULL) goto bail; bus = of_match_bus(parent); /* Cound address cells & copy address locally */ bus->count_cells(dev, &na, &ns); if (!OF_CHECK_COUNTS(na, ns)) { printk(KERN_ERR "prom_parse: Bad cell count for %s\n", dev->full_name); goto bail; } memcpy(addr, in_addr, na * 4); pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n", bus->name, na, ns, parent->full_name); of_dump_addr("OF: translating address:", addr, na); /* Translate */ for (;;) { /* Switch to parent bus */ of_node_put(dev); dev = parent; parent = of_get_parent(dev); /* If root, we have finished */ if (parent == NULL) { pr_debug("OF: reached root node\n"); result = of_read_number(addr, na); break; } /* Get new parent bus and counts */ pbus = of_match_bus(parent); pbus->count_cells(dev, &pna, &pns); if (!OF_CHECK_COUNTS(pna, pns)) { printk(KERN_ERR "prom_parse: Bad cell count for %s\n", dev->full_name); break; } pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n", pbus->name, pna, pns, parent->full_name); /* Apply bus translation */ if (of_translate_one(dev, bus, pbus, addr, na, ns, pna)) break; /* Complete the move up one level */ na = pna; ns = pns; bus = pbus; of_dump_addr("OF: one level translation:", addr, na); } bail: of_node_put(parent); of_node_put(dev); return result; } EXPORT_SYMBOL(of_translate_address); const u32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags) { const u32 *prop; unsigned int psize; struct device_node *parent; struct of_bus *bus; int 
onesize, i, na, ns; /* Get parent & match bus type */ parent = of_get_parent(dev); if (parent == NULL) return NULL; bus = of_match_bus(parent); bus->count_cells(dev, &na, &ns); of_node_put(parent); if (!OF_CHECK_COUNTS(na, ns)) return NULL; /* Get "reg" or "assigned-addresses" property */ prop = of_get_property(dev, bus->addresses, (int *) &psize); if (prop == NULL) return NULL; psize /= 4; onesize = na + ns; for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) if (i == index) { if (size) *size = of_read_number(prop + na, ns); if (flags) *flags = bus->get_flags(prop); return prop; } return NULL; } EXPORT_SYMBOL(of_get_address); static int __of_address_to_resource(struct device_node *dev, const u32 *addrp, u64 size, unsigned int flags, struct resource *r) { u64 taddr; if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) return -EINVAL; taddr = of_translate_address(dev, addrp); if (taddr == OF_BAD_ADDR) return -EINVAL; memset(r, 0, sizeof(struct resource)); if (flags & IORESOURCE_IO) { unsigned long port; port = -1; /* pci_address_to_pio(taddr); */ if (port == (unsigned long)-1) return -EINVAL; r->start = port; r->end = port + size - 1; } else { r->start = taddr; r->end = taddr + size - 1; } r->flags = flags; r->name = dev->name; return 0; } int of_address_to_resource(struct device_node *dev, int index, struct resource *r) { const u32 *addrp; u64 size; unsigned int flags; addrp = of_get_address(dev, index, &size, &flags); if (addrp == NULL) return -EINVAL; return __of_address_to_resource(dev, addrp, size, flags, r); } EXPORT_SYMBOL_GPL(of_address_to_resource); void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, unsigned long *busno, unsigned long *phys, unsigned long *size) { const u32 *dma_window; u32 cells; const unsigned char *prop; dma_window = dma_window_prop; /* busno is always one cell */ *busno = *(dma_window++); prop = of_get_property(dn, "ibm,#dma-address-cells", NULL); if (!prop) prop = of_get_property(dn, 
"#address-cells", NULL); cells = prop ? *(u32 *)prop : of_n_addr_cells(dn); *phys = of_read_number(dma_window, cells); dma_window += cells; prop = of_get_property(dn, "ibm,#dma-size-cells", NULL); cells = prop ? *(u32 *)prop : of_n_size_cells(dn); *size = of_read_number(dma_window, cells); } /* * Interrupt remapper */ static unsigned int of_irq_workarounds; static struct device_node *of_irq_dflt_pic; static struct device_node *of_irq_find_parent(struct device_node *child) { struct device_node *p; const phandle *parp; if (!of_node_get(child)) return NULL; do { parp = of_get_property(child, "interrupt-parent", NULL); if (parp == NULL) p = of_get_parent(child); else { if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) p = of_node_get(of_irq_dflt_pic); else p = of_find_node_by_phandle(*parp); } of_node_put(child); child = p; } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); return p; } /* This doesn't need to be called if you don't have any special workaround * flags to pass */ void of_irq_map_init(unsigned int flags) { of_irq_workarounds = flags; /* OldWorld, don't bother looking at other things */ if (flags & OF_IMAP_OLDWORLD_MAC) return; /* If we don't have phandles, let's try to locate a default interrupt * controller (happens when booting with BootX). We do a first match * here, hopefully, that only ever happens on machines with one * controller. */ if (flags & OF_IMAP_NO_PHANDLE) { struct device_node *np; for (np = NULL; (np = of_find_all_nodes(np)) != NULL;) { if (of_get_property(np, "interrupt-controller", NULL) == NULL) continue; /* Skip /chosen/interrupt-controller */ if (strcmp(np->name, "chosen") == 0) continue; /* It seems like at least one person on this planet * wants to use BootX on a machine with an AppleKiwi * controller which happens to pretend to be an * interrupt controller too. */ if (strcmp(np->name, "AppleKiwi") == 0) continue; /* I think we found one ! 
*/ of_irq_dflt_pic = np; break; } } } int of_irq_map_raw(struct device_node *parent, const u32 *intspec, u32 ointsize, const u32 *addr, struct of_irq *out_irq) { struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL; const u32 *tmp, *imap, *imask; u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; int imaplen, match, i; pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...]," "ointsize=%d\n", parent->full_name, intspec[0], intspec[1], ointsize); ipar = of_node_get(parent); /* First get the #interrupt-cells property of the current cursor * that tells us how to interpret the passed-in intspec. If there * is none, we are nice and just walk up the tree */ do { tmp = of_get_property(ipar, "#interrupt-cells", NULL); if (tmp != NULL) { intsize = *tmp; break; } tnode = ipar; ipar = of_irq_find_parent(ipar); of_node_put(tnode); } while (ipar); if (ipar == NULL) { pr_debug(" -> no parent found !\n"); goto fail; } pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize); if (ointsize != intsize) return -EINVAL; /* Look for this #address-cells. We have to implement the old linux * trick of looking for the parent here as some device-trees rely on it */ old = of_node_get(ipar); do { tmp = of_get_property(old, "#address-cells", NULL); tnode = of_get_parent(old); of_node_put(old); old = tnode; } while (old && tmp == NULL); of_node_put(old); old = NULL; addrsize = (tmp == NULL) ? 
2 : *tmp; pr_debug(" -> addrsize=%d\n", addrsize); /* Now start the actual "proper" walk of the interrupt tree */ while (ipar != NULL) { /* Now check if cursor is an interrupt-controller and if it is * then we are done */ if (of_get_property(ipar, "interrupt-controller", NULL) != NULL) { pr_debug(" -> got it !\n"); memcpy(out_irq->specifier, intspec, intsize * sizeof(u32)); out_irq->size = intsize; out_irq->controller = ipar; of_node_put(old); return 0; } /* Now look for an interrupt-map */ imap = of_get_property(ipar, "interrupt-map", &imaplen); /* No interrupt map, check for an interrupt parent */ if (imap == NULL) { pr_debug(" -> no map, getting parent\n"); newpar = of_irq_find_parent(ipar); goto skiplevel; } imaplen /= sizeof(u32); /* Look for a mask */ imask = of_get_property(ipar, "interrupt-map-mask", NULL); /* If we were passed no "reg" property and we attempt to parse * an interrupt-map, then #address-cells must be 0. * Fail if it's not. */ if (addr == NULL && addrsize != 0) { pr_debug(" -> no reg passed in when needed !\n"); goto fail; } /* Parse interrupt-map */ match = 0; while (imaplen > (addrsize + intsize + 1) && !match) { /* Compare specifiers */ match = 1; for (i = 0; i < addrsize && match; ++i) { u32 mask = imask ? imask[i] : 0xffffffffu; match = ((addr[i] ^ imap[i]) & mask) == 0; } for (; i < (addrsize + intsize) && match; ++i) { u32 mask = imask ? 
imask[i] : 0xffffffffu; match = ((intspec[i-addrsize] ^ imap[i]) & mask) == 0; } imap += addrsize + intsize; imaplen -= addrsize + intsize; pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen); /* Get the interrupt parent */ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) newpar = of_node_get(of_irq_dflt_pic); else newpar = of_find_node_by_phandle((phandle)*imap); imap++; --imaplen; /* Check if not found */ if (newpar == NULL) { pr_debug(" -> imap parent not found !\n"); goto fail; } /* Get #interrupt-cells and #address-cells of new * parent */ tmp = of_get_property(newpar, "#interrupt-cells", NULL); if (tmp == NULL) { pr_debug(" -> parent lacks " "#interrupt-cells!\n"); goto fail; } newintsize = *tmp; tmp = of_get_property(newpar, "#address-cells", NULL); newaddrsize = (tmp == NULL) ? 0 : *tmp; pr_debug(" -> newintsize=%d, newaddrsize=%d\n", newintsize, newaddrsize); /* Check for malformed properties */ if (imaplen < (newaddrsize + newintsize)) goto fail; imap += newaddrsize + newintsize; imaplen -= newaddrsize + newintsize; pr_debug(" -> imaplen=%d\n", imaplen); } if (!match) goto fail; of_node_put(old); old = of_node_get(newpar); addrsize = newaddrsize; intsize = newintsize; intspec = imap - intsize; addr = intspec - addrsize; skiplevel: /* Iterate again with new parent */ pr_debug(" -> new parent: %s\n", newpar ? 
newpar->full_name : "<>"); of_node_put(ipar); ipar = newpar; newpar = NULL; } fail: of_node_put(ipar); of_node_put(old); of_node_put(newpar); return -EINVAL; } EXPORT_SYMBOL_GPL(of_irq_map_raw); int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq) { struct device_node *p; const u32 *intspec, *tmp, *addr; u32 intsize, intlen; int res; pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index); /* Get the interrupts property */ intspec = of_get_property(device, "interrupts", (int *) &intlen); if (intspec == NULL) return -EINVAL; intlen /= sizeof(u32); pr_debug(" intspec=%d intlen=%d\n", *intspec, intlen); /* Get the reg property (if any) */ addr = of_get_property(device, "reg", NULL); /* Look for the interrupt parent. */ p = of_irq_find_parent(device); if (p == NULL) return -EINVAL; /* Get size of interrupt specifier */ tmp = of_get_property(p, "#interrupt-cells", NULL); if (tmp == NULL) { of_node_put(p); return -EINVAL; } intsize = *tmp; pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); /* Check index */ if ((index + 1) * intsize > intlen) return -EINVAL; /* Get new specifier and map it */ res = of_irq_map_raw(p, intspec + index * intsize, intsize, addr, out_irq); of_node_put(p); return res; } EXPORT_SYMBOL_GPL(of_irq_map_one); /** * Search the device tree for the best MAC address to use. 'mac-address' is * checked first, because that is supposed to contain to "most recent" MAC * address. If that isn't set, then 'local-mac-address' is checked next, * because that is the default address. If that isn't set, then the obsolete * 'address' is checked, just in case we're using an old device tree. * * Note that the 'address' property is supposed to contain a virtual address of * the register set, but some DTS files have redefined that property to be the * MAC address. * * All-zero MAC addresses are rejected, because those could be properties that * exist in the device tree, but were not set by U-Boot. 
For example, the * DTS could define 'mac-address' and 'local-mac-address', with zero MAC * addresses. Some older U-Boots only initialized 'local-mac-address'. In * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists * but is all zeros. */ const void *of_get_mac_address(struct device_node *np) { struct property *pp; pp = of_find_property(np, "mac-address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; pp = of_find_property(np, "local-mac-address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; pp = of_find_property(np, "address", NULL); if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) return pp->value; return NULL; } EXPORT_SYMBOL(of_get_mac_address); int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) { struct of_irq out_irq; int irq; int res; res = of_irq_map_one(dev, index, &out_irq); /* Get irq for the device */ if (res) { pr_debug("IRQ not found... code = %d", res); return NO_IRQ; } /* Assuming single interrupt controller... */ irq = out_irq.specifier[0]; pr_debug("IRQ found = %d", irq); /* Only dereference the resource if both the * resource and the irq are valid. */ if (r && irq != NO_IRQ) { r->start = r->end = irq; r->flags = IORESOURCE_IRQ; } return irq; } EXPORT_SYMBOL_GPL(of_irq_to_resource); void __iomem *of_iomap(struct device_node *np, int index) { struct resource res; if (of_address_to_resource(np, index, &res)) return NULL; return ioremap(res.start, 1 + res.end - res.start); } EXPORT_SYMBOL(of_iomap);
gpl-2.0
ArtemTeleshev/linux
drivers/misc/eeprom/at25.c
806
11933
/* * at25.c -- support most SPI EEPROMs, such as Atmel AT25 models * * Copyright (C) 2006 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/sched.h> #include <linux/spi/spi.h> #include <linux/spi/eeprom.h> #include <linux/property.h> /* * NOTE: this is an *EEPROM* driver. The vagaries of product naming * mean that some AT25 products are EEPROMs, and others are FLASH. * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver, * not this one! */ struct at25_data { struct spi_device *spi; struct memory_accessor mem; struct mutex lock; struct spi_eeprom chip; struct bin_attribute bin; unsigned addrlen; }; #define AT25_WREN 0x06 /* latch the write enable */ #define AT25_WRDI 0x04 /* reset the write enable */ #define AT25_RDSR 0x05 /* read status register */ #define AT25_WRSR 0x01 /* write status register */ #define AT25_READ 0x03 /* read byte(s) */ #define AT25_WRITE 0x02 /* write byte(s)/sector */ #define AT25_SR_nRDY 0x01 /* nRDY = write-in-progress */ #define AT25_SR_WEN 0x02 /* write enable (latched) */ #define AT25_SR_BP0 0x04 /* BP for software writeprotect */ #define AT25_SR_BP1 0x08 #define AT25_SR_WPEN 0x80 /* writeprotect enable */ #define AT25_INSTR_BIT3 0x08 /* Additional address bit in instr */ #define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */ /* Specs often allow 5 msec for a page write, sometimes 20 msec; * it's important to recover from write timeouts. 
*/
#define	EE_TIMEOUT	25	/* msecs: max expected self-timed write cycle */

/*-------------------------------------------------------------------------*/

/* Upper bound on payload moved per SPI message. */
#define	io_limit	PAGE_SIZE	/* bytes */

/*
 * at25_ee_read - read @count bytes at @offset into @buf.
 *
 * Builds one two-transfer SPI message: READ instruction plus MSB-first
 * address, then the data clocked straight into @buf.  Returns the byte
 * count actually read, 0 when @offset is at/past the end, or a negative
 * errno from spi_sync().
 */
static ssize_t
at25_ee_read(
	struct at25_data	*at25,
	char			*buf,
	unsigned		offset,
	size_t			count
)
{
	u8			command[EE_MAXADDRLEN + 1];
	u8			*cp;
	ssize_t			status;
	struct spi_transfer	t[2];
	struct spi_message	m;
	u8			instr;

	/* Clamp the request to the device size; empty reads are no-ops. */
	if (unlikely(offset >= at25->bin.size))
		return 0;
	if ((offset + count) > at25->bin.size)
		count = at25->bin.size - offset;
	if (unlikely(!count))
		return count;

	cp = command;

	instr = AT25_READ;
	/* Some small parts encode the high address bit in bit 3 of the
	 * instruction byte instead of sending an extra address byte.
	 */
	if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
		if (offset >= (1U << (at25->addrlen * 8)))
			instr |= AT25_INSTR_BIT3;
	*cp++ = instr;

	/* 8/16/24-bit address is written MSB first */
	switch (at25->addrlen) {
	default:	/* case 3 */
		*cp++ = offset >> 16;
	case 2:
		*cp++ = offset >> 8;
	case 1:
	case 0:	/* can't happen: for better codegen */
		*cp++ = offset >> 0;
	}

	spi_message_init(&m);
	memset(t, 0, sizeof t);

	/* Transfer 0: instruction + address.  Transfer 1: the payload. */
	t[0].tx_buf = command;
	t[0].len = at25->addrlen + 1;
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = buf;
	t[1].len = count;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&at25->lock);

	/* Read it all at once.
	 *
	 * REVISIT that's potentially a problem with large chips, if
	 * other devices on the bus need to be accessed regularly or
	 * this chip is clocked very slowly
	 */
	status = spi_sync(at25->spi, &m);
	dev_dbg(&at25->spi->dev,
		"read %Zd bytes at %d --> %d\n",
		count, offset, (int) status);

	mutex_unlock(&at25->lock);
	return status ? status : count;
}

/* sysfs "eeprom" read method: resolve the device and delegate. */
static ssize_t
at25_bin_read(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *bin_attr,
	      char *buf, loff_t off, size_t count)
{
	struct device		*dev;
	struct at25_data	*at25;

	dev = container_of(kobj, struct device, kobj);
	at25 = dev_get_drvdata(dev);

	return at25_ee_read(at25, buf, off, count);
}

/*
 * at25_ee_write - write @count bytes from @buf at offset @off.
 *
 * The chip only self-writes within one page, so the request is split
 * into page-aligned segments.  Per segment: send WREN, send WRITE +
 * MSB-first address + data (through a bounce buffer), then poll the
 * status register until the self-timed write cycle completes.
 * Returns the number of bytes written, or a negative errno if nothing
 * was written at all.
 */
static ssize_t
at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
	      size_t count)
{
	ssize_t		status = 0;
	unsigned	written = 0;
	unsigned	buf_size;
	u8		*bounce;

	if (unlikely(off >= at25->bin.size))
		return -EFBIG;
	if ((off + count) > at25->bin.size)
		count = at25->bin.size - off;
	if (unlikely(!count))
		return count;

	/* Temp buffer starts with command and address */
	buf_size = at25->chip.page_size;
	if (buf_size > io_limit)
		buf_size = io_limit;
	bounce = kmalloc(buf_size + at25->addrlen + 1, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	/* For write, rollover is within the page ... so we write at
	 * most one page, then manually roll over to the next page.
	 */
	mutex_lock(&at25->lock);
	do {
		unsigned long	timeout, retries;
		unsigned	segment;
		unsigned	offset = (unsigned) off;
		u8		*cp = bounce;
		int		sr;
		u8		instr;

		/* Each page write must be preceded by a write-enable. */
		*cp = AT25_WREN;
		status = spi_write(at25->spi, cp, 1);
		if (status < 0) {
			dev_dbg(&at25->spi->dev, "WREN --> %d\n",
					(int) status);
			break;
		}

		instr = AT25_WRITE;
		if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
			if (offset >= (1U << (at25->addrlen * 8)))
				instr |= AT25_INSTR_BIT3;
		*cp++ = instr;

		/* 8/16/24-bit address is written MSB first */
		switch (at25->addrlen) {
		default:	/* case 3 */
			*cp++ = offset >> 16;
		case 2:
			*cp++ = offset >> 8;
		case 1:
		case 0:	/* can't happen: for better codegen */
			*cp++ = offset >> 0;
		}

		/* Write as much of a page as we can */
		segment = buf_size - (offset % buf_size);
		if (segment > count)
			segment = count;
		memcpy(cp, buf, segment);
		status = spi_write(at25->spi, bounce,
				segment + at25->addrlen + 1);
		dev_dbg(&at25->spi->dev,
				"write %u bytes at %u --> %d\n",
				segment, offset, (int) status);
		if (status < 0)
			break;

		/* REVISIT this should detect (or prevent) failed writes
		 * to readonly sections of the EEPROM...
		 */

		/* Wait for non-busy status */
		timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT);
		retries = 0;
		do {
			sr = spi_w8r8(at25->spi, AT25_RDSR);
			if (sr < 0 || (sr & AT25_SR_nRDY)) {
				dev_dbg(&at25->spi->dev,
					"rdsr --> %d (%02x)\n", sr, sr);
				/* at HZ=100, this is sloooow */
				msleep(1);
				continue;
			}
			if (!(sr & AT25_SR_nRDY))
				break;
		} while (retries++ < 3 || time_before_eq(jiffies, timeout));
		/* Still busy (or RDSR kept failing) past the deadline. */
		if ((sr < 0) || (sr & AT25_SR_nRDY)) {
			dev_err(&at25->spi->dev,
				"write %d bytes offset %d, "
				"timeout after %u msecs\n",
				segment, offset,
				jiffies_to_msecs(jiffies -
					(timeout - EE_TIMEOUT)));
			status = -ETIMEDOUT;
			break;
		}

		off += segment;
		buf += segment;
		count -= segment;
		written += segment;

	} while (count > 0);

	mutex_unlock(&at25->lock);

	kfree(bounce);
	return written ? written : status;
}

/* sysfs "eeprom" write method: resolve the device and delegate. */
static ssize_t
at25_bin_write(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *bin_attr,
	       char *buf, loff_t off, size_t count)
{
	struct device		*dev;
	struct at25_data	*at25;

	dev = container_of(kobj, struct device, kobj);
	at25 = dev_get_drvdata(dev);

	return at25_ee_write(at25, buf, off, count);
}

/*-------------------------------------------------------------------------*/

/* Let in-kernel code access the eeprom data.
*/ static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf, off_t offset, size_t count) { struct at25_data *at25 = container_of(mem, struct at25_data, mem); return at25_ee_read(at25, buf, offset, count); } static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf, off_t offset, size_t count) { struct at25_data *at25 = container_of(mem, struct at25_data, mem); return at25_ee_write(at25, buf, offset, count); } /*-------------------------------------------------------------------------*/ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip) { u32 val; memset(chip, 0, sizeof(*chip)); strncpy(chip->name, "at25", sizeof(chip->name)); if (device_property_read_u32(dev, "size", &val) == 0 || device_property_read_u32(dev, "at25,byte-len", &val) == 0) { chip->byte_len = val; } else { dev_err(dev, "Error: missing \"size\" property\n"); return -ENODEV; } if (device_property_read_u32(dev, "pagesize", &val) == 0 || device_property_read_u32(dev, "at25,page-size", &val) == 0) { chip->page_size = (u16)val; } else { dev_err(dev, "Error: missing \"pagesize\" property\n"); return -ENODEV; } if (device_property_read_u32(dev, "at25,addr-mode", &val) == 0) { chip->flags = (u16)val; } else { if (device_property_read_u32(dev, "address-width", &val)) { dev_err(dev, "Error: missing \"address-width\" property\n"); return -ENODEV; } switch (val) { case 8: chip->flags |= EE_ADDR1; break; case 16: chip->flags |= EE_ADDR2; break; case 24: chip->flags |= EE_ADDR3; break; default: dev_err(dev, "Error: bad \"address-width\" property: %u\n", val); return -ENODEV; } if (device_property_present(dev, "read-only")) chip->flags |= EE_READONLY; } return 0; } static int at25_probe(struct spi_device *spi) { struct at25_data *at25 = NULL; struct spi_eeprom chip; int err; int sr; int addrlen; /* Chip description */ if (!spi->dev.platform_data) { err = at25_fw_to_chip(&spi->dev, &chip); if (err) return err; } else chip = *(struct spi_eeprom *)spi->dev.platform_data; 
/* For now we only support 8/16/24 bit addressing */ if (chip.flags & EE_ADDR1) addrlen = 1; else if (chip.flags & EE_ADDR2) addrlen = 2; else if (chip.flags & EE_ADDR3) addrlen = 3; else { dev_dbg(&spi->dev, "unsupported address type\n"); return -EINVAL; } /* Ping the chip ... the status register is pretty portable, * unlike probing manufacturer IDs. We do expect that system * firmware didn't write it in the past few milliseconds! */ sr = spi_w8r8(spi, AT25_RDSR); if (sr < 0 || sr & AT25_SR_nRDY) { dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr); return -ENXIO; } at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL); if (!at25) return -ENOMEM; mutex_init(&at25->lock); at25->chip = chip; at25->spi = spi_dev_get(spi); spi_set_drvdata(spi, at25); at25->addrlen = addrlen; /* Export the EEPROM bytes through sysfs, since that's convenient. * And maybe to other kernel code; it might hold a board's Ethernet * address, or board-specific calibration data generated on the * manufacturing floor. * * Default to root-only access to the data; EEPROMs often hold data * that's sensitive for read and/or write, like ethernet addresses, * security codes, board-specific manufacturing calibrations, etc. */ sysfs_bin_attr_init(&at25->bin); at25->bin.attr.name = "eeprom"; at25->bin.attr.mode = S_IRUSR; at25->bin.read = at25_bin_read; at25->mem.read = at25_mem_read; at25->bin.size = at25->chip.byte_len; if (!(chip.flags & EE_READONLY)) { at25->bin.write = at25_bin_write; at25->bin.attr.mode |= S_IWUSR; at25->mem.write = at25_mem_write; } err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin); if (err) return err; if (chip.setup) chip.setup(&at25->mem, chip.context); dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n", (at25->bin.size < 1024) ? at25->bin.size : (at25->bin.size / 1024), (at25->bin.size < 1024) ? "Byte" : "KByte", at25->chip.name, (chip.flags & EE_READONLY) ? 
" (readonly)" : "", at25->chip.page_size); return 0; } static int at25_remove(struct spi_device *spi) { struct at25_data *at25; at25 = spi_get_drvdata(spi); sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin); return 0; } /*-------------------------------------------------------------------------*/ static const struct of_device_id at25_of_match[] = { { .compatible = "atmel,at25", }, { } }; MODULE_DEVICE_TABLE(of, at25_of_match); static struct spi_driver at25_driver = { .driver = { .name = "at25", .owner = THIS_MODULE, .of_match_table = at25_of_match, }, .probe = at25_probe, .remove = at25_remove, }; module_spi_driver(at25_driver); MODULE_DESCRIPTION("Driver for most SPI EEPROMs"); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:at25");
gpl-2.0
TheBr0ken/vigor_aosp_kernel
drivers/staging/iio/resolver/ad2s120x.c
2342
7432
/* * ad2s120x.c simple support for the ADI Resolver to Digital Converters: AD2S1200/1205 * * Copyright (c) 2010-2010 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/delay.h> #include <linux/gpio.h> #include "../iio.h" #include "../sysfs.h" #define DRV_NAME "ad2s120x" /* input pin sample and rdvel is controlled by driver */ #define AD2S120X_PN 2 /* input clock on serial interface */ #define AD2S120X_HZ 8192000 /* clock period in nano second */ #define AD2S120X_TSCLK (1000000000/AD2S120X_HZ) struct ad2s120x_state { struct mutex lock; struct iio_dev *idev; struct spi_device *sdev; unsigned short sample; unsigned short rdvel; u8 rx[2]; u8 tx[2]; }; static ssize_t ad2s120x_show_pos_vel(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; u16 pos; s16 vel; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len = sprintf(buf, "%d %c%c%c%c ", pos, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? 
'1' : '0'); /* delay 18 ns */ /* ndelay(18); */ gpio_set_value(st->rdvel, 0); /* ndelay(5);*/ spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; vel = (st->rx[0] & 0x80) ? 0xf000 : 0; vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len += sprintf(buf + len, "%d %c%c%c%c\n", vel, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: gpio_set_value(st->rdvel, 1); /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? ret : len; } static ssize_t ad2s120x_show_pos(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; u16 pos; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); gpio_set_value(st->rdvel, 1); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len = sprintf(buf, "%d %c%c%c%c ", pos, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? 
ret : len; } static ssize_t ad2s120x_show_vel(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; s16 vel; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); gpio_set_value(st->rdvel, 0); /* ndelay(5);*/ spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; vel = (st->rx[0] & 0x80) ? 0xf000 : 0; vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len += sprintf(buf + len, "%d %c%c%c%c\n", vel, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: gpio_set_value(st->rdvel, 1); /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? 
ret : len; } static IIO_CONST_ATTR(description, "12-Bit R/D Converter with Reference Oscillator"); static IIO_DEVICE_ATTR(pos_vel, S_IRUGO, ad2s120x_show_pos_vel, NULL, 0); static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s120x_show_pos, NULL, 0); static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s120x_show_vel, NULL, 0); static struct attribute *ad2s120x_attributes[] = { &iio_const_attr_description.dev_attr.attr, &iio_dev_attr_pos_vel.dev_attr.attr, &iio_dev_attr_pos.dev_attr.attr, &iio_dev_attr_vel.dev_attr.attr, NULL, }; static const struct attribute_group ad2s120x_attribute_group = { .attrs = ad2s120x_attributes, }; static const struct iio_info ad2s120x_info = { .attrs = &ad2s120x_attribute_group, .driver_module = THIS_MODULE, }; static int __devinit ad2s120x_probe(struct spi_device *spi) { struct ad2s120x_state *st; int pn, ret = 0; unsigned short *pins = spi->dev.platform_data; for (pn = 0; pn < AD2S120X_PN; pn++) { if (gpio_request(pins[pn], DRV_NAME)) { pr_err("%s: request gpio pin %d failed\n", DRV_NAME, pins[pn]); goto error_ret; } gpio_direction_output(pins[pn], 1); } st = kzalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) { ret = -ENOMEM; goto error_ret; } spi_set_drvdata(spi, st); mutex_init(&st->lock); st->sdev = spi; st->sample = pins[0]; st->rdvel = pins[1]; st->idev = iio_allocate_device(0); if (st->idev == NULL) { ret = -ENOMEM; goto error_free_st; } st->idev->dev.parent = &spi->dev; st->idev->info = &ad2s120x_info; st->idev->dev_data = (void *)(st); st->idev->modes = INDIO_DIRECT_MODE; ret = iio_device_register(st->idev); if (ret) goto error_free_dev; spi->max_speed_hz = AD2S120X_HZ; spi->mode = SPI_MODE_3; spi_setup(spi); return 0; error_free_dev: iio_free_device(st->idev); error_free_st: kfree(st); error_ret: for (--pn; pn >= 0; pn--) gpio_free(pins[pn]); return ret; } static int __devexit ad2s120x_remove(struct spi_device *spi) { struct ad2s120x_state *st = spi_get_drvdata(spi); iio_device_unregister(st->idev); kfree(st); return 0; } static struct spi_driver 
ad2s120x_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ad2s120x_probe, .remove = __devexit_p(ad2s120x_remove), }; static __init int ad2s120x_spi_init(void) { return spi_register_driver(&ad2s120x_driver); } module_init(ad2s120x_spi_init); static __exit void ad2s120x_spi_exit(void) { spi_unregister_driver(&ad2s120x_driver); } module_exit(ad2s120x_spi_exit); MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>"); MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
rishi1998/android_kernel_samsung_i9082
drivers/misc/atmel_pwm.c
3622
9383
#include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/atmel_pwm.h> /* * This is a simple driver for the PWM controller found in various newer * Atmel SOCs, including the AVR32 series and the AT91sam9263. * * Chips with current Linux ports have only 4 PWM channels, out of max 32. * AT32UC3A and AT32UC3B chips have 7 channels (but currently no Linux). * Docs are inconsistent about the width of the channel counter registers; * it's at least 16 bits, but several places say 20 bits. */ #define PWM_NCHAN 4 /* max 32 */ struct pwm { spinlock_t lock; struct platform_device *pdev; u32 mask; int irq; void __iomem *base; struct clk *clk; struct pwm_channel *channel[PWM_NCHAN]; void (*handler[PWM_NCHAN])(struct pwm_channel *); }; /* global PWM controller registers */ #define PWM_MR 0x00 #define PWM_ENA 0x04 #define PWM_DIS 0x08 #define PWM_SR 0x0c #define PWM_IER 0x10 #define PWM_IDR 0x14 #define PWM_IMR 0x18 #define PWM_ISR 0x1c static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val) { __raw_writel(val, p->base + offset); } static inline u32 pwm_readl(const struct pwm *p, unsigned offset) { return __raw_readl(p->base + offset); } static inline void __iomem *pwmc_regs(const struct pwm *p, int index) { return p->base + 0x200 + index * 0x20; } static struct pwm *pwm; static void pwm_dumpregs(struct pwm_channel *ch, char *tag) { struct device *dev = &pwm->pdev->dev; dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n", tag, pwm_readl(pwm, PWM_MR), pwm_readl(pwm, PWM_SR), pwm_readl(pwm, PWM_IMR)); dev_dbg(dev, "pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n", ch->index, pwm_channel_readl(ch, PWM_CMR), pwm_channel_readl(ch, PWM_CDTY), pwm_channel_readl(ch, PWM_CPRD), pwm_channel_readl(ch, PWM_CCNT)); } /** * pwm_channel_alloc - allocate an unused PWM channel * @index: identifies the channel * @ch: structure to be initialized * 
* Drivers allocate PWM channels according to the board's wiring, and * matching board-specific setup code. Returns zero or negative errno. */ int pwm_channel_alloc(int index, struct pwm_channel *ch) { unsigned long flags; int status = 0; /* insist on PWM init, with this signal pinned out */ if (!pwm || !(pwm->mask & 1 << index)) return -ENODEV; if (index < 0 || index >= PWM_NCHAN || !ch) return -EINVAL; memset(ch, 0, sizeof *ch); spin_lock_irqsave(&pwm->lock, flags); if (pwm->channel[index]) status = -EBUSY; else { clk_enable(pwm->clk); ch->regs = pwmc_regs(pwm, index); ch->index = index; /* REVISIT: ap7000 seems to go 2x as fast as we expect!! */ ch->mck = clk_get_rate(pwm->clk); pwm->channel[index] = ch; pwm->handler[index] = NULL; /* channel and irq are always disabled when we return */ pwm_writel(pwm, PWM_DIS, 1 << index); pwm_writel(pwm, PWM_IDR, 1 << index); } spin_unlock_irqrestore(&pwm->lock, flags); return status; } EXPORT_SYMBOL(pwm_channel_alloc); static int pwmcheck(struct pwm_channel *ch) { int index; if (!pwm) return -ENODEV; if (!ch) return -EINVAL; index = ch->index; if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch) return -EINVAL; return index; } /** * pwm_channel_free - release a previously allocated channel * @ch: the channel being released * * The channel is completely shut down (counter and IRQ disabled), * and made available for re-use. Returns zero, or negative errno. 
*/ int pwm_channel_free(struct pwm_channel *ch) { unsigned long flags; int t; spin_lock_irqsave(&pwm->lock, flags); t = pwmcheck(ch); if (t >= 0) { pwm->channel[t] = NULL; pwm->handler[t] = NULL; /* channel and irq are always disabled when we return */ pwm_writel(pwm, PWM_DIS, 1 << t); pwm_writel(pwm, PWM_IDR, 1 << t); clk_disable(pwm->clk); t = 0; } spin_unlock_irqrestore(&pwm->lock, flags); return t; } EXPORT_SYMBOL(pwm_channel_free); int __pwm_channel_onoff(struct pwm_channel *ch, int enabled) { unsigned long flags; int t; /* OMITTED FUNCTIONALITY: starting several channels in synch */ spin_lock_irqsave(&pwm->lock, flags); t = pwmcheck(ch); if (t >= 0) { pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t); t = 0; pwm_dumpregs(ch, enabled ? "enable" : "disable"); } spin_unlock_irqrestore(&pwm->lock, flags); return t; } EXPORT_SYMBOL(__pwm_channel_onoff); /** * pwm_clk_alloc - allocate and configure CLKA or CLKB * @prescale: from 0..10, the power of two used to divide MCK * @div: from 1..255, the linear divisor to use * * Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated * clock will run with a period of (2^prescale * div) / MCK, or twice as * long if center aligned PWM output is used. The clock must later be * deconfigured using pwm_clk_free(). */ int pwm_clk_alloc(unsigned prescale, unsigned div) { unsigned long flags; u32 mr; u32 val = (prescale << 8) | div; int ret = -EBUSY; if (prescale >= 10 || div == 0 || div > 255) return -EINVAL; spin_lock_irqsave(&pwm->lock, flags); mr = pwm_readl(pwm, PWM_MR); if ((mr & 0xffff) == 0) { mr |= val; ret = PWM_CPR_CLKA; } else if ((mr & (0xffff << 16)) == 0) { mr |= val << 16; ret = PWM_CPR_CLKB; } if (ret > 0) pwm_writel(pwm, PWM_MR, mr); spin_unlock_irqrestore(&pwm->lock, flags); return ret; } EXPORT_SYMBOL(pwm_clk_alloc); /** * pwm_clk_free - deconfigure and release CLKA or CLKB * * Reverses the effect of pwm_clk_alloc(). 
*/ void pwm_clk_free(unsigned clk) { unsigned long flags; u32 mr; spin_lock_irqsave(&pwm->lock, flags); mr = pwm_readl(pwm, PWM_MR); if (clk == PWM_CPR_CLKA) pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0)); if (clk == PWM_CPR_CLKB) pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16)); spin_unlock_irqrestore(&pwm->lock, flags); } EXPORT_SYMBOL(pwm_clk_free); /** * pwm_channel_handler - manage channel's IRQ handler * @ch: the channel * @handler: the handler to use, possibly NULL * * If the handler is non-null, the handler will be called after every * period of this PWM channel. If the handler is null, this channel * won't generate an IRQ. */ int pwm_channel_handler(struct pwm_channel *ch, void (*handler)(struct pwm_channel *ch)) { unsigned long flags; int t; spin_lock_irqsave(&pwm->lock, flags); t = pwmcheck(ch); if (t >= 0) { pwm->handler[t] = handler; pwm_writel(pwm, handler ? PWM_IER : PWM_IDR, 1 << t); t = 0; } spin_unlock_irqrestore(&pwm->lock, flags); return t; } EXPORT_SYMBOL(pwm_channel_handler); static irqreturn_t pwm_irq(int id, void *_pwm) { struct pwm *p = _pwm; irqreturn_t handled = IRQ_NONE; u32 irqstat; int index; spin_lock(&p->lock); /* ack irqs, then handle them */ irqstat = pwm_readl(pwm, PWM_ISR); while (irqstat) { struct pwm_channel *ch; void (*handler)(struct pwm_channel *ch); index = ffs(irqstat) - 1; irqstat &= ~(1 << index); ch = pwm->channel[index]; handler = pwm->handler[index]; if (handler && ch) { spin_unlock(&p->lock); handler(ch); spin_lock(&p->lock); handled = IRQ_HANDLED; } } spin_unlock(&p->lock); return handled; } static int __init pwm_probe(struct platform_device *pdev) { struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); int irq = platform_get_irq(pdev, 0); u32 *mp = pdev->dev.platform_data; struct pwm *p; int status = -EIO; if (pwm) return -EBUSY; if (!r || irq < 0 || !mp || !*mp) return -ENODEV; if (*mp & ~((1<<PWM_NCHAN)-1)) { dev_warn(&pdev->dev, "mask 0x%x ... 
more than %d channels\n", *mp, PWM_NCHAN); return -EINVAL; } p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; spin_lock_init(&p->lock); p->pdev = pdev; p->mask = *mp; p->irq = irq; p->base = ioremap(r->start, r->end - r->start + 1); if (!p->base) goto fail; p->clk = clk_get(&pdev->dev, "pwm_clk"); if (IS_ERR(p->clk)) { status = PTR_ERR(p->clk); p->clk = NULL; goto fail; } status = request_irq(irq, pwm_irq, 0, pdev->name, p); if (status < 0) goto fail; pwm = p; platform_set_drvdata(pdev, p); return 0; fail: if (p->clk) clk_put(p->clk); if (p->base) iounmap(p->base); kfree(p); return status; } static int __exit pwm_remove(struct platform_device *pdev) { struct pwm *p = platform_get_drvdata(pdev); if (p != pwm) return -EINVAL; clk_enable(pwm->clk); pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1); pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1); clk_disable(pwm->clk); pwm = NULL; free_irq(p->irq, p); clk_put(p->clk); iounmap(p->base); kfree(p); return 0; } static struct platform_driver atmel_pwm_driver = { .driver = { .name = "atmel_pwm", .owner = THIS_MODULE, }, .remove = __exit_p(pwm_remove), /* NOTE: PWM can keep running in AVR32 "idle" and "frozen" states; * and all AT91sam9263 states, albeit at reduced clock rate if * MCK becomes the slow clock (i.e. what Linux labels STR). */ }; static int __init pwm_init(void) { return platform_driver_probe(&atmel_pwm_driver, pwm_probe); } module_init(pwm_init); static void __exit pwm_exit(void) { platform_driver_unregister(&atmel_pwm_driver); } module_exit(pwm_exit); MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:atmel_pwm");
gpl-2.0
navsdev/kernel-nk1-negalite-lt02ltespr
drivers/w1/slaves/w1_bq27000.c
4902
2499
/* * drivers/w1/slaves/w1_bq27000.c * * Copyright (C) 2007 Texas Instruments, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/power/bq27x00_battery.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" #define HDQ_CMD_READ (0) #define HDQ_CMD_WRITE (1<<7) static int F_ID; static int w1_bq27000_read(struct device *dev, unsigned int reg) { u8 val; struct w1_slave *sl = container_of(dev->parent, struct w1_slave, dev); mutex_lock(&sl->master->mutex); w1_write_8(sl->master, HDQ_CMD_READ | reg); val = w1_read_8(sl->master); mutex_unlock(&sl->master->mutex); return val; } static struct bq27000_platform_data bq27000_battery_info = { .read = w1_bq27000_read, .name = "bq27000-battery", }; static int w1_bq27000_add_slave(struct w1_slave *sl) { int ret; struct platform_device *pdev; pdev = platform_device_alloc("bq27000-battery", -1); if (!pdev) { ret = -ENOMEM; return ret; } ret = platform_device_add_data(pdev, &bq27000_battery_info, sizeof(bq27000_battery_info)); pdev->dev.parent = &sl->dev; ret = platform_device_add(pdev); if (ret) goto pdev_add_failed; dev_set_drvdata(&sl->dev, pdev); goto success; pdev_add_failed: platform_device_unregister(pdev); success: return ret; } static void w1_bq27000_remove_slave(struct w1_slave *sl) { struct platform_device *pdev = dev_get_drvdata(&sl->dev); platform_device_unregister(pdev); } static struct w1_family_ops w1_bq27000_fops = { .add_slave = w1_bq27000_add_slave, .remove_slave = w1_bq27000_remove_slave, }; static struct w1_family w1_bq27000_family = { .fid = 1, .fops = &w1_bq27000_fops, }; static int __init w1_bq27000_init(void) { if (F_ID) w1_bq27000_family.fid = F_ID; return 
w1_register_family(&w1_bq27000_family); } static void __exit w1_bq27000_exit(void) { w1_unregister_family(&w1_bq27000_family); } module_init(w1_bq27000_init); module_exit(w1_bq27000_exit); module_param(F_ID, int, S_IRUSR); MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ device"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Texas Instruments Ltd"); MODULE_DESCRIPTION("HDQ/1-wire slave driver bq27000 battery monitor chip");
gpl-2.0